Merge "Remove OfficialClient dependency from HACKING.rst"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index dfcbaba..1cccacc 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,1172 +1,1114 @@
 [DEFAULT]
 
 #
-# Options defined in tempest.openstack.common.lockutils
+# From tempest.config
 #
 
 # Whether to disable inter-process locks (boolean value)
-#disable_process_locking=false
+#disable_process_locking = false
 
 # Directory to use for lock files. (string value)
-#lock_path=<None>
-
+#lock_path = <None>
 
 #
-# Options defined in tempest.openstack.common.log
+# From tempest.config
 #
 
-# Print debugging output (set logging level to DEBUG instead
-# of default WARNING level). (boolean value)
-#debug=false
+# Print debugging output (set logging level to DEBUG instead of
+# default WARNING level). (boolean value)
+#debug = false
 
-# Print more verbose output (set logging level to INFO instead
-# of default WARNING level). (boolean value)
-#verbose=false
+# Print more verbose output (set logging level to INFO instead of
+# default WARNING level). (boolean value)
+#verbose = false
 
-# Log output to standard error. (boolean value)
-#use_stderr=true
+#
+# From tempest.config
+#
 
-# Format string to use for log messages with context. (string
-# value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages without context.
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
 # (string value)
-#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Data to append to log format when level is DEBUG. (string
-# value)
-#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format.
-# (string value)
-#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
-
-# List of logger=LEVEL pairs. (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN
-
-# Enables or disables publication of error events. (boolean
-# value)
-#publish_errors=false
-
-# Enables or disables fatal status of deprecations. (boolean
-# value)
-#fatal_deprecations=false
-
-# The format for an instance that is passed with the log
-# message. (string value)
-#instance_format="[instance: %(uuid)s] "
-
-# The format for an instance UUID that is passed with the log
-# message. (string value)
-#instance_uuid_format="[instance: %(uuid)s] "
-
-# The name of a logging configuration file. This file is
-# appended to any existing logging configuration files. For
-# details about logging configuration files, see the Python
-# logging module documentation. (string value)
 # Deprecated group/name - [DEFAULT]/log_config
-#log_config_append=<None>
+#log_config_append = <None>
 
-# DEPRECATED. A logging.Formatter log message format string
-# which may use any of the available logging.LogRecord
-# attributes. This option is deprecated.  Please use
-# logging_context_format_string and
-# logging_default_format_string instead. (string value)
-#log_format=<None>
+# Format string for %%(asctime)s in log records. Default: %(default)s.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
 
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s . (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is
-# set, logging will go to stdout. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file=<None>
-
-# (Optional) The base directory used for relative --log-file
-# paths. (string value)
+# (Optional) The base directory used for relative --log-file paths.
+# (string value)
 # Deprecated group/name - [DEFAULT]/logdir
-#log_dir=<None>
+#log_dir = <None>
 
-# Use syslog for logging. Existing syslog format is DEPRECATED
-# during I, and will change in J to honor RFC5424. (boolean
-# value)
-#use_syslog=false
+# (Optional) Name of log file to output to. If no default is set,
+# logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
 
-# (Optional) Enables or disables syslog rfc5424 format for
-# logging. If enabled, prefixes the MSG part of the syslog
-# message with APP-NAME (RFC5424). The format without the APP-
-# NAME is deprecated in I, and will be removed in J. (boolean
-# value)
-#use_syslog_rfc_format=false
+# DEPRECATED. A logging.Formatter log message format string which may
+# use any of the available logging.LogRecord attributes. This option
+# is deprecated.  Please use logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format = <None>
 
 # Syslog facility to receive log lines. (string value)
-#syslog_log_facility=LOG_USER
+#syslog_log_facility = LOG_USER
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during
+# I, and will change in J to honor RFC5424. (boolean value)
+#use_syslog = false
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If
+# enabled, prefixes the MSG part of the syslog message with APP-NAME
+# (RFC5424). The format without the APP-NAME is deprecated in I, and
+# will be removed in J. (boolean value)
+#use_syslog_rfc_format = false
+
+#
+# From tempest.config
+#
+
+# Log output to standard error. (boolean value)
+#use_stderr = true
+
+#
+# From tempest.config
+#
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Format string to use for log messages without context. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
 
 
 [auth]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Path to the yaml file that contains the list of credentials
-# to use for running tests (string value)
-#test_accounts_file=etc/accounts.yaml
+# Allows test cases to create/destroy tenants and users. This option
+# requires that OpenStack Identity API admin credentials are known. If
+# false, isolated test cases and parallel execution can still be
+# achieved by configuring a list of test accounts (boolean value)
+# Deprecated group/name - [compute]/allow_tenant_isolation
+# Deprecated group/name - [orchestration]/allow_tenant_isolation
+#allow_tenant_isolation = false
+
+# If set to True it enables the Accounts provider, which locks
+# credentials to allow for parallel execution with pre-provisioned
+# accounts. It can only be used to run tests that ensure credentials
+# cleanup happens. It requires at least `2 * CONC` distinct accounts
+# configured in `test_accounts_file`, with CONC == the number of
+# concurrent test processes. (boolean value)
+#locking_credentials_provider = false
+
+# Path to the yaml file that contains the list of credentials to use
+# for running tests (string value)
+#test_accounts_file = etc/accounts.yaml
 
 
 [baremetal]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Catalog type of the baremetal provisioning service (string
-# value)
-#catalog_type=baremetal
+# Timeout for Ironic node to completely provision (integer value)
+#active_timeout = 300
 
-# Whether the Ironic nova-compute driver is enabled (boolean
+# Timeout for association of Nova instance and Ironic node (integer
 # value)
-#driver_enabled=false
+#association_timeout = 30
+
+# Catalog type of the baremetal provisioning service (string value)
+#catalog_type = baremetal
 
 # Driver name which Ironic uses (string value)
-#driver=fake
+#driver = fake
 
-# The endpoint type to use for the baremetal provisioning
-# service (string value)
-#endpoint_type=publicURL
+# Whether the Ironic nova-compute driver is enabled (boolean value)
+#driver_enabled = false
 
-# Timeout for Ironic node to completely provision (integer
-# value)
-#active_timeout=300
-
-# Timeout for association of Nova instance and Ironic node
-# (integer value)
-#association_timeout=30
+# The endpoint type to use for the baremetal provisioning service
+# (string value)
+#endpoint_type = publicURL
 
 # Timeout for Ironic power transitions. (integer value)
-#power_timeout=60
+#power_timeout = 60
 
 # Timeout for unprovisioning an Ironic node. (integer value)
-#unprovision_timeout=60
+#unprovision_timeout = 60
 
 
 [boto]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# EC2 URL (string value)
-#ec2_url=http://localhost:8773/services/Cloud
-
-# S3 URL (string value)
-#s3_url=http://localhost:8080
-
-# AWS Secret Key (string value)
-#aws_secret=<None>
-
-# AWS Access Key (string value)
-#aws_access=<None>
-
-# AWS Zone for EC2 tests (string value)
-#aws_zone=nova
-
-# S3 Materials Path (string value)
-#s3_materials_path=/opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
-
-# ARI Ramdisk Image manifest (string value)
-#ari_manifest=cirros-0.3.0-x86_64-initrd.manifest.xml
+# AKI Kernel Image manifest (string value)
+#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml
 
 # AMI Machine Image manifest (string value)
-#ami_manifest=cirros-0.3.0-x86_64-blank.img.manifest.xml
+#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml
 
-# AKI Kernel Image manifest (string value)
-#aki_manifest=cirros-0.3.0-x86_64-vmlinuz.manifest.xml
+# ARI Ramdisk Image manifest (string value)
+#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml
 
-# Instance type (string value)
-#instance_type=m1.tiny
+# AWS Access Key (string value)
+#aws_access = <None>
 
-# boto Http socket timeout (integer value)
-#http_socket_timeout=3
+# AWS Secret Key (string value)
+#aws_secret = <None>
 
-# boto num_retries on error (integer value)
-#num_retries=1
-
-# Status Change Timeout (integer value)
-#build_timeout=60
+# AWS Zone for EC2 tests (string value)
+#aws_zone = nova
 
 # Status Change Test Interval (integer value)
-#build_interval=1
+#build_interval = 1
+
+# Status Change Timeout (integer value)
+#build_timeout = 60
+
+# EC2 URL (string value)
+#ec2_url = http://localhost:8773/services/Cloud
+
+# boto Http socket timeout (integer value)
+#http_socket_timeout = 3
+
+# Instance type (string value)
+#instance_type = m1.tiny
+
+# boto num_retries on error (integer value)
+#num_retries = 1
+
+# S3 Materials Path (string value)
+#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
+
+# S3 URL (string value)
+#s3_url = http://localhost:8080
 
 
 [cli]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# enable cli tests (boolean value)
-#enabled=true
+# directory where python client binaries are located (string value)
+#cli_dir = /usr/local/bin
 
-# directory where python client binaries are located (string
-# value)
-#cli_dir=/usr/local/bin
+# enable cli tests (boolean value)
+#enabled = true
 
 # Whether the tempest run location has access to the *-manage
-# commands. In a pure blackbox environment it will not.
-# (boolean value)
-#has_manage=true
+# commands. In a pure blackbox environment it will not. (boolean
+# value)
+#has_manage = true
 
 # Number of seconds to wait on a CLI timeout (integer value)
-#timeout=15
+#timeout = 15
 
 
 [compute]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Allows test cases to create/destroy tenants and users. This
-# option enables isolated test cases and better parallel
-# execution, but also requires that OpenStack Identity API
-# admin credentials are known. (boolean value)
-#allow_tenant_isolation=false
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for an instance to build. (integer value)
+#build_timeout = 300
+
+# Catalog type of the Compute service. (string value)
+#catalog_type = compute
+
+# Catalog type of the Compute v3 service. (string value)
+#catalog_v3_type = computev3
+
+# The endpoint type to use for the compute service. (string value)
+#endpoint_type = publicURL
+
+# Visible fixed network name  (string value)
+#fixed_network_name = private
+
+# Valid primary flavor to use in tests. (string value)
+#flavor_ref = 1
+
+# Valid secondary flavor to be used in tests. (string value)
+#flavor_ref_alt = 2
+
+# Unallocated floating IP range, which will be used to test the
+# floating IP bulk feature for CRUD operation. (string value)
+#floating_ip_range = 10.0.0.0/29
+
+# Password used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_user = root
 
 # Valid primary image reference to be used in tests. This is a
 # required option (string value)
-#image_ref=<None>
+#image_ref = <None>
 
-# Valid secondary image reference to be used in tests. This is
-# a required option, but if only one image is available
-# duplicate the value of image_ref above (string value)
-#image_ref_alt=<None>
-
-# Valid primary flavor to use in tests. (string value)
-#flavor_ref=1
-
-# Valid secondary flavor to be used in tests. (string value)
-#flavor_ref_alt=2
-
-# User name used to authenticate to an instance. (string
-# value)
-#image_ssh_user=root
+# Valid secondary image reference to be used in tests. This is a
+# required option, but if only one image is available duplicate the
+# value of image_ref above (string value)
+#image_ref_alt = <None>
 
 # Password used to authenticate to an instance. (string value)
-#image_ssh_password=password
+#image_ssh_password = password
 
-# User name used to authenticate to an instance using the
-# alternate image. (string value)
-#image_alt_ssh_user=root
-
-# Password used to authenticate to an instance using the
-# alternate image. (string value)
-#image_alt_ssh_password=password
-
-# Time in seconds between build status checks. (integer value)
-#build_interval=1
-
-# Timeout in seconds to wait for an instance to build.
-# (integer value)
-#build_timeout=300
-
-# Should the tests ssh to instances? (boolean value)
-#run_ssh=false
-
-# Auth method used for authenticate to the instance. Valid
-# choices are: keypair, configured, adminpass. keypair: start
-# the servers with an ssh keypair. configured: use the
-# configured user and password. adminpass: use the injected
-# adminPass. disabled: avoid using ssh when it is an option.
-# (string value)
-#ssh_auth_method=keypair
-
-# How to connect to the instance? fixed: using the first ip
-# belongs the fixed network floating: creating and using a
-# floating ip (string value)
-#ssh_connect_method=fixed
-
-# User name used to authenticate to an instance. (string
-# value)
-#ssh_user=root
-
-# Timeout in seconds to wait for ping to succeed. (integer
-# value)
-#ping_timeout=120
-
-# Timeout in seconds to wait for authentication to succeed.
-# (integer value)
-#ssh_timeout=300
-
-# Additional wait time for clean state, when there is no OS-
-# EXT-STS extension available (integer value)
-#ready_wait=0
-
-# Timeout in seconds to wait for output from ssh channel.
-# (integer value)
-#ssh_channel_timeout=60
-
-# Visible fixed network name  (string value)
-#fixed_network_name=private
-
-# Network used for SSH connections. (string value)
-#network_for_ssh=public
+# User name used to authenticate to an instance. (string value)
+#image_ssh_user = root
 
 # IP version used for SSH connections. (integer value)
-#ip_version_for_ssh=4
+#ip_version_for_ssh = 4
 
-# Does SSH use Floating IPs? (boolean value)
-#use_floatingip_for_ssh=true
+# Network used for SSH connections. (string value)
+#network_for_ssh = public
 
-# Catalog type of the Compute service. (string value)
-#catalog_type=compute
+# Path to a private key file for SSH access to remote hosts (string
+# value)
+#path_to_private_key = <None>
+
+# Timeout in seconds to wait for ping to succeed. (integer value)
+#ping_timeout = 120
+
+# Additional wait time for clean state, when there is no OS-EXT-STS
+# extension available (integer value)
+#ready_wait = 0
 
 # The compute region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Should the tests ssh to instances? (boolean value)
+#run_ssh = false
+
+# Time in seconds before a shelved instance is eligible for removal
+# from a host. -1 means never offload, 0 means offload when shelved.
+# This value should be the same as shelved_offload_time in nova.conf,
+# and some tests will run for as long as that time. (integer value)
+#shelved_offload_time = 0
+
+# Auth method used for authenticate to the instance. Valid choices
+# are: keypair, configured, adminpass. keypair: start the servers with
+# an ssh keypair. configured: use the configured user and password.
+# adminpass: use the injected adminPass. disabled: avoid using ssh
+# when it is an option. (string value)
+#ssh_auth_method = keypair
+
+# Timeout in seconds to wait for output from ssh channel. (integer
 # value)
-#region=
+#ssh_channel_timeout = 60
 
-# The endpoint type to use for the compute service. (string
+# How to connect to the instance? fixed: use the first IP belonging to
+# the fixed network; floating: create and use a floating IP (string
+# value)
-#endpoint_type=publicURL
+#ssh_connect_method = fixed
 
-# Catalog type of the Compute v3 service. (string value)
-#catalog_v3_type=computev3
+# Timeout in seconds to wait for authentication to succeed. (integer
+# value)
+#ssh_timeout = 300
 
-# Path to a private key file for SSH access to remote hosts
+# User name used to authenticate to an instance. (string value)
+#ssh_user = root
+
+# Does SSH use Floating IPs? (boolean value)
+#use_floatingip_for_ssh = true
+
+# Expected device name when a volume is attached to an instance
 # (string value)
-#path_to_private_key=<None>
-
-# Expected device name when a volume is attached to an
-# instance (string value)
-#volume_device_name=vdb
-
-# Time in seconds before a shelved instance is eligible for
-# removing from a host.  -1 never offload, 0 offload when
-# shelved. This time should be the same as the time of
-# nova.conf, and some tests will run for as long as the time.
-# (integer value)
-#shelved_offload_time=0
-
-# Unallocated floating IP range, which will be used to test
-# the floating IP bulk feature for CRUD operation. (string
-# value)
-#floating_ip_range=10.0.0.0/29
-
-# Allows test cases to create/destroy tenants and users. This
-# option enables isolated test cases and better parallel
-# execution, but also requires that OpenStack Identity API
-# admin credentials are known. (boolean value)
-#allow_tenant_isolation=false
-
-# Time in seconds between build status checks. (integer value)
-#build_interval=1
+#volume_device_name = vdb
 
 
 [compute-admin]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Administrative Username to use for Nova API requests.
-# (string value)
-#username=<None>
-
-# Administrative Tenant name to use for Nova API requests.
-# (string value)
-#tenant_name=<None>
+# Domain name for authentication as admin (Keystone V3). The same
+# domain applies to user and project (string value)
+#domain_name = <None>
 
 # API key to use when authenticating as admin. (string value)
-#password=<None>
+#password = <None>
 
-# Domain name for authentication as admin (Keystone V3).The
-# same domain applies to user and project (string value)
-#domain_name=<None>
+# Administrative Tenant name to use for Nova API requests. (string
+# value)
+#tenant_name = <None>
+
+# Administrative Username to use for Nova API requests. (string value)
+#username = <None>
 
 
 [compute-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# If false, skip all nova v3 tests. (boolean value)
-#api_v3=false
+# A list of enabled compute extensions with a special entry all which
+# indicates every extension is enabled. Each extension should be
+# specified with alias name. Empty list indicates all extensions are
+# disabled (list value)
+#api_extensions = all
 
-# If false skip all v2 api tests with xml (boolean value)
-#xml_api_v2=true
+# If false, skip all nova v3 tests. (boolean value)
+#api_v3 = false
+
+# A list of enabled v3 extensions with a special entry all which
+# indicates every extension is enabled. Each extension should be
+# specified with alias name. Empty list indicates all extensions are
+# disabled (list value)
+#api_v3_extensions = all
+
+# Does the test environment's block migration support cinder iSCSI
+# volumes? (boolean value)
+#block_migrate_cinder_iscsi = false
+
+# Does the test environment use block devices for live migration
+# (boolean value)
+#block_migration_for_live_migration = false
+
+# Does the test environment support changing the admin password?
+# (boolean value)
+#change_password = false
+
+# Does the test environment support obtaining instance serial console
+# output? (boolean value)
+#console_output = true
 
 # If false, skip disk config tests (boolean value)
-#disk_config=true
+#disk_config = true
 
-# A list of enabled compute extensions with a special entry
-# all which indicates every extension is enabled. Each
-# extension should be specified with alias name. Empty list
-# indicates all extensions are disabled (list value)
-#api_extensions=all
-
-# A list of enabled v3 extensions with a special entry all
-# which indicates every extension is enabled. Each extension
-# should be specified with alias name. Empty list indicates
-# all extensions are disabled (list value)
-#api_v3_extensions=all
-
-# Does the test environment support changing the admin
-# password? (boolean value)
-#change_password=false
-
-# Does the test environment support obtaining instance serial
-# console output? (boolean value)
-#console_output=true
-
-# Does the test environment support resizing? (boolean value)
-#resize=false
-
-# Does the test environment support pausing? (boolean value)
-#pause=true
-
-# Does the test environment support shelving/unshelving?
-# (boolean value)
-#shelve=true
-
-# Does the test environment support suspend/resume? (boolean
-# value)
-#suspend=true
-
-# Does the test environment support live migration available?
-# (boolean value)
-#live_migration=false
-
-# Does the test environment use block devices for live
-# migration (boolean value)
-#block_migration_for_live_migration=false
-
-# Does the test environment block migration support cinder
-# iSCSI volumes (boolean value)
-#block_migrate_cinder_iscsi=false
-
-# Enable VNC console. This configuration value should be same
-# as [nova.vnc]->vnc_enabled in nova.conf (boolean value)
-#vnc_console=false
-
-# Enable Spice console. This configuration value should be
-# same as [nova.spice]->enabled in nova.conf (boolean value)
-#spice_console=false
-
-# Enable RDP console. This configuration value should be same
-# as [nova.rdp]->enabled in nova.conf (boolean value)
-#rdp_console=false
-
-# Does the test environment support instance rescue mode?
-# (boolean value)
-#rescue=true
-
-# Enables returning of the instance password by the relevant
-# server API calls such as create, rebuild or rescue. (boolean
-# value)
-#enable_instance_password=true
+# Enables returning of the instance password by the relevant server
+# API calls such as create, rebuild or rescue. (boolean value)
+#enable_instance_password = true
 
 # Does the test environment support dynamic network interface
 # attachment? (boolean value)
-#interface_attach=true
+#interface_attach = true
 
-# Does the test environment support creating snapshot images
-# of running instances? (boolean value)
-#snapshot=true
+# Does the test environment support live migration? (boolean
+# value)
+#live_migration = false
+
+# Does the test environment support pausing? (boolean value)
+#pause = true
+
+# Enable RDP console. This configuration value should be same as
+# [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console = false
+
+# Does the test environment support instance rescue mode? (boolean
+# value)
+#rescue = true
+
+# Does the test environment support resizing? (boolean value)
+#resize = false
+
+# Does the test environment support shelving/unshelving? (boolean
+# value)
+#shelve = true
+
+# Does the test environment support creating snapshot images of
+# running instances? (boolean value)
+#snapshot = true
+
+# Enable Spice console. This configuration value should be same as
+# [nova.spice]->enabled in nova.conf (boolean value)
+#spice_console = false
+
+# Does the test environment support suspend/resume? (boolean value)
+#suspend = true
+
+# Enable VNC console. This configuration value should be same as
+# [nova.vnc]->vnc_enabled in nova.conf (boolean value)
+#vnc_console = false
+
+# If false skip all v2 api tests with xml (boolean value)
+#xml_api_v2 = true
 
 
 [dashboard]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Where the dashboard can be found (string value)
-#dashboard_url=http://localhost/
+#dashboard_url = http://localhost/
 
 # Login page for the dashboard (string value)
-#login_url=http://localhost/auth/login/
+#login_url = http://localhost/auth/login/
 
 
 [data_processing]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the data processing service. (string value)
-#catalog_type=data_processing
+#catalog_type = data_processing
 
-# The endpoint type to use for the data processing service.
-# (string value)
-#endpoint_type=publicURL
+# The endpoint type to use for the data processing service. (string
+# value)
+#endpoint_type = publicURL
 
 
 [database]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Database service. (string value)
-#catalog_type=database
+#catalog_type = database
 
-# Valid primary flavor to use in database tests. (string
-# value)
-#db_flavor_ref=1
+# Current database version to use in database tests. (string value)
+#db_current_version = v1.0
 
-# Current database version to use in database tests. (string
-# value)
-#db_current_version=v1.0
+# Valid primary flavor to use in database tests. (string value)
+#db_flavor_ref = 1
 
 
 [debug]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Enable diagnostic commands (boolean value)
-#enable=true
+#enable = true
 
-# A regex to determine which requests should be traced.  This
-# is a regex to match the caller for rest client requests to
-# be able to selectively trace calls out of specific classes
-# and methods. It largely exists for test development, and is
-# not expected to be used in a real deploy of tempest. This
-# will be matched against the discovered ClassName:method in
-# the test environment.  Expected values for this field are:
-# * ClassName:test_method_name - traces one test_method  *
-# ClassName:setUp(Class) - traces specific setup functions  *
-# ClassName:tearDown(Class) - traces specific teardown
-# functions  * ClassName:_run_cleanups - traces the cleanup
-# functions  If nothing is specified, this feature is not
-# enabled. To trace everything specify .* as the regex.
-# (string value)
-#trace_requests=
+# A regex to determine which requests should be traced.  This is a
+# regex to match the caller for rest client requests to be able to
+# selectively trace calls out of specific classes and methods. It
+# largely exists for test development, and is not expected to be used
+# in a real deploy of tempest. This will be matched against the
+# discovered ClassName:method in the test environment.  Expected
+# values for this field are:   * ClassName:test_method_name - traces
+# one test_method  * ClassName:setUp(Class) - traces specific setup
+# functions  * ClassName:tearDown(Class) - traces specific teardown
+# functions  * ClassName:_run_cleanups - traces the cleanup functions
+# If nothing is specified, this feature is not enabled. To trace
+# everything specify .* as the regex.  (string value)
+#trace_requests =
 
 
 [identity]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Catalog type of the Identity service. (string value)
-#catalog_type=identity
-
-# Set to True if using self-signed SSL certificates. (boolean
-# value)
-#disable_ssl_certificate_validation=false
-
-# Full URI of the OpenStack Identity API (Keystone), v2
-# (string value)
-#uri=<None>
-
-# Full URI of the OpenStack Identity API (Keystone), v3
-# (string value)
-#uri_v3=<None>
-
-# Identity API version to be used for authentication for API
-# tests. (string value)
-#auth_version=v2
-
-# The identity region name to use. Also used as the other
-# services' region name unless they are set explicitly. If no
-# such region is found in the service catalog, the first found
-# one is used. (string value)
-#region=RegionOne
-
-# The endpoint type to use for the identity service. (string
-# value)
-#endpoint_type=publicURL
-
-# Username to use for Nova API requests. (string value)
-#username=<None>
-
-# Tenant name to use for Nova API requests. (string value)
-#tenant_name=<None>
-
-# Role required to administrate keystone. (string value)
-#admin_role=admin
-
-# API key to use when authenticating. (string value)
-#password=<None>
-
-# Domain name for authentication (Keystone V3).The same domain
+# Admin domain name for authentication (Keystone V3). The same domain
 # applies to user and project (string value)
-#domain_name=<None>
-
-# Username of alternate user to use for Nova API requests.
-# (string value)
-#alt_username=<None>
-
-# Alternate user's Tenant name to use for Nova API requests.
-# (string value)
-#alt_tenant_name=<None>
-
-# API key to use when authenticating as alternate user.
-# (string value)
-#alt_password=<None>
-
-# Alternate domain name for authentication (Keystone V3).The
-# same domain applies to user and project (string value)
-#alt_domain_name=<None>
-
-# Administrative Username to use for Keystone API requests.
-# (string value)
-#admin_username=<None>
-
-# Administrative Tenant name to use for Keystone API requests.
-# (string value)
-#admin_tenant_name=<None>
+#admin_domain_name = <None>
 
 # API key to use when authenticating as admin. (string value)
-#admin_password=<None>
+#admin_password = <None>
 
-# Admin domain name for authentication (Keystone V3).The same
+# Role required to administrate keystone. (string value)
+#admin_role = admin
+
+# Administrative Tenant name to use for Keystone API requests. (string
+# value)
+#admin_tenant_name = <None>
+
+# Administrative Username to use for Keystone API requests. (string
+# value)
+#admin_username = <None>
+
+# Alternate domain name for authentication (Keystone V3). The same
 # domain applies to user and project (string value)
-#admin_domain_name=<None>
+#alt_domain_name = <None>
+
+# API key to use when authenticating as alternate user. (string value)
+#alt_password = <None>
+
+# Alternate user's Tenant name to use for Nova API requests. (string
+# value)
+#alt_tenant_name = <None>
+
+# Username of alternate user to use for Nova API requests. (string
+# value)
+#alt_username = <None>
+
+# Identity API version to be used for authentication for API tests.
+# (string value)
+#auth_version = v2
+
+# Catalog type of the Identity service. (string value)
+#catalog_type = identity
+
+# Set to True if using self-signed SSL certificates. (boolean value)
+#disable_ssl_certificate_validation = false
+
+# Domain name for authentication (Keystone V3). The same domain applies
+# to user and project (string value)
+#domain_name = <None>
+
+# The endpoint type to use for the identity service. (string value)
+#endpoint_type = publicURL
+
+# API key to use when authenticating. (string value)
+#password = <None>
+
+# The identity region name to use. Also used as the other services'
+# region name unless they are set explicitly. If no such region is
+# found in the service catalog, the first found one is used. (string
+# value)
+#region = RegionOne
+
+# Tenant name to use for Nova API requests. (string value)
+#tenant_name = <None>
+
+# Full URI of the OpenStack Identity API (Keystone), v2 (string value)
+#uri = <None>
+
+# Full URI of the OpenStack Identity API (Keystone), v3 (string value)
+#uri_v3 = <None>
+
+# Username to use for Nova API requests. (string value)
+#username = <None>
 
 
 [identity-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Does the identity service have delegation and impersonation
-# enabled (boolean value)
-#trust=true
-
 # Is the v2 identity API enabled (boolean value)
-#api_v2=true
+#api_v2 = true
 
 # Is the v3 identity API enabled (boolean value)
-#api_v3=true
+#api_v3 = true
+
+# Does the identity service have delegation and impersonation enabled
+# (boolean value)
+#trust = true
 
 
 [image]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Image service. (string value)
-#catalog_type=image
+#catalog_type = image
 
-# The image region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
-# value)
-#region=
-
-# The endpoint type to use for the image service. (string
-# value)
-#endpoint_type=publicURL
+# The endpoint type to use for the image service. (string value)
+#endpoint_type = publicURL
 
 # http accessible image (string value)
-#http_image=http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+#http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+
+# The image region name to use. If empty, the value of identity.region
+# is used instead. If no such region is found in the service catalog,
+# the first found one is used. (string value)
+#region =
 
 
 [image-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Is the v2 image API enabled (boolean value)
-#api_v2=true
-
 # Is the v1 image API enabled (boolean value)
-#api_v1=true
+#api_v1 = true
+
+# Is the v2 image API enabled (boolean value)
+#api_v2 = true
 
 
 [input-scenario]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Matching images become parameters for scenario tests (string
+# Matching flavors become parameters for scenario tests (string value)
+#flavor_regex = ^m1.nano$
+
+# Matching images become parameters for scenario tests (string value)
+#image_regex = ^cirros-0.3.1-x86_64-uec$
+
+# SSH verification in tests is skipped for matching images (string
 # value)
-#image_regex=^cirros-0.3.1-x86_64-uec$
+#non_ssh_image_regex = ^.*[Ww]in.*$
 
-# Matching flavors become parameters for scenario tests
-# (string value)
-#flavor_regex=^m1.nano$
-
-# SSH verification in tests is skippedfor matching images
-# (string value)
-#non_ssh_image_regex=^.*[Ww]in.*$
-
-# List of user mapped to regex to matching image names.
-# (string value)
-#ssh_user_regex=[["^.*[Cc]irros.*$", "root"]]
+# List of user mapped to regex to matching image names. (string value)
+#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]]
 
 
 [messaging]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Messaging service. (string value)
-#catalog_type=messaging
-
-# The maximum number of queue records per page when listing
-# queues (integer value)
-#max_queues_per_page=20
-
-# The maximum metadata size for a queue (integer value)
-#max_queue_metadata=65536
-
-# The maximum number of queue message per page when listing
-# (or) posting messages (integer value)
-#max_messages_per_page=20
-
-# The maximum size of a message body (integer value)
-#max_message_size=262144
-
-# The maximum number of messages per claim (integer value)
-#max_messages_per_claim=20
-
-# The maximum ttl for a message (integer value)
-#max_message_ttl=1209600
-
-# The maximum ttl for a claim (integer value)
-#max_claim_ttl=43200
+#catalog_type = messaging
 
 # The maximum grace period for a claim (integer value)
-#max_claim_grace=43200
+#max_claim_grace = 43200
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl = 43200
+
+# The maximum size of a message body (integer value)
+#max_message_size = 262144
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl = 1209600
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim = 20
+
+# The maximum number of queue messages per page when listing or
+# posting messages (integer value)
+#max_messages_per_page = 20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata = 65536
+
+# The maximum number of queue records per page when listing queues
+# (integer value)
+#max_queues_per_page = 20
 
 
 [negative]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Test generator class for all negative tests (string value)
-#test_generator=tempest.common.generator.negative_generator.NegativeTestGenerator
+#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator
 
 
 [network]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# Time in seconds between network operation status checks. (integer
+# value)
+#build_interval = 1
+
+# Timeout in seconds to wait for network operation to complete.
+# (integer value)
+#build_timeout = 300
+
 # Catalog type of the Neutron service. (string value)
-#catalog_type=network
+#catalog_type = network
+
+# List of DNS servers which should be used for subnet creation (list
+# value)
+#dns_servers = 8.8.8.8,8.8.4.4
+
+# The endpoint type to use for the network service. (string value)
+#endpoint_type = publicURL
+
+# Id of the public network that provides external connectivity (string
+# value)
+#public_network_id =
+
+# Id of the public router that provides external connectivity (string
+# value)
+#public_router_id =
 
 # The network region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
-# value)
-#region=
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
 
-# The endpoint type to use for the network service. (string
-# value)
-#endpoint_type=publicURL
-
-# The cidr block to allocate tenant ipv4 subnets from (string
-# value)
-#tenant_network_cidr=10.100.0.0/16
+# The cidr block to allocate tenant ipv4 subnets from (string value)
+#tenant_network_cidr = 10.100.0.0/16
 
 # The mask bits for tenant ipv4 subnets (integer value)
-#tenant_network_mask_bits=28
+#tenant_network_mask_bits = 28
 
-# The cidr block to allocate tenant ipv6 subnets from (string
-# value)
-#tenant_network_v6_cidr=2003::/48
+# The cidr block to allocate tenant ipv6 subnets from (string value)
+#tenant_network_v6_cidr = 2003::/48
 
 # The mask bits for tenant ipv6 subnets (integer value)
-#tenant_network_v6_mask_bits=64
+#tenant_network_v6_mask_bits = 64
 
-# Whether tenant network connectivity should be evaluated
-# directly (boolean value)
-#tenant_networks_reachable=false
-
-# Id of the public network that provides external connectivity
-# (string value)
-#public_network_id=
-
-# Id of the public router that provides external connectivity
-# (string value)
-#public_router_id=
-
-# Timeout in seconds to wait for network operation to
-# complete. (integer value)
-#build_timeout=300
-
-# Time in seconds between network operation status checks.
-# (integer value)
-#build_interval=1
-
-# List of dns servers whichs hould be used for subnet creation
-# (list value)
-#dns_servers=8.8.8.8,8.8.4.4
+# Whether tenant network connectivity should be evaluated directly
+# (boolean value)
+#tenant_networks_reachable = false
 
 
 [network-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# A list of enabled network extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
 # Allow the execution of IPv6 tests (boolean value)
-#ipv6=true
+#ipv6 = true
 
-# A list of enabled network extensions with a special entry
-# all which indicates every extension is enabled. Empty list
-# indicates all extensions are disabled (list value)
-#api_extensions=all
-
-# Allow the execution of IPv6 subnet tests that use the
-# extended IPv6 attributes ipv6_ra_mode and ipv6_address_mode
-# (boolean value)
-#ipv6_subnet_attributes=false
+# Allow the execution of IPv6 subnet tests that use the extended IPv6
+# attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
+#ipv6_subnet_attributes = false
 
 
 [object-storage]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Object-Storage service. (string value)
-#catalog_type=object-store
+#catalog_type = object-store
 
-# The object-storage region name to use. If empty, the value
-# of identity.region is used instead. If no such region is
-# found in the service catalog, the first found one is used.
-# (string value)
-#region=
+# Number of seconds to wait while looping to check the status of a
+# container to container synchronization (integer value)
+#container_sync_interval = 5
 
-# The endpoint type to use for the object-store service.
-# (string value)
-#endpoint_type=publicURL
+# Number of seconds to wait for a container to container
+# synchronization to complete. (integer value)
+#container_sync_timeout = 120
 
-# Number of seconds to time on waiting for a container to
-# container synchronization complete. (integer value)
-#container_sync_timeout=120
+# The endpoint type to use for the object-store service. (string
+# value)
+#endpoint_type = publicURL
 
-# Number of seconds to wait while looping to check the status
-# of a container to container synchronization (integer value)
-#container_sync_interval=5
+# Role to add to users created for swift tests to enable creating
+# containers (string value)
+#operator_role = Member
 
-# Role to add to users created for swift tests to enable
-# creating containers (string value)
-#operator_role=Member
+# The object-storage region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
 
 # User role that has reseller admin (string value)
-#reseller_admin_role=ResellerAdmin
+#reseller_admin_role = ResellerAdmin
 
 
 [object-storage-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# A list of the enabled optional discoverable apis. A single
-# entry, all, indicates that all of these features are
-# expected to be enabled (list value)
-#discoverable_apis=all
-
 # Execute (old style) container-sync tests (boolean value)
-#container_sync=true
-
-# Execute object-versioning tests (boolean value)
-#object_versioning=true
+#container_sync = true
 
 # Execute discoverability tests (boolean value)
-#discoverability=true
+#discoverability = true
+
+# A list of the enabled optional discoverable apis. A single entry,
+# all, indicates that all of these features are expected to be enabled
+# (list value)
+#discoverable_apis = all
+
+# Execute object-versioning tests (boolean value)
+#object_versioning = true
 
 
 [orchestration]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a stack to build. (integer value)
+#build_timeout = 1200
+
 # Catalog type of the Orchestration service. (string value)
-#catalog_type=orchestration
+#catalog_type = orchestration
+
+# The endpoint type to use for the orchestration service. (string
+# value)
+#endpoint_type = publicURL
+
+# Name of heat-cfntools enabled image to use when launching test
+# instances. (string value)
+#image_ref = <None>
+
+# Instance type for tests. Needs to be big enough for a full OS plus
+# the test workload (string value)
+#instance_type = m1.micro
+
+# Name of existing keypair to launch servers with. (string value)
+#keypair_name = <None>
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_resources_per_stack = 1000
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_template_size = 524288
 
 # The orchestration region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
-# value)
-#region=
-
-# The endpoint type to use for the orchestration service.
-# (string value)
-#endpoint_type=publicURL
-
-# Timeout in seconds to wait for a stack to build. (integer
-# value)
-#build_timeout=1200
-
-# Instance type for tests. Needs to be big enough for a full
-# OS plus the test workload (string value)
-#instance_type=m1.micro
-
-# Name of heat-cfntools enabled image to use when launching
-# test instances. (string value)
-#image_ref=<None>
-
-# Name of existing keypair to launch servers with. (string
-# value)
-#keypair_name=<None>
-
-# Value must match heat configuration of the same name.
-# (integer value)
-#max_template_size=524288
-
-# Value must match heat configuration of the same name.
-# (integer value)
-#max_resources_per_stack=1000
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
 
 
 [scenario]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# AKI image file name (string value)
+#aki_img_file = cirros-0.3.1-x86_64-vmlinuz
+
+# AMI image file name (string value)
+#ami_img_file = cirros-0.3.1-x86_64-blank.img
+
+# ARI image file name (string value)
+#ari_img_file = cirros-0.3.1-x86_64-initrd
+
+# Image container format (string value)
+#img_container_format = bare
+
 # Directory containing image files (string value)
-#img_dir=/opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+
+# Image disk format (string value)
+#img_disk_format = qcow2
 
 # Image file name (string value)
 # Deprecated group/name - [DEFAULT]/qcow2_img_file
-#img_file=cirros-0.3.1-x86_64-disk.img
+#img_file = cirros-0.3.1-x86_64-disk.img
 
-# Image disk format (string value)
-#img_disk_format=qcow2
-
-# Image container format (string value)
-#img_container_format=bare
-
-# AMI image file name (string value)
-#ami_img_file=cirros-0.3.1-x86_64-blank.img
-
-# ARI image file name (string value)
-#ari_img_file=cirros-0.3.1-x86_64-initrd
-
-# AKI image file name (string value)
-#aki_img_file=cirros-0.3.1-x86_64-vmlinuz
+# Specifies how many resources to request at once. Used for large
+# operations testing. (integer value)
+#large_ops_number = 0
 
 # ssh username for the image file (string value)
-#ssh_user=cirros
-
-# specifies how many resources to request at once. Used for
-# large operations testing. (integer value)
-#large_ops_number=0
+#ssh_user = cirros
 
 
 [service_available]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Whether or not cinder is expected to be available (boolean
+# Whether or not Ceilometer is expected to be available (boolean
 # value)
-#cinder=true
+#ceilometer = true
 
-# Whether or not neutron is expected to be available (boolean
-# value)
-#neutron=false
+# Whether or not cinder is expected to be available (boolean value)
+#cinder = true
 
-# Whether or not glance is expected to be available (boolean
-# value)
-#glance=true
+# Whether or not glance is expected to be available (boolean value)
+#glance = true
 
-# Whether or not swift is expected to be available (boolean
-# value)
-#swift=true
+# Whether or not Heat is expected to be available (boolean value)
+#heat = false
 
-# Whether or not nova is expected to be available (boolean
-# value)
-#nova=true
+# Whether or not Horizon is expected to be available (boolean value)
+#horizon = true
 
-# Whether or not Heat is expected to be available (boolean
-# value)
-#heat=false
+# Whether or not Ironic is expected to be available (boolean value)
+#ironic = false
 
-# Whether or not Ceilometer is expected to be available
-# (boolean value)
-#ceilometer=true
+# Whether or not neutron is expected to be available (boolean value)
+#neutron = false
 
-# Whether or not Horizon is expected to be available (boolean
-# value)
-#horizon=true
+# Whether or not nova is expected to be available (boolean value)
+#nova = true
 
-# Whether or not Sahara is expected to be available (boolean
-# value)
-#sahara=false
+# Whether or not Sahara is expected to be available (boolean value)
+#sahara = false
 
-# Whether or not Ironic is expected to be available (boolean
-# value)
-#ironic=false
+# Whether or not swift is expected to be available (boolean value)
+#swift = true
 
-# Whether or not Trove is expected to be available (boolean
-# value)
-#trove=false
+# Whether or not Trove is expected to be available (boolean value)
+#trove = false
 
-# Whether or not Zaqar is expected to be available (boolean
-# value)
-#zaqar=false
+# Whether or not Zaqar is expected to be available (boolean value)
+#zaqar = false
 
 
 [stress]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Directory containing log files on the compute nodes (string
-# value)
-#nova_logdir=<None>
+# Controller host. (string value)
+#controller = <None>
 
-# Maximum number of instances to create during test. (integer
-# value)
-#max_instances=16
+# The number of threads created during a stress test. (integer value)
+#default_thread_number_per_action = 4
+
+# Allows a full cleaning process after a stress test. Caution: this
+# cleanup will remove every object of every tenant. (boolean value)
+#full_clean_stack = false
+
+# Prevent the cleaning (tearDownClass()) between each stress test run
+# if an exception occurs during this run. (boolean value)
+#leave_dirty_stack = false
+
+# time (in seconds) between log file error checks. (integer value)
+#log_check_interval = 60
+
+# Maximum number of instances to create during test. (integer value)
+#max_instances = 16
+
+# Directory containing log files on the compute nodes (string value)
+#nova_logdir = <None>
 
 # Controller host. (string value)
-#controller=<None>
-
-# Controller host. (string value)
-#target_controller=<None>
-
-# ssh user. (string value)
-#target_ssh_user=<None>
-
-# Path to private key. (string value)
-#target_private_key_path=<None>
+#target_controller = <None>
 
 # regexp for list of log files. (string value)
-#target_logfiles=<None>
+#target_logfiles = <None>
 
-# time (in seconds) between log file error checks. (integer
-# value)
-#log_check_interval=60
+# Path to private key. (string value)
+#target_private_key_path = <None>
 
-# The number of threads created while stress test. (integer
-# value)
-#default_thread_number_per_action=4
-
-# Prevent the cleaning (tearDownClass()) between each stress
-# test run if an exception occurs during this run. (boolean
-# value)
-#leave_dirty_stack=false
-
-# Allows a full cleaning process after a stress test. Caution
-# : this cleanup will remove every objects of every tenant.
-# (boolean value)
-#full_clean_stack=false
+# ssh user. (string value)
+#target_ssh_user = <None>
 
 
 [telemetry]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Telemetry service. (string value)
-#catalog_type=metering
+#catalog_type = metering
 
-# The endpoint type to use for the telemetry service. (string
+# The endpoint type to use for the telemetry service. (string value)
+#endpoint_type = publicURL
+
+# This variable is used as a flag to enable notification tests (boolean
 # value)
-#endpoint_type=publicURL
-
-# This variable is used as flag to enable notification tests
-# (boolean value)
-#too_slow_to_test=true
+#too_slow_to_test = true
 
 
 [volume]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Time in seconds between volume availability checks. (integer
+# Name of the backend1 (must be declared in cinder.conf) (string
 # value)
-#build_interval=1
+#backend1_name = BACKEND_1
 
-# Timeout in seconds to wait for a volume to becomeavailable.
+# Name of the backend2 (must be declared in cinder.conf) (string
+# value)
+#backend2_name = BACKEND_2
+
+# Time in seconds between volume availability checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a volume to become available.
 # (integer value)
-#build_timeout=300
+#build_timeout = 300
 
 # Catalog type of the Volume Service (string value)
-#catalog_type=volume
+#catalog_type = volume
+
+# Disk format to use when copying a volume to image (string value)
+#disk_format = raw
+
+# The endpoint type to use for the volume service. (string value)
+#endpoint_type = publicURL
 
 # The volume region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Backend protocol to target when creating volume types (string value)
+#storage_protocol = iSCSI
+
+# Backend vendor to target when creating volume types (string value)
+#vendor_name = Open Source
+
+# Default size in GB for volumes created by volumes tests (integer
 # value)
-#region=
-
-# The endpoint type to use for the volume service. (string
-# value)
-#endpoint_type=publicURL
-
-# Name of the backend1 (must be declared in cinder.conf)
-# (string value)
-#backend1_name=BACKEND_1
-
-# Name of the backend2 (must be declared in cinder.conf)
-# (string value)
-#backend2_name=BACKEND_2
-
-# Backend protocol to target when creating volume types
-# (string value)
-#storage_protocol=iSCSI
-
-# Backend vendor to target when creating volume types (string
-# value)
-#vendor_name=Open Source
-
-# Disk format to use when copying a volume to image (string
-# value)
-#disk_format=raw
-
-# Default size in GB for volumes created by volumes tests
-# (integer value)
-#volume_size=1
+#volume_size = 1
 
 
 [volume-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Runs Cinder multi-backend test (requires 2 backends)
-# (boolean value)
-#multi_backend=false
-
-# Runs Cinder volumes backup test (boolean value)
-#backup=true
-
-# Runs Cinder volume snapshot test (boolean value)
-#snapshot=true
-
-# A list of enabled volume extensions with a special entry all
-# which indicates every extension is enabled. Empty list
-# indicates all extensions are disabled (list value)
-#api_extensions=all
+# A list of enabled volume extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
 
 # Is the v1 volume API enabled (boolean value)
-#api_v1=true
+#api_v1 = true
 
 # Is the v2 volume API enabled (boolean value)
-#api_v2=true
+#api_v2 = true
 
+# Runs Cinder volumes backup test (boolean value)
+#backup = true
 
+# Runs Cinder multi-backend test (requires 2 backends) (boolean value)
+#multi_backend = false
+
+# Runs Cinder volume snapshot test (boolean value)
+#snapshot = true
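
The regenerated [volume] and [volume-feature-enabled] samples above only
reorder the options and normalize spacing; tests consume them through
tempest's CONF object. A minimal sketch of how the options shown above
surface in test code, assuming a configured tempest environment (the values
in the comments are the documented defaults):

    from tempest import config

    CONF = config.CONF

    CONF.volume.build_timeout                  # 300
    CONF.volume.backend1_name                  # 'BACKEND_1'
    CONF.volume_feature_enabled.multi_backend  # False
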
diff --git a/openstack-common.conf b/openstack-common.conf
index a9a6b0b..5ae2089 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,6 @@
 [DEFAULT]
 
 # The list of modules to copy from openstack-common
-module=config
 module=install_venv_common
 module=lockutils
 module=log
diff --git a/run_tests.sh b/run_tests.sh
index a12bf46..971f89b 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -103,8 +103,6 @@
       echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2
   fi
   ${wrapper} flake8
-  export MODULEPATH=tempest.common.generate_sample_tempest
-  ${wrapper} tools/config/check_uptodate.sh
 }
 
 if [ $never_venv -eq 0 ]
diff --git a/setup.cfg b/setup.cfg
index 2e25ace..d010ccc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -24,6 +24,9 @@
     run-tempest-stress = tempest.cmd.run_stress:main
     tempest-cleanup = tempest.cmd.cleanup:main
 
+oslo.config.opts =
+    tempest.config = tempest.config:list_opts
+
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
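
The new oslo.config.opts entry point is what lets oslo-config-generator
discover tempest's options now that the copied config module and the
check_uptodate.sh hook are gone: the generator loads each entry point in
that namespace and calls it, expecting an iterable of (group name, option
list) pairs. A minimal sketch of the shape such a callable takes
(illustrative only, not the actual body of tempest.config:list_opts):

    from oslo.config import cfg

    _volume_opts = [
        cfg.IntOpt('build_timeout',
                   default=300,
                   help='Timeout in seconds to wait for a volume to '
                        'become available.'),
    ]


    def list_opts():
        # oslo-config-generator calls this and renders one sample section
        # per (group, opts) pair returned.
        return [('volume', _volume_opts)]
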
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 90be820..8d6a7fc 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -68,7 +68,10 @@
         # NOTE(maurosr): since this exercises the same code snippet, we do it
         # only for reserve action
         body = {"reserve": "None"}
-        self.assertRaises(exceptions.NotFound,
+        # NOTE(eliqiao): in Juno the exception is NotFound, but on master the
+        # error code was changed to BadRequest; both exceptions should be
+        # accepted by tempest.
+        self.assertRaises((exceptions.NotFound, exceptions.BadRequest),
                           self.client.reserve_fixed_ip,
                           "my.invalid.ip", body)
 
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 6c93d33..6507ce1 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -44,9 +44,9 @@
 
         # TODO(andreaf) WE should care also for the alt_manager here
         # but only once client lazy load in the manager is done
-        os = cls.get_client_manager()
+        cls.os = cls.get_client_manager()
+        cls.multi_user = cls.check_multi_user()
 
-        cls.os = os
         cls.build_interval = CONF.compute.build_interval
         cls.build_timeout = CONF.compute.build_timeout
         cls.ssh_user = CONF.compute.ssh_user
@@ -58,7 +58,6 @@
         cls.image_ssh_password = CONF.compute.image_ssh_password
         cls.servers = []
         cls.images = []
-        cls.multi_user = cls.get_multi_user()
         cls.security_groups = []
         cls.server_groups = []
 
@@ -118,30 +117,17 @@
             raise exceptions.InvalidConfiguration(message=msg)
 
     @classmethod
-    def get_multi_user(cls):
-        multi_user = True
-        # Determine if there are two regular users that can be
-        # used in testing. If the test cases are allowed to create
-        # users (config.compute.allow_tenant_isolation is true,
-        # then we allow multi-user.
-        if not CONF.compute.allow_tenant_isolation:
-            user1 = CONF.identity.username
-            user2 = CONF.identity.alt_username
-            if not user2 or user1 == user2:
-                multi_user = False
-            else:
-                user2_password = CONF.identity.alt_password
-                user2_tenant_name = CONF.identity.alt_tenant_name
-                if not user2_password or not user2_tenant_name:
-                    msg = ("Alternate user specified but not alternate "
-                           "tenant or password: alt_tenant_name=%s "
-                           "alt_password=%s"
-                           % (user2_tenant_name, user2_password))
-                    raise exceptions.InvalidConfiguration(msg)
-        return multi_user
+    def check_multi_user(cls):
+        # We have a list of accounts now; check that at least two users exist
+        if not cls.isolated_creds.is_multi_user():
+            msg = "Not enough users available for multi-user testing"
+            raise exceptions.InvalidConfiguration(msg)
+        return True
 
     @classmethod
     def clear_servers(cls):
+        LOG.debug('Clearing servers: %s', ','.join(
+            server['id'] for server in cls.servers))
         for server in cls.servers:
             try:
                 cls.servers_client.delete_server(server['id'])
@@ -181,6 +167,7 @@
 
     @classmethod
     def clear_images(cls):
+        LOG.debug('Clearing images: %s', ','.join(cls.images))
         for image_id in cls.images:
             try:
                 cls.images_client.delete_image(image_id)
@@ -192,6 +179,8 @@
 
     @classmethod
     def clear_security_groups(cls):
+        LOG.debug('Clearing security groups: %s', ','.join(
+            str(sg['id']) for sg in cls.security_groups))
         for sg in cls.security_groups:
             try:
                 resp, body =\
@@ -206,6 +195,7 @@
 
     @classmethod
     def clear_server_groups(cls):
+        LOG.debug('Clearing server groups: %s', ','.join(cls.server_groups))
         for server_group_id in cls.server_groups:
             try:
                 cls.client.delete_server_group(server_group_id)
@@ -390,19 +380,14 @@
     @classmethod
     def resource_setup(cls):
         super(BaseComputeAdminTest, cls).resource_setup()
-        if (CONF.compute.allow_tenant_isolation or
-            cls.force_tenant_isolation is True):
+        try:
             creds = cls.isolated_creds.get_admin_creds()
-            cls.os_adm = clients.Manager(credentials=creds,
-                                         interface=cls._interface)
-        else:
-            try:
-                cls.os_adm = clients.ComputeAdminManager(
-                    interface=cls._interface)
-            except exceptions.InvalidCredentials:
-                msg = ("Missing Compute Admin API credentials "
-                       "in configuration.")
-                raise cls.skipException(msg)
+            cls.os_adm = clients.Manager(
+                credentials=creds, interface=cls._interface)
+        except NotImplementedError:
+            msg = ("Missing Compute Admin API credentials in configuration.")
+            raise cls.skipException(msg)
+
         if cls._api_version == 2:
             cls.availability_zone_admin_client = (
                 cls.os_adm.availability_zone_client)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index c0b6730..459d78b 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -105,7 +105,11 @@
             raise self.skipException("Not testable in XML")
         # prefix character is:
         # http://www.fileformat.info/info/unicode/char/1F4A9/index.htm
-        utf8_name = data_utils.rand_name(u'\xF0\x9F\x92\xA9')
+
+        # We use a string with a 3-byte utf-8 character due to glance
+        # bug #1370954, which returns a 500 when mysql is used as the
+        # backend and glance attempts to store a 4-byte utf-8 character.
+        utf8_name = data_utils.rand_name('\xe2\x82\xa1')
         resp, body = self.client.create_image(self.server_id, utf8_name)
         image_id = data_utils.parse_image_id(resp['location'])
         self.addCleanup(self.client.delete_image, image_id)
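
The replacement name is a plain byte string holding the 3-byte UTF-8
sequence for U+20A1, while the character linked in the comment (U+1F4A9) is
the 4-byte sequence that trips glance bug #1370954 on a MySQL backend. A
quick check of the byte lengths under Python 2 semantics, which is what
tempest ran on at the time:

    # Python 2: unicode literals encoded to UTF-8 byte strings.
    three_byte = u'\u20a1'.encode('utf-8')     # '\xe2\x82\xa1', as in the test
    four_byte = u'\U0001F4A9'.encode('utf-8')  # '\xf0\x9f\x92\xa9'
    assert len(three_byte) == 3
    assert len(four_byte) == 4
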
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 901c377..4fd5c02 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import six
+
 from tempest.api.compute.security_groups import base
 from tempest import config
 from tempest import test
@@ -27,13 +29,40 @@
         super(SecurityGroupRulesTestJSON, cls).resource_setup()
         cls.client = cls.security_groups_client
         cls.neutron_available = CONF.service_available.neutron
+        cls.ip_protocol = 'tcp'
+        cls.from_port = 22
+        cls.to_port = 22
 
-    @classmethod
-    def setUpClass(self):
-        super(SecurityGroupRulesTestJSON, self).setUpClass()
-        self.ip_protocol = 'tcp'
-        self.from_port = 22
-        self.to_port = 22
+    def setUp(self):
+        super(SecurityGroupRulesTestJSON, self).setUp()
+
+        from_port = self.from_port
+        to_port = self.to_port
+        group = {}
+        ip_range = {}
+        if self._interface == 'xml':
+            # NOTE: An XML response differs from the JSON one in the ways
+            # handled below.
+            from_port = six.text_type(from_port)
+            to_port = six.text_type(to_port)
+            group = {'tenant_id': 'None', 'name': 'None'}
+            ip_range = {'cidr': 'None'}
+        self.expected = {
+            'id': None,
+            'parent_group_id': None,
+            'ip_protocol': self.ip_protocol,
+            'from_port': from_port,
+            'to_port': to_port,
+            'ip_range': ip_range,
+            'group': group
+        }
+
+    def _check_expected_response(self, actual_rule):
+        for key in self.expected:
+            if key == 'id':
+                continue
+            self.assertEqual(self.expected[key], actual_rule[key],
+                             "Mismatched key is %s" % key)
 
     @test.attr(type='smoke')
     @test.services('network')
@@ -41,44 +70,68 @@
         # Positive test: Creation of Security Group rule
         # should be successful
         # Creating a Security Group to add rules to it
-        resp, security_group = self.create_security_group()
+        _, security_group = self.create_security_group()
         securitygroup_id = security_group['id']
         # Adding rules to the created Security Group
-        resp, rule = \
+        _, rule = \
             self.client.create_security_group_rule(securitygroup_id,
                                                    self.ip_protocol,
                                                    self.from_port,
                                                    self.to_port)
-        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
-        self.assertEqual(200, resp.status)
+        self.expected['parent_group_id'] = securitygroup_id
+        self.expected['ip_range'] = {'cidr': '0.0.0.0/0'}
+        self._check_expected_response(rule)
 
     @test.attr(type='smoke')
     @test.services('network')
-    def test_security_group_rules_create_with_optional_arguments(self):
+    def test_security_group_rules_create_with_optional_cidr(self):
         # Positive test: Creation of Security Group rule
-        # with optional arguments
+        # with optional argument cidr
         # should be successful
 
-        secgroup1 = None
-        secgroup2 = None
         # Creating a Security Group to add rules to it
-        resp, security_group = self.create_security_group()
-        secgroup1 = security_group['id']
-        # Creating a Security Group so as to assign group_id to the rule
-        resp, security_group = self.create_security_group()
-        secgroup2 = security_group['id']
-        # Adding rules to the created Security Group with optional arguments
-        parent_group_id = secgroup1
+        _, security_group = self.create_security_group()
+        parent_group_id = security_group['id']
+
+        # Adding rules to the created Security Group with optional cidr
         cidr = '10.2.3.124/24'
-        group_id = secgroup2
-        resp, rule = \
+        _, rule = \
             self.client.create_security_group_rule(parent_group_id,
                                                    self.ip_protocol,
                                                    self.from_port,
                                                    self.to_port,
-                                                   cidr=cidr,
+                                                   cidr=cidr)
+        self.expected['parent_group_id'] = parent_group_id
+        self.expected['ip_range'] = {'cidr': cidr}
+        self._check_expected_response(rule)
+
+    @test.attr(type='smoke')
+    @test.services('network')
+    def test_security_group_rules_create_with_optional_group_id(self):
+        # Positive test: Creation of Security Group rule
+        # with optional argument group_id
+        # should be successful
+
+        # Creating a Security Group to add rules to it
+        _, security_group = self.create_security_group()
+        parent_group_id = security_group['id']
+
+        # Creating a Security Group so as to assign group_id to the rule
+        _, security_group = self.create_security_group()
+        group_id = security_group['id']
+        group_name = security_group['name']
+
+        # Adding rules to the created Security Group with optional group_id
+        _, rule = \
+            self.client.create_security_group_rule(parent_group_id,
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port,
                                                    group_id=group_id)
-        self.assertEqual(200, resp.status)
+        self.expected['parent_group_id'] = parent_group_id
+        self.expected['group'] = {'tenant_id': self.client.tenant_id,
+                                  'name': group_name}
+        self._check_expected_response(rule)
 
     @test.attr(type='smoke')
     @test.services('network')
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 5df8d82..25dc87d 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -42,6 +42,7 @@
         personality = [{'path': '/test.txt',
                        'contents': base64.b64encode(file_contents)}]
         cls.client = cls.servers_client
+        cls.network_client = cls.os.network_client
         cli_resp = cls.create_test_server(name=cls.name,
                                           meta=cls.meta,
                                           accessIPv4=cls.accessIPv4,
@@ -124,6 +125,40 @@
         self.assertEqual(200, resp.status)
         self.assertIn(server['id'], server_group['members'])
 
+    @testtools.skipUnless(CONF.service_available.neutron,
+                          'Neutron service must be available.')
+    def test_verify_multiple_nics_order(self):
+        # Verify that the order of networks given at server creation is
+        # preserved within the server.
+        name_net1 = data_utils.rand_name(self.__class__.__name__)
+        _, net1 = self.network_client.create_network(name=name_net1)
+        name_net2 = data_utils.rand_name(self.__class__.__name__)
+        _, net2 = self.network_client.create_network(name=name_net2)
+
+        _, subnet1 = self.network_client.create_subnet(
+            network_id=net1['network']['id'],
+            cidr='19.80.0.0/24',
+            ip_version=4)
+        _, subnet2 = self.network_client.create_subnet(
+            network_id=net2['network']['id'],
+            cidr='19.86.0.0/24',
+            ip_version=4)
+
+        networks = [{'uuid': net1['network']['id']},
+                    {'uuid': net2['network']['id']}]
+
+        _, server_multi_nics = self.create_test_server(
+            networks=networks, wait_until='ACTIVE')
+
+        _, addresses = self.client.list_addresses(server_multi_nics['id'])
+
+        expected_addr = ['19.80.0.2', '19.86.0.2']
+
+        addr = [addresses[name_net1][0]['addr'],
+                addresses[name_net2][0]['addr']]
+
+        self.assertEqual(expected_addr, addr)
+
 
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
     disk_config = 'AUTO'
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 98fe387..e660f00 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -69,12 +69,12 @@
         resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
-        if (CONF.service_available.neutron and
-                CONF.compute.allow_tenant_isolation):
-            network = cls.isolated_creds.get_primary_network()
-            cls.fixed_network_name = network['name']
-        else:
-            cls.fixed_network_name = CONF.compute.fixed_network_name
+
+        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if CONF.service_available.neutron:
+            if hasattr(cls.isolated_creds, 'get_primary_network'):
+                network = cls.isolated_creds.get_primary_network()
+                cls.fixed_network_name = network['name']
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 3aacf2a..b51b46e 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -358,6 +358,25 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     @test.attr(type='gate')
+    def test_get_console_output_with_unlimited_size(self):
+        _, server = self.create_test_server(wait_until='ACTIVE')
+
+        def _check_full_length_console_log():
+            _, output = self.servers_client.get_console_output(server['id'],
+                                                               None)
+            self.assertTrue(output, "Console output was empty.")
+            lines = len(output.split('\n'))
+
+            # NOTE: This test tries to get the full-length console log, and
+            # it should be longer than the one in test_get_console_output.
+            self.assertTrue(lines > 10, "Full console log is too short."
+                                        " (lines: %s)" % lines)
+
+        self.wait_for(_check_full_length_console_log)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
+                          'Console output not supported.')
+    @test.attr(type='gate')
     def test_get_console_output_server_id_in_shutoff_status(self):
         # Positive test:Should be able to GET the console output
         # for a given server_id in SHUTOFF status
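
The new unlimited-size test relies on get_console_output treating a length
of None as a request for the whole console log, and then checks that the
result is longer than the bounded call used by test_get_console_output. A
hedged sketch of that comparison, assuming only a client object with the
same call signature as tempest's servers client:

    def console_log_lines(servers_client, server_id, length=None):
        # length=None asks for the full console log; an integer caps the
        # number of lines returned.
        _, output = servers_client.get_console_output(server_id, length)
        return len(output.split('\n'))

    # Relationship the test above expects to hold:
    #   console_log_lines(c, sid, None) >= console_log_lines(c, sid, 10)
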
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index b86ee06..0349260 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -45,10 +45,7 @@
     def resource_setup(cls):
         super(ServersNegativeTestJSON, cls).resource_setup()
         cls.client = cls.servers_client
-        if CONF.compute.allow_tenant_isolation:
-            cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.alt_os = clients.AltManager()
+        cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
         cls.alt_client = cls.alt_os.servers_client
         resp, server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 015d9f5..175f008 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -45,12 +45,8 @@
         cls.keypairs_client = cls.os.keypairs_client
         cls.security_client = cls.os.security_groups_client
 
-        if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_alt_creds()
-            cls.alt_manager = clients.Manager(credentials=creds)
-        else:
-            # Use the alt_XXX credentials in the config file
-            cls.alt_manager = clients.AltManager()
+        creds = cls.isolated_creds.get_alt_creds()
+        cls.alt_manager = clients.Manager(credentials=creds)
 
         cls.alt_client = cls.alt_manager.servers_client
         cls.alt_images_client = cls.alt_manager.images_client
diff --git a/tempest/api/compute/v3/servers/test_list_server_filters.py b/tempest/api/compute/v3/servers/test_list_server_filters.py
index 209d293..73844cf 100644
--- a/tempest/api/compute/v3/servers/test_list_server_filters.py
+++ b/tempest/api/compute/v3/servers/test_list_server_filters.py
@@ -70,12 +70,11 @@
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
 
-        if (CONF.service_available.neutron and
-                CONF.compute.allow_tenant_isolation):
-            network = cls.isolated_creds.get_primary_network()
-            cls.fixed_network_name = network['name']
-        else:
-            cls.fixed_network_name = CONF.compute.fixed_network_name
+        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if CONF.service_available.neutron:
+            if hasattr(cls.isolated_creds, 'get_primary_network'):
+                network = cls.isolated_creds.get_primary_network()
+                cls.fixed_network_name = network['name']
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index 30ac0ac..4b1fe04 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -45,10 +45,7 @@
     def resource_setup(cls):
         super(ServersNegativeV3Test, cls).resource_setup()
         cls.client = cls.servers_client
-        if CONF.compute.allow_tenant_isolation:
-            cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.alt_os = clients.AltManager()
+        cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
         cls.alt_client = cls.alt_os.servers_v3_client
         resp, server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index a225f12..1e4973b 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -202,7 +202,7 @@
         def _try_wrapper(func, item, **kwargs):
             try:
                 if kwargs:
-                    func(item['id'], kwargs)
+                    func(item['id'], **kwargs)
                 else:
                     func(item['id'])
             except exceptions.NotFound:
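
The one-character fix above stops passing the kwargs dict to the cleanup
function as a single positional argument and expands it into keyword
arguments instead. A tiny standalone example of the difference:

    def delete(item_id, force=False):
        return item_id, force

    kwargs = {'force': True}
    # Expanded into force=True:
    assert delete('42', **kwargs) == ('42', True)
    # Whole dict bound to the force parameter:
    assert delete('42', kwargs) == ('42', {'force': True})
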
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 08767e3..74baba6 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -41,10 +41,7 @@
         if not CONF.service_available.glance:
             skip_msg = ("%s skipped as glance is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
-        if CONF.compute.allow_tenant_isolation:
-            cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
-        else:
-            cls.os = clients.Manager()
+        cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
 
     @classmethod
     def resource_cleanup(cls):
@@ -91,10 +88,7 @@
     @classmethod
     def resource_setup(cls):
         super(BaseV1ImageMembersTest, cls).resource_setup()
-        if CONF.compute.allow_tenant_isolation:
-            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.os_alt = clients.AltManager()
+        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
 
         cls.alt_img_cli = cls.os_alt.image_client
         cls.alt_tenant_id = cls.alt_img_cli.tenant_id
@@ -126,11 +120,8 @@
     @classmethod
     def resource_setup(cls):
         super(BaseV2MemberImageTest, cls).resource_setup()
-        if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_alt_creds()
-            cls.os_alt = clients.Manager(creds)
-        else:
-            cls.os_alt = clients.AltManager()
+        creds = cls.isolated_creds.get_alt_creds()
+        cls.os_alt = clients.Manager(creds)
         cls.os_img_client = cls.os.image_client_v2
         cls.alt_img_client = cls.os_alt.image_client_v2
         cls.alt_tenant_id = cls.alt_img_client.tenant_id
diff --git a/tempest/api/messaging/test_queues.py b/tempest/api/messaging/test_queues.py
index ab099ff..8f9ac20 100644
--- a/tempest/api/messaging/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -32,11 +32,10 @@
     def test_create_queue(self):
         # Create Queue
         queue_name = data_utils.rand_name('test-')
-        resp, body = self.create_queue(queue_name)
+        _, body = self.create_queue(queue_name)
 
         self.addCleanup(self.client.delete_queue, queue_name)
 
-        self.assertEqual('201', resp['status'])
         self.assertEqual('', body)
 
 
@@ -57,30 +56,27 @@
     def test_delete_queue(self):
         # Delete Queue
         queue_name = self.queues.pop()
-        resp, body = self.delete_queue(queue_name)
-        self.assertEqual('204', resp['status'])
+        _, body = self.delete_queue(queue_name)
         self.assertEqual('', body)
 
     @test.attr(type='smoke')
     def test_check_queue_existence(self):
         # Checking Queue Existence
         for queue_name in self.queues:
-            resp, body = self.check_queue_exists(queue_name)
-            self.assertEqual('204', resp['status'])
+            _, body = self.check_queue_exists(queue_name)
             self.assertEqual('', body)
 
     @test.attr(type='smoke')
     def test_check_queue_head(self):
         # Checking Queue Existence by calling HEAD
         for queue_name in self.queues:
-            resp, body = self.check_queue_exists_head(queue_name)
-            self.assertEqual('204', resp['status'])
+            _, body = self.check_queue_exists_head(queue_name)
             self.assertEqual('', body)
 
     @test.attr(type='smoke')
     def test_list_queues(self):
         # Listing queues
-        resp, body = self.list_queues()
+        _, body = self.list_queues()
         self.assertEqual(len(body['queues']), len(self.queues))
         for item in body['queues']:
             self.assertIn(item['name'], self.queues)
@@ -91,7 +87,7 @@
         queue_name = self.queues[data_utils.rand_int_id(0,
                                                         len(self.queues) - 1)]
         # Get Queue Stats for a newly created Queue
-        resp, body = self.get_queue_stats(queue_name)
+        _, body = self.get_queue_stats(queue_name)
         msgs = body['messages']
         for element in ('free', 'claimed', 'total'):
             self.assertEqual(0, msgs[element])
@@ -104,8 +100,7 @@
         queue_name = self.queues[data_utils.rand_int_id(0,
                                                         len(self.queues) - 1)]
         # Check the Queue has no metadata
-        resp, body = self.get_queue_metadata(queue_name)
-        self.assertEqual('200', resp['status'])
+        _, body = self.get_queue_metadata(queue_name)
         self.assertThat(body, matchers.HasLength(0))
         # Create metadata
         key3 = [0, 1, 2, 3, 4]
@@ -116,12 +111,10 @@
         req_body = dict()
         req_body[data_utils.rand_name('key1')] = req_body1
         # Set Queue Metadata
-        resp, body = self.set_queue_metadata(queue_name, req_body)
-        self.assertEqual('204', resp['status'])
+        _, body = self.set_queue_metadata(queue_name, req_body)
         self.assertEqual('', body)
         # Get Queue Metadata
-        resp, body = self.get_queue_metadata(queue_name)
-        self.assertEqual('200', resp['status'])
+        _, body = self.get_queue_metadata(queue_name)
         self.assertThat(body, matchers.Equals(req_body))
 
     @classmethod
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 834c010..91e3e14 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -83,6 +83,7 @@
         cls.fw_rules = []
         cls.fw_policies = []
         cls.ipsecpolicies = []
+        cls.ethertype = "IPv" + str(cls._ip_version)
 
     @classmethod
     def resource_cleanup(cls):
@@ -365,19 +366,15 @@
     @classmethod
     def resource_setup(cls):
         super(BaseAdminNetworkTest, cls).resource_setup()
-        admin_username = CONF.compute_admin.username
-        admin_password = CONF.compute_admin.password
-        admin_tenant = CONF.compute_admin.tenant_name
-        if not (admin_username and admin_password and admin_tenant):
+
+        try:
+            creds = cls.isolated_creds.get_admin_creds()
+            cls.os_adm = clients.Manager(
+                credentials=creds, interface=cls._interface)
+        except NotImplementedError:
             msg = ("Missing Administrative Network API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
-        if (CONF.compute.allow_tenant_isolation or
-            cls.force_tenant_isolation is True):
-            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
-                                         interface=cls._interface)
-        else:
-            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
         cls.admin_client = cls.os_adm.network_client
 
     @classmethod
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 11588d6..8e2b7f5 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -36,6 +36,8 @@
         List firewall policies
         Create firewall policy
         Update firewall policy
+        Insert firewall rule to policy
+        Remove firewall rule from policy
         Delete firewall policy
         Show firewall policy
         List firewall
@@ -62,6 +64,14 @@
         except exceptions.NotFound:
             pass
 
+    def _try_delete_rule(self, rule_id):
+        # delete rule, if it exists
+        try:
+            self.client.delete_firewall_rule(rule_id)
+        # if rule is not found, this means it was deleted in the test
+        except exceptions.NotFound:
+            pass
+
     def _try_delete_firewall(self, fw_id):
         # delete firewall, if it exists
         try:
@@ -211,6 +221,40 @@
         # Delete firewall
         self.client.delete_firewall(firewall_id)
 
+    @test.attr(type='smoke')
+    def test_insert_remove_firewall_rule_from_policy(self):
+        # Create firewall rule
+        resp, body = self.client.create_firewall_rule(
+            name=data_utils.rand_name("fw-rule"),
+            action="allow",
+            protocol="tcp")
+        fw_rule_id = body['firewall_rule']['id']
+        self.addCleanup(self._try_delete_rule, fw_rule_id)
+        # Create firewall policy
+        _, body = self.client.create_firewall_policy(
+            name=data_utils.rand_name("fw-policy"))
+        fw_policy_id = body['firewall_policy']['id']
+        self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+        # Insert rule to firewall policy
+        self.client.insert_firewall_rule_in_policy(
+            fw_policy_id, fw_rule_id, '', '')
+
+        # Verify insertion of rule in policy
+        self.assertIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+        # Remove rule from the firewall policy
+        self.client.remove_firewall_rule_from_policy(
+            fw_policy_id, fw_rule_id)
+
+        # Verify removal of rule from firewall policy
+        self.assertNotIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+
+    def _get_list_fw_rule_ids(self, fw_policy_id):
+        _, fw_policy = self.client.show_firewall_policy(
+            fw_policy_id)
+        return [ruleid for ruleid in fw_policy['firewall_policy']
+                ['firewall_rules']]
+
 
 class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 9764b4d..e20b58e 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -17,11 +17,15 @@
 
 from tempest.api.network import base_security_groups as base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import test
 
+CONF = config.CONF
+
 
 class SecGroupTest(base.BaseSecGroupTest):
     _interface = 'json'
+    _tenant_network_cidr = CONF.network.tenant_network_cidr
 
     @classmethod
     def resource_setup(cls):
@@ -30,6 +34,40 @@
             msg = "security-group extension not enabled."
             raise cls.skipException(msg)
 
+    def _create_verify_security_group_rule(self, sg_id, direction,
+                                           ethertype, protocol,
+                                           port_range_min,
+                                           port_range_max,
+                                           remote_group_id=None,
+                                           remote_ip_prefix=None):
+        # Create Security Group rule with the input params and validate
+        # that SG rule is created with the same parameters.
+        resp, rule_create_body = self.client.create_security_group_rule(
+            security_group_id=sg_id,
+            direction=direction,
+            ethertype=ethertype,
+            protocol=protocol,
+            port_range_min=port_range_min,
+            port_range_max=port_range_max,
+            remote_group_id=remote_group_id,
+            remote_ip_prefix=remote_ip_prefix
+        )
+
+        sec_group_rule = rule_create_body['security_group_rule']
+        self.addCleanup(self._delete_security_group_rule,
+                        sec_group_rule['id'])
+
+        expected = {'direction': direction, 'protocol': protocol,
+                    'ethertype': ethertype, 'port_range_min': port_range_min,
+                    'port_range_max': port_range_max,
+                    'remote_group_id': remote_group_id,
+                    'remote_ip_prefix': remote_ip_prefix}
+        for key, value in six.iteritems(expected):
+            self.assertEqual(value, sec_group_rule[key],
+                             "Field %s of the created security group "
+                             "rule does not match with %s." %
+                             (key, value))
+
     @test.attr(type='smoke')
     def test_list_security_groups(self):
         # Verify the that security group belonging to tenant exist in list
@@ -80,7 +118,8 @@
             _, rule_create_body = self.client.create_security_group_rule(
                 security_group_id=group_create_body['security_group']['id'],
                 protocol=protocol,
-                direction='ingress'
+                direction='ingress',
+                ethertype=self.ethertype
             )
 
             # Show details of the created security rule
@@ -102,30 +141,93 @@
 
     @test.attr(type='smoke')
     def test_create_security_group_rule_with_additional_args(self):
-        # Verify creating security group rule with the following
-        # arguments works: "protocol": "tcp", "port_range_max": 77,
-        # "port_range_min": 77, "direction":"ingress".
-        group_create_body, _ = self._create_security_group()
+        """Verify security group rule with additional arguments works.
 
+        direction:ingress, ethertype:[IPv4/IPv6],
+        protocol:tcp, port_range_min:77, port_range_max:77
+        """
+        group_create_body, _ = self._create_security_group()
+        sg_id = group_create_body['security_group']['id']
         direction = 'ingress'
         protocol = 'tcp'
         port_range_min = 77
         port_range_max = 77
-        _, rule_create_body = self.client.create_security_group_rule(
-            security_group_id=group_create_body['security_group']['id'],
-            direction=direction,
-            protocol=protocol,
-            port_range_min=port_range_min,
-            port_range_max=port_range_max
-        )
+        self._create_verify_security_group_rule(sg_id, direction,
+                                                self.ethertype, protocol,
+                                                port_range_min,
+                                                port_range_max)
 
-        sec_group_rule = rule_create_body['security_group_rule']
+    @test.attr(type='smoke')
+    def test_create_security_group_rule_with_icmp_type_code(self):
+        """Verify security group rule for icmp protocol works.
 
-        self.assertEqual(sec_group_rule['direction'], direction)
-        self.assertEqual(sec_group_rule['protocol'], protocol)
-        self.assertEqual(int(sec_group_rule['port_range_min']), port_range_min)
-        self.assertEqual(int(sec_group_rule['port_range_max']), port_range_max)
+        Specify icmp type (port_range_min) and icmp code
+        (port_range_max) with different values. A separate testcase
+        is added for icmp protocol as icmp validation would be
+        different from tcp/udp.
+        """
+        group_create_body, _ = self._create_security_group()
+
+        sg_id = group_create_body['security_group']['id']
+        direction = 'ingress'
+        protocol = 'icmp'
+        icmp_type_codes = [(3, 2), (2, 3), (3, 0), (2, None)]
+        for icmp_type, icmp_code in icmp_type_codes:
+            self._create_verify_security_group_rule(sg_id, direction,
+                                                    self.ethertype, protocol,
+                                                    icmp_type, icmp_code)
+
+    @test.attr(type='smoke')
+    def test_create_security_group_rule_with_remote_group_id(self):
+        # Verify creating security group rule with remote_group_id works
+        sg1_body, _ = self._create_security_group()
+        sg2_body, _ = self._create_security_group()
+
+        sg_id = sg1_body['security_group']['id']
+        direction = 'ingress'
+        protocol = 'udp'
+        port_range_min = 50
+        port_range_max = 55
+        remote_id = sg2_body['security_group']['id']
+        self._create_verify_security_group_rule(sg_id, direction,
+                                                self.ethertype, protocol,
+                                                port_range_min,
+                                                port_range_max,
+                                                remote_group_id=remote_id)
+
+    @test.attr(type='smoke')
+    def test_create_security_group_rule_with_remote_ip_prefix(self):
+        # Verify creating security group rule with remote_ip_prefix works
+        sg1_body, _ = self._create_security_group()
+
+        sg_id = sg1_body['security_group']['id']
+        direction = 'ingress'
+        protocol = 'tcp'
+        port_range_min = 76
+        port_range_max = 77
+        ip_prefix = self._tenant_network_cidr
+        self._create_verify_security_group_rule(sg_id, direction,
+                                                self.ethertype, protocol,
+                                                port_range_min,
+                                                port_range_max,
+                                                remote_ip_prefix=ip_prefix)
 
 
 class SecGroupTestXML(SecGroupTest):
     _interface = 'xml'
+
+
+class SecGroupIPv6Test(SecGroupTest):
+    _ip_version = 6
+    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+    @classmethod
+    def resource_setup(cls):
+        if not CONF.network_feature_enabled.ipv6:
+            skip_msg = "IPv6 Tests are disabled."
+            raise cls.skipException(skip_msg)
+        super(SecGroupIPv6Test, cls).resource_setup()
+
+
+class SecGroupIPv6TestXML(SecGroupIPv6Test):
+    _interface = 'xml'
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index 9c6c267..97e4cb7 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -16,12 +16,16 @@
 import uuid
 
 from tempest.api.network import base_security_groups as base
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class NegativeSecGroupTest(base.BaseSecGroupTest):
     _interface = 'json'
+    _tenant_network_cidr = CONF.network.tenant_network_cidr
 
     @classmethod
     def resource_setup(cls):
@@ -60,23 +64,87 @@
         self.assertRaises(
             exceptions.BadRequest, self.client.create_security_group_rule,
             security_group_id=group_create_body['security_group']['id'],
-            protocol=pname, direction='ingress')
+            protocol=pname, direction='ingress', ethertype=self.ethertype)
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
+        group_create_body, _ = self._create_security_group()
+
+        # Create rule with bad remote_ip_prefix
+        prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
+        for remote_ip_prefix in prefix:
+            self.assertRaises(
+                exceptions.BadRequest, self.client.create_security_group_rule,
+                security_group_id=group_create_body['security_group']['id'],
+                protocol='tcp', direction='ingress', ethertype=self.ethertype,
+                remote_ip_prefix=remote_ip_prefix)
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_non_existent_remote_groupid(self):
+        group_create_body, _ = self._create_security_group()
+        non_exist_id = str(uuid.uuid4())
+
+        # Create rule with non existent remote_group_id
+        group_ids = ['bad_group_id', non_exist_id]
+        for remote_group_id in group_ids:
+            self.assertRaises(
+                exceptions.NotFound, self.client.create_security_group_rule,
+                security_group_id=group_create_body['security_group']['id'],
+                protocol='tcp', direction='ingress', ethertype=self.ethertype,
+                remote_group_id=remote_group_id)
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_remote_ip_and_group(self):
+        sg1_body, _ = self._create_security_group()
+        sg2_body, _ = self._create_security_group()
+
+        # Create rule specifying both remote_ip_prefix and remote_group_id
+        prefix = self._tenant_network_cidr
+        self.assertRaises(
+            exceptions.BadRequest, self.client.create_security_group_rule,
+            security_group_id=sg1_body['security_group']['id'],
+            protocol='tcp', direction='ingress',
+            ethertype=self.ethertype, remote_ip_prefix=prefix,
+            remote_group_id=sg2_body['security_group']['id'])
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_bad_ethertype(self):
+        group_create_body, _ = self._create_security_group()
+
+        # Create rule with bad ethertype
+        ethertype = 'bad_ethertype'
+        self.assertRaises(
+            exceptions.BadRequest, self.client.create_security_group_rule,
+            security_group_id=group_create_body['security_group']['id'],
+            protocol='udp', direction='ingress', ethertype=ethertype)
 
     @test.attr(type=['negative', 'gate'])
     def test_create_security_group_rule_with_invalid_ports(self):
         group_create_body, _ = self._create_security_group()
 
-        # Create rule with invalid ports
+        # Create rule for tcp protocol with invalid ports
         states = [(-16, 80, 'Invalid value for port -16'),
                   (80, 79, 'port_range_min must be <= port_range_max'),
                   (80, 65536, 'Invalid value for port 65536'),
+                  (None, 6, 'port_range_min must be <= port_range_max'),
                   (-16, 65536, 'Invalid value for port')]
         for pmin, pmax, msg in states:
             ex = self.assertRaises(
                 exceptions.BadRequest, self.client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='tcp', port_range_min=pmin, port_range_max=pmax,
-                direction='ingress')
+                direction='ingress', ethertype=self.ethertype)
+            self.assertIn(msg, str(ex))
+
+        # Create rule for icmp protocol with invalid ports
+        states = [(1, 256, 'Invalid value for ICMP code'),
+                  (300, 1, 'Invalid value for ICMP type')]
+        for pmin, pmax, msg in states:
+            ex = self.assertRaises(
+                exceptions.BadRequest, self.client.create_security_group_rule,
+                security_group_id=group_create_body['security_group']['id'],
+                protocol='icmp', port_range_min=pmin, port_range_max=pmax,
+                direction='ingress', ethertype=self.ethertype)
             self.assertIn(msg, str(ex))
 
     @test.attr(type=['negative', 'smoke'])
@@ -88,14 +156,54 @@
                           name=name)
 
     @test.attr(type=['negative', 'smoke'])
+    def test_create_duplicate_security_group_rule_fails(self):
+        # Creating a duplicate security group rule should fail.
+        body, _ = self._create_security_group()
+
+        min_port = 66
+        max_port = 67
+        # Create a rule with valid params
+        resp, _ = self.client.create_security_group_rule(
+            security_group_id=body['security_group']['id'],
+            direction='ingress',
+            ethertype=self.ethertype,
+            protocol='tcp',
+            port_range_min=min_port,
+            port_range_max=max_port
+        )
+
+        # Try creating the same security group rule, it should fail
+        self.assertRaises(
+            exceptions.Conflict, self.client.create_security_group_rule,
+            security_group_id=body['security_group']['id'],
+            protocol='tcp', direction='ingress', ethertype=self.ethertype,
+            port_range_min=min_port, port_range_max=max_port)
+
+    @test.attr(type=['negative', 'smoke'])
     def test_create_security_group_rule_with_non_existent_security_group(self):
         # Create security group rules with not existing security group.
         non_existent_sg = str(uuid.uuid4())
         self.assertRaises(exceptions.NotFound,
                           self.client.create_security_group_rule,
                           security_group_id=non_existent_sg,
-                          direction='ingress')
+                          direction='ingress', ethertype=self.ethertype)
 
 
 class NegativeSecGroupTestXML(NegativeSecGroupTest):
     _interface = 'xml'
+
+
+class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
+    _ip_version = 6
+    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+    @classmethod
+    def resource_setup(cls):
+        if not CONF.network_feature_enabled.ipv6:
+            skip_msg = "IPv6 Tests are disabled."
+            raise cls.skipException(skip_msg)
+        super(NegativeSecGroupIPv6Test, cls).resource_setup()
+
+
+class NegativeSecGroupIPv6TestXML(NegativeSecGroupIPv6Test):
+    _interface = 'xml'
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 6a5fd3d..2e39cf9 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -36,18 +36,12 @@
             raise cls.skipException(skip_msg)
         cls.isolated_creds = isolated_creds.IsolatedCreds(
             cls.__name__, network_resources=cls.network_resources)
-        if CONF.compute.allow_tenant_isolation:
-            # Get isolated creds for normal user
-            cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
-            # Get isolated creds for admin user
-            cls.os_admin = clients.Manager(
-                cls.isolated_creds.get_admin_creds())
-            # Get isolated creds for alt user
-            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.os = clients.Manager()
-            cls.os_admin = clients.AdminManager()
-            cls.os_alt = clients.AltManager()
+        # Get isolated creds for normal user
+        cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
+        # Get isolated creds for admin user
+        cls.os_admin = clients.Manager(cls.isolated_creds.get_admin_creds())
+        # Get isolated creds for alt user
+        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
 
         cls.object_client = cls.os.object_client
         cls.container_client = cls.os.container_client
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 3782b70..42e2a2d 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,7 +32,6 @@
     @test.attr(type="gate")
     @testtools.skipIf(not CONF.service_available.nova,
                       "Nova is not available.")
-    @test.skip_because(bug="1336755")
     def test_check_nova_notification(self):
 
         resp, body = self.create_server()
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index db2aab5..042cde9 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -21,19 +21,19 @@
 LOG = logging.getLogger(__name__)
 
 
-class VolumeMultiBackendTest(base.BaseVolumeV1AdminTest):
+class VolumeMultiBackendV2Test(base.BaseVolumeAdminTest):
     _interface = "json"
 
     @classmethod
     def resource_setup(cls):
-        super(VolumeMultiBackendTest, cls).resource_setup()
+        super(VolumeMultiBackendV2Test, cls).resource_setup()
         if not CONF.volume_feature_enabled.multi_backend:
             raise cls.skipException("Cinder multi-backend feature disabled")
 
         cls.backend1_name = CONF.volume.backend1_name
         cls.backend2_name = CONF.volume.backend2_name
 
-        cls.volume_client = cls.os_adm.volumes_client
+        cls.name_field = cls.special_fields['name_field']
         cls.volume_type_id_list = []
         cls.volume_id_list_with_prefix = []
         cls.volume_id_list_without_prefix = []
@@ -60,12 +60,13 @@
             extra_specs = {spec_key_with_prefix: backend_name_key}
         else:
             extra_specs = {spec_key_without_prefix: backend_name_key}
-        _, self.type = self.client.create_volume_type(
+        _, self.type = self.volume_types_client.create_volume_type(
             type_name, extra_specs=extra_specs)
         self.volume_type_id_list.append(self.type['id'])
 
-        _, self.volume = self.volume_client.create_volume(
-            size=1, display_name=vol_name, volume_type=type_name)
+        params = {self.name_field: vol_name, 'volume_type': type_name}
+
+        _, self.volume = self.volume_client.create_volume(size=1, **params)
         if with_prefix:
             self.volume_id_list_with_prefix.append(self.volume['id'])
         else:
@@ -90,9 +91,9 @@
         # volume types deletion
         volume_type_id_list = getattr(cls, 'volume_type_id_list', [])
         for volume_type_id in volume_type_id_list:
-            cls.client.delete_volume_type(volume_type_id)
+            cls.volume_types_client.delete_volume_type(volume_type_id)
 
-        super(VolumeMultiBackendTest, cls).resource_cleanup()
+        super(VolumeMultiBackendV2Test, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_backend_name_reporting(self):
@@ -149,3 +150,7 @@
         msg = ("volumes %s and %s were created in the same backend" %
                (volume1_id, volume2_id))
         self.assertNotEqual(volume1_host, volume2_host, msg)
+
+
+class VolumeMultiBackendV1Test(VolumeMultiBackendV2Test):
+    _api_version = 1
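
Renaming the class to the V2 variant and adding a one-line V1 subclass runs
every test method against both volume API versions; only _api_version
changes and the base class picks the matching clients. A minimal sketch of
the same parametrize-by-subclass pattern in plain unittest:

    import unittest


    class VolumeApiV2Checks(unittest.TestCase):
        _api_version = 2

        def test_api_version_is_supported(self):
            # Each test runs once per subclass, so both versions are covered.
            self.assertIn(self._api_version, (1, 2))


    class VolumeApiV1Checks(VolumeApiV2Checks):
        _api_version = 1
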
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 7e24fa4..ece4299 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -71,7 +71,8 @@
 
     @test.attr(type='gate')
     def test_show_quota_usage(self):
-        _, quota_usage = self.quotas_client.get_quota_usage(self.adm_tenant)
+        _, quota_usage = self.quotas_client.get_quota_usage(
+            self.os_adm.credentials.tenant_name)
         for key in QUOTA_KEYS:
             self.assertIn(key, quota_usage)
             for usage_key in QUOTA_USAGE_KEYS:
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 070d38f..a0792f1 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -29,12 +29,12 @@
         self.volumes_client.wait_for_resource_deletion(volume_id)
 
     def _delete_volume_type(self, volume_type_id):
-        self.client.delete_volume_type(volume_type_id)
+        self.volume_types_client.delete_volume_type(volume_type_id)
 
     @test.attr(type='smoke')
     def test_volume_type_list(self):
         # List Volume types.
-        _, body = self.client.list_volume_types()
+        _, body = self.volume_types_client.list_volume_types()
         self.assertIsInstance(body, list)
 
     @test.attr(type='smoke')
@@ -48,7 +48,7 @@
         extra_specs = {"storage_protocol": proto,
                        "vendor_name": vendor}
         body = {}
-        _, body = self.client.create_volume_type(
+        _, body = self.volume_types_client.create_volume_type(
             vol_type_name,
             extra_specs=extra_specs)
         self.assertIn('id', body)
@@ -87,7 +87,7 @@
         vendor = CONF.volume.vendor_name
         extra_specs = {"storage_protocol": proto,
                        "vendor_name": vendor}
-        _, body = self.client.create_volume_type(
+        _, body = self.volume_types_client.create_volume_type(
             name,
             extra_specs=extra_specs)
         self.assertIn('id', body)
@@ -98,7 +98,8 @@
                          "to the requested name")
         self.assertTrue(body['id'] is not None,
                         "Field volume_type id is empty or not found.")
-        _, fetched_volume_type = self.client.get_volume_type(body['id'])
+        _, fetched_volume_type = self.volume_types_client.get_volume_type(
+            body['id'])
         self.assertEqual(name, fetched_volume_type['name'],
                          'The fetched Volume_type is different '
                          'from the created Volume_type')
@@ -115,11 +116,11 @@
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type-")
-        _, body = self.client.create_volume_type(name)
+        _, body = self.volume_types_client.create_volume_type(name)
         self.addCleanup(self._delete_volume_type, body['id'])
 
         # Create encryption type
-        _, encryption_type = self.client.create_encryption_type(
+        _, encryption_type = self.volume_types_client.create_encryption_type(
             body['id'], provider=provider,
             control_location=control_location)
         self.assertIn('volume_type_id', encryption_type)
@@ -131,8 +132,9 @@
                          "equal to the requested control_location")
 
         # Get encryption type
-        _, fetched_encryption_type = self.client.get_encryption_type(
-            encryption_type['volume_type_id'])
+        _, fetched_encryption_type = (
+            self.volume_types_client.get_encryption_type(
+                encryption_type['volume_type_id']))
         self.assertEqual(provider,
                          fetched_encryption_type['provider'],
                          'The fetched encryption_type provider is different '
@@ -143,11 +145,12 @@
                          'different from the created encryption_type')
 
         # Delete encryption type
-        self.client.delete_encryption_type(
+        self.volume_types_client.delete_encryption_type(
             encryption_type['volume_type_id'])
         resource = {"id": encryption_type['volume_type_id'],
                     "type": "encryption-type"}
-        self.client.wait_for_resource_deletion(resource)
-        _, deleted_encryption_type = self.client.get_encryption_type(
-            encryption_type['volume_type_id'])
+        self.volume_types_client.wait_for_resource_deletion(resource)
+        _, deleted_encryption_type = (
+            self.volume_types_client.get_encryption_type(
+                encryption_type['volume_type_id']))
         self.assertEmpty(deleted_encryption_type)
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index 2d72dd2..a154821 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -25,22 +25,23 @@
     def resource_setup(cls):
         super(VolumeTypesExtraSpecsTest, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type-')
-        _, cls.volume_type = cls.client.create_volume_type(vol_type_name)
+        _, cls.volume_type = cls.volume_types_client.create_volume_type(
+            vol_type_name)
 
     @classmethod
     def resource_cleanup(cls):
-        cls.client.delete_volume_type(cls.volume_type['id'])
+        cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
         super(VolumeTypesExtraSpecsTest, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_volume_type_extra_specs_list(self):
         # List Volume types extra specs.
         extra_specs = {"spec1": "val1"}
-        _, body = self.client.create_volume_type_extra_specs(
+        _, body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'], extra_specs)
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
-        _, body = self.client.list_volume_types_extra_specs(
+        _, body = self.volume_types_client.list_volume_types_extra_specs(
             self.volume_type['id'])
         self.assertIsInstance(body, dict)
         self.assertIn('spec1', body)
@@ -49,13 +50,13 @@
     def test_volume_type_extra_specs_update(self):
         # Update volume type extra specs
         extra_specs = {"spec2": "val1"}
-        _, body = self.client.create_volume_type_extra_specs(
+        _, body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'], extra_specs)
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
 
         extra_spec = {"spec2": "val2"}
-        _, body = self.client.update_volume_type_extra_specs(
+        _, body = self.volume_types_client.update_volume_type_extra_specs(
             self.volume_type['id'],
             extra_spec.keys()[0],
             extra_spec)
@@ -67,18 +68,18 @@
     def test_volume_type_extra_spec_create_get_delete(self):
         # Create/Get/Delete volume type extra spec.
         extra_specs = {"spec3": "val1"}
-        _, body = self.client.create_volume_type_extra_specs(
+        _, body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'],
             extra_specs)
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
 
-        self.client.get_volume_type_extra_specs(
+        self.volume_types_client.get_volume_type_extra_specs(
             self.volume_type['id'],
             extra_specs.keys()[0])
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly fetched")
 
-        self.client.delete_volume_type_extra_specs(
+        self.volume_types_client.delete_volume_type_extra_specs(
             self.volume_type['id'],
             extra_specs.keys()[0])
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index f3eee00..8734b16 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -29,104 +29,115 @@
         super(ExtraSpecsNegativeTest, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type-')
         cls.extra_specs = {"spec1": "val1"}
-        _, cls.volume_type = cls.client.create_volume_type(
+        _, cls.volume_type = cls.volume_types_client.create_volume_type(
             vol_type_name,
             extra_specs=cls.extra_specs)
 
     @classmethod
     def resource_cleanup(cls):
-        cls.client.delete_volume_type(cls.volume_type['id'])
+        cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
         super(ExtraSpecsNegativeTest, cls).resource_cleanup()
 
     @test.attr(type='gate')
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
         extra_spec = {"spec1": "val2"}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], extra_spec.keys()[0], None)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], extra_spec.keys()[0], None)
 
     @test.attr(type='gate')
     def test_update_nonexistent_extra_spec_id(self):
         # Should not update volume type extra specs with nonexistent id.
         extra_spec = {"spec1": "val2"}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], str(uuid.uuid4()),
-                          extra_spec)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], str(uuid.uuid4()),
+            extra_spec)
 
     @test.attr(type='gate')
     def test_update_none_extra_spec_id(self):
         # Should not update volume type extra specs with none id.
         extra_spec = {"spec1": "val2"}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], None, extra_spec)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], None, extra_spec)
 
     @test.attr(type='gate')
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
             # body.
         extra_spec = {"spec1": "val2", 'spec2': 'val1'}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], extra_spec.keys()[0],
-                          extra_spec)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], extra_spec.keys()[0],
+            extra_spec)
 
     @test.attr(type='gate')
     def test_create_nonexistent_type_id(self):
         # Should not create volume type extra spec for nonexistent volume
             # type id.
         extra_specs = {"spec2": "val1"}
-        self.assertRaises(exceptions.NotFound,
-                          self.client.create_volume_type_extra_specs,
-                          str(uuid.uuid4()), extra_specs)
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.create_volume_type_extra_specs,
+            str(uuid.uuid4()), extra_specs)
 
     @test.attr(type='gate')
     def test_create_none_body(self):
         # Should not create volume type extra spec for none POST body.
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.create_volume_type_extra_specs,
-                          self.volume_type['id'], None)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.create_volume_type_extra_specs,
+            self.volume_type['id'], None)
 
     @test.attr(type='gate')
     def test_create_invalid_body(self):
         # Should not create volume type extra spec for invalid POST body.
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.create_volume_type_extra_specs,
-                          self.volume_type['id'], ['invalid'])
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.create_volume_type_extra_specs,
+            self.volume_type['id'], ['invalid'])
 
     @test.attr(type='gate')
     def test_delete_nonexistent_volume_type_id(self):
         # Should not delete volume type extra spec for nonexistent
             # type id.
         extra_specs = {"spec1": "val1"}
-        self.assertRaises(exceptions.NotFound,
-                          self.client.delete_volume_type_extra_specs,
-                          str(uuid.uuid4()), extra_specs.keys()[0])
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.delete_volume_type_extra_specs,
+            str(uuid.uuid4()), extra_specs.keys()[0])
 
     @test.attr(type='gate')
     def test_list_nonexistent_volume_type_id(self):
         # Should not list volume type extra spec for nonexistent type id.
-        self.assertRaises(exceptions.NotFound,
-                          self.client.list_volume_types_extra_specs,
-                          str(uuid.uuid4()))
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.list_volume_types_extra_specs,
+            str(uuid.uuid4()))
 
     @test.attr(type='gate')
     def test_get_nonexistent_volume_type_id(self):
         # Should not get volume type extra spec for nonexistent type id.
         extra_specs = {"spec1": "val1"}
-        self.assertRaises(exceptions.NotFound,
-                          self.client.get_volume_type_extra_specs,
-                          str(uuid.uuid4()), extra_specs.keys()[0])
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.get_volume_type_extra_specs,
+            str(uuid.uuid4()), extra_specs.keys()[0])
 
     @test.attr(type='gate')
     def test_get_nonexistent_extra_spec_id(self):
         # Should not get volume type extra spec for nonexistent extra spec
             # id.
-        self.assertRaises(exceptions.NotFound,
-                          self.client.get_volume_type_extra_specs,
-                          self.volume_type['id'], str(uuid.uuid4()))
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.get_volume_type_extra_specs,
+            self.volume_type['id'], str(uuid.uuid4()))
 
 
 class ExtraSpecsNegativeTestXML(ExtraSpecsNegativeTest):
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index c18e15d..a4d6431 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -35,18 +35,20 @@
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
         self.assertRaises(exceptions.BadRequest,
-                          self.client.create_volume_type, '')
+                          self.volume_types_client.create_volume_type, '')
 
     @test.attr(type='gate')
     def test_get_nonexistent_type_id(self):
         # Should not be able to get volume type with nonexistent type id.
-        self.assertRaises(exceptions.NotFound, self.client.get_volume_type,
+        self.assertRaises(exceptions.NotFound,
+                          self.volume_types_client.get_volume_type,
                           str(uuid.uuid4()))
 
     @test.attr(type='gate')
     def test_delete_nonexistent_type_id(self):
         # Should not be able to delete volume type with nonexistent type id.
-        self.assertRaises(exceptions.NotFound, self.client.delete_volume_type,
+        self.assertRaises(exceptions.NotFound,
+                          self.volume_types_client.delete_volume_type,
                           str(uuid.uuid4()))
 
 
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 7f5361d..d78ddb6 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -154,37 +154,34 @@
     @classmethod
     def resource_setup(cls):
         super(BaseVolumeAdminTest, cls).resource_setup()
-        cls.adm_user = CONF.identity.admin_username
-        cls.adm_pass = CONF.identity.admin_password
-        cls.adm_tenant = CONF.identity.admin_tenant_name
-        if not all((cls.adm_user, cls.adm_pass, cls.adm_tenant)):
-            msg = ("Missing Volume Admin API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
 
-        if CONF.compute.allow_tenant_isolation:
-            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
-                                         interface=cls._interface)
-        else:
-            cls.os_adm = clients.AdminManager(interface=cls._interface)
+        try:
+            cls.adm_creds = cls.isolated_creds.get_admin_creds()
+            cls.os_adm = clients.Manager(
+                credentials=cls.adm_creds, interface=cls._interface)
+        except NotImplementedError:
+            msg = "Missing Volume Admin API credentials in configuration."
+            raise cls.skipException(msg)
 
         cls.qos_specs = []
 
-        cls.client = cls.os_adm.volume_types_client
         cls.hosts_client = cls.os_adm.volume_hosts_client
         cls.quotas_client = cls.os_adm.volume_quotas_client
-        cls.volume_types_client = cls.os_adm.volume_types_client
 
         if cls._api_version == 1:
             if not CONF.volume_feature_enabled.api_v1:
                 msg = "Volume API v1 is disabled"
                 raise cls.skipException(msg)
             cls.volume_qos_client = cls.os_adm.volume_qos_client
+            cls.volume_types_client = cls.os_adm.volume_types_client
+            cls.volume_client = cls.os_adm.volumes_client
         elif cls._api_version == 2:
             if not CONF.volume_feature_enabled.api_v2:
                 msg = "Volume API v2 is disabled"
                 raise cls.skipException(msg)
             cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
+            cls.volume_types_client = cls.os_adm.volume_types_v2_client
+            cls.volume_client = cls.os_adm.volumes_v2_client
 
     @classmethod
     def resource_cleanup(cls):
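
The reworked setup above asks the credential provider for admin credentials and skips the whole class when the provider cannot supply them, then picks v1 or v2 clients according to the API version under test. A standalone sketch of that flow, with placeholder provider and client names rather than the real tempest objects:

class NoAdminCredsProvider(object):
    """Stand-in for a credential provider without admin support."""
    def get_admin_creds(self):
        raise NotImplementedError("admin credentials not configured")


def pick_admin_clients(provider, api_version):
    # Ask the provider for admin credentials; translate "not supported"
    # into a skip-style signal, as the resource_setup above does.
    try:
        creds = provider.get_admin_creds()
    except NotImplementedError:
        raise RuntimeError(
            "Missing Volume Admin API credentials in configuration.")
    # A real manager would return version-specific client objects here.
    suffix = "" if api_version == 1 else "_v%d" % api_version
    return creds, "volume_types%s_client" % suffix


try:
    pick_admin_clients(NoAdminCredsProvider(), api_version=2)
except RuntimeError as exc:
    print(exc)
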
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 90ac9c1..fe217c1 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -30,15 +30,16 @@
         super(VolumesV2TransfersTest, cls).resource_setup()
 
         # Add another tenant to test volume-transfer
-        if CONF.compute.allow_tenant_isolation:
-            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
-                                         interface=cls._interface)
-            # Add admin tenant to cleanup resources
-            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
-                                         interface=cls._interface)
-        else:
-            cls.os_alt = clients.AltManager()
-            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
+                                     interface=cls._interface)
+        # Add admin tenant to cleanup resources
+        try:
+            creds = cls.isolated_creds.get_admin_creds()
+            cls.os_adm = clients.Manager(
+                credentials=creds, interface=cls._interface)
+        except NotImplementedError:
+            msg = "Missing Volume Admin API credentials in configuration."
+            raise cls.skipException(msg)
 
         cls.client = cls.volumes_client
         cls.alt_client = cls.os_alt.volumes_client
diff --git a/tempest/api_schema/request/compute/flavors.py b/tempest/api_schema/request/compute/flavors.py
index 8fe9e3a..adaaf27 100644
--- a/tempest/api_schema/request/compute/flavors.py
+++ b/tempest/api_schema/request/compute/flavors.py
@@ -40,14 +40,19 @@
     "json-schema": {
         "type": "object",
         "properties": {
-            "name": {"type": "string"},
-            "ram": {"type": "integer", "minimum": 1},
-            "vcpus": {"type": "integer", "minimum": 1},
-            "disk": {"type": "integer"},
-            "id": {"type": "integer"},
-            "swap": {"type": "integer"},
-            "rxtx_factor": {"type": "integer"},
-            "OS-FLV-EXT-DATA:ephemeral": {"type": "integer"}
+           "flavor": {
+               "type": "object",
+               "properties": {
+                   "name": {"type": "string",
+                            "exclude_tests": ["gen_str_min_length"]},
+                   "ram": {"type": "integer", "minimum": 1},
+                   "vcpus": {"type": "integer", "minimum": 1},
+                   "disk": {"type": "integer"},
+                   "id": {"type": "integer",
+                          "exclude_tests": ["gen_none", "gen_string"]
+                          },
+                   }
+               }
         }
     }
 }
diff --git a/tempest/api_schema/response/messaging/v1/queues.py b/tempest/api_schema/response/messaging/v1/queues.py
index f0b2691..09e0147 100644
--- a/tempest/api_schema/response/messaging/v1/queues.py
+++ b/tempest/api_schema/response/messaging/v1/queues.py
@@ -105,7 +105,9 @@
 
 resource_schema = {
     'type': 'array',
-    'items': 'string',
+    'items': {
+        'type': 'string'
+    },
     'minItems': 1
 }
 
diff --git a/tempest/clients.py b/tempest/clients.py
index 2d07852..cf04929 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -181,6 +181,8 @@
 from tempest.services.volume.json.qos_client import QosSpecsClientJSON
 from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
 from tempest.services.volume.json.volumes_client import VolumesClientJSON
+from tempest.services.volume.v2.json.admin.volume_types_client import \
+    VolumeTypesV2ClientJSON
 from tempest.services.volume.v2.json.availability_zone_client import \
     VolumeV2AvailabilityZoneClientJSON
 from tempest.services.volume.v2.json.extensions_client import \
@@ -332,6 +334,8 @@
             self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
             self.volume_types_client = VolumeTypesClientJSON(
                 self.auth_provider)
+            self.volume_types_v2_client = VolumeTypesV2ClientJSON(
+                self.auth_provider)
             self.identity_client = IdentityClientJSON(self.auth_provider)
             self.identity_v3_client = IdentityV3ClientJSON(
                 self.auth_provider)
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 9ae3dfb..a305e42 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -12,7 +12,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-# @author: David Paterson
 
 """
 Utility for cleaning up environment after Tempest run
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index f5f0db3..0d3c6c6 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -13,11 +13,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-'''
-Created on Sep 3, 2014
 
-@author: David_Paterson
-'''
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest import test
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3c41dd9..0adc7e0 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -20,6 +20,7 @@
 """
 
 import argparse
+import collections
 import datetime
 import os
 import sys
@@ -43,7 +44,7 @@
 
 OPTS = {}
 USERS = {}
-RES = {}
+RES = collections.defaultdict(list)
 
 LOG = None
 
@@ -282,6 +283,8 @@
         If in check mode confirm that the oldest sample available is from
         before the upgrade.
         """
+        if not self.res.get('telemetry'):
+            return
         LOG.info("checking telemetry")
         for server in self.res['servers']:
             client = client_for_user(server['owner'])
@@ -508,6 +511,9 @@
 
 
 def create_volumes(volumes):
+    if not volumes:
+        return
+    LOG.info("Creating volumes")
     for volume in volumes:
         client = client_for_user(volume['owner'])
 
@@ -630,7 +636,7 @@
     global RES
     get_options()
     setup_logging()
-    RES = load_resources(OPTS.resources)
+    RES.update(load_resources(OPTS.resources))
 
     if OPTS.mode == 'create':
         create_resources()
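
Making RES a collections.defaultdict(list) (and merging the loaded YAML with update()) means resource types missing from the file, such as an absent volumes section, simply iterate as empty lists instead of raising KeyError. For example:

import collections

# Hypothetical resource registry shaped like javelin's RES.
RES = collections.defaultdict(list)
RES.update({"servers": [{"name": "srv1", "owner": "javelin"}]})

for volume in RES["volumes"]:      # no KeyError for a missing section
    print("would create", volume)
print(len(RES["servers"]))         # 1
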
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 19ee6d5..2d5e686 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -57,3 +57,4 @@
     name: javelin1
     owner: javelin
     file: /etc/hosts
+telemetry: true
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 5046bff..f426e4d 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -122,6 +122,18 @@
                             not CONF.volume_feature_enabled.api_v2, update)
 
 
+def verify_api_versions(os, service, update):
+    verify = {
+        'cinder': verify_cinder_api_versions,
+        'glance': verify_glance_api_versions,
+        'keystone': verify_keystone_api_versions,
+        'nova': verify_nova_api_versions,
+    }
+    if service not in verify:
+        return
+    verify[service](os, update)
+
+
 def get_extension_client(os, service):
     extensions_client = {
         'nova': os.extensions_client,
@@ -337,10 +349,13 @@
         elif service not in services:
             continue
         results = verify_extensions(os, service, results)
-    verify_keystone_api_versions(os, update)
-    verify_glance_api_versions(os, update)
-    verify_nova_api_versions(os, update)
-    verify_cinder_api_versions(os, update)
+
+    # Verify API versions of all services in the keystone catalog and keystone
+    # itself.
+    services.append('keystone')
+    for service in services:
+        verify_api_versions(os, service, update)
+
     display_results(results, update, replace)
     if update:
         conf_file.close()
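
verify_api_versions turns the previous hard-coded verifier calls into a dispatch table keyed by service name, so only services found in the keystone catalog (plus keystone itself) are checked and unknown services fall through silently. The pattern in isolation, with placeholder verifiers:

def verify_cinder_api_versions(os, update):
    print("checking cinder API versions")


def verify_keystone_api_versions(os, update):
    print("checking keystone API versions")


def verify_api_versions(os, service, update):
    verify = {
        "cinder": verify_cinder_api_versions,
        "keystone": verify_keystone_api_versions,
    }
    if service not in verify:
        return                      # services without a verifier are skipped
    verify[service](os, update)


for service in ["cinder", "swift", "keystone"]:
    verify_api_versions(os=None, service=service, update=False)
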
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index 7423c17..88e8ced 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -58,7 +58,12 @@
         return hash_dict
 
     def is_multi_user(self):
-        return len(self.hash_dict) > 1
+        # Default credentials are not a valid option with locking Accounts
+        if self.use_default_creds:
+            raise exceptions.InvalidConfiguration(
+                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+        else:
+            return len(self.hash_dict) > 1
 
     def _create_hash_file(self, hash_string):
         path = os.path.join(os.path.join(self.accounts_dir, hash_string))
@@ -144,6 +149,21 @@
     to preserve the current behaviour of the serial tempest run.
     """
 
+    def is_multi_user(self):
+        if self.use_default_creds:
+            # Verify that the configured users are valid and distinct
+            try:
+                user = self.get_primary_creds()
+                alt_user = self.get_alt_creds()
+                return user.username != alt_user.username
+            except exceptions.InvalidCredentials as ic:
+                msg = "At least one of the configured credentials is " \
+                      "not valid: %s" % ic.message
+                raise exceptions.InvalidConfiguration(msg)
+        else:
+            # TODO(andreaf) Add a uniqueness check here
+            return len(self.hash_dict) > 1
+
     def get_creds(self, id):
         try:
             # No need to sort the dict as within the same python process
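
For the locking Accounts provider, is_multi_user now treats a fallback to the default configured credentials as a configuration error rather than answering the question; only a populated accounts file can support multiple users. Reduced to its essentials (exception and argument names here are illustrative):

class InvalidConfiguration(Exception):
    pass


def is_multi_user(use_default_creds, hash_dict,
                  accounts_file="etc/accounts.yaml"):
    if use_default_creds:
        # Locking accounts cannot work without a real accounts file.
        raise InvalidConfiguration(
            "Account file %s doesn't exist" % accounts_file)
    return len(hash_dict) > 1


print(is_multi_user(False, {"h1": {}, "h2": {}}))   # True
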
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index 56d34a5..b09c964 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -1,4 +1,5 @@
-# (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
 #    You may obtain a copy of the License at
@@ -43,3 +44,7 @@
     @abc.abstractmethod
     def clear_isolated_creds(self):
         return
+
+    @abc.abstractmethod
+    def is_multi_user(self):
+        return
diff --git a/tempest/common/credentials.py b/tempest/common/credentials.py
new file mode 100644
index 0000000..08b592f
--- /dev/null
+++ b/tempest/common/credentials.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common import accounts
+from tempest.common import isolated_creds
+from tempest import config
+
+CONF = config.CONF
+
+
+# Return the right implementation of CredentialProvider based on config
+# Dropping interface and password, as they are never used anyway
+# TODO(andreaf) Drop them from the CredentialsProvider interface completely
+def get_isolated_credentials(name, network_resources=None,
+                             force_tenant_isolation=False):
+    # If a test requires a new account to work, it can have it via forcing
+    # tenant isolation. A new account will be produced only for that test.
+    # In case admin credentials are not available for the account creation,
+    # the test should be skipped else it would fail.
+    if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
+        return isolated_creds.IsolatedCreds(
+            name=name,
+            network_resources=network_resources)
+    else:
+        if CONF.auth.locking_credentials_provider:
+            # Most params are not relevant for pre-created accounts
+            return accounts.Accounts(name=name)
+        else:
+            return accounts.NotLockingAccounts(name=name)
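
get_isolated_credentials is effectively a three-way factory: tenant isolation wins whenever it is allowed or forced, otherwise the locking or non-locking accounts provider is selected from config. The decision order, with plain booleans standing in for the CONF lookups:

def choose_provider(allow_tenant_isolation, force_tenant_isolation,
                    locking_credentials_provider):
    # Isolation takes precedence, then the account-file based providers.
    if allow_tenant_isolation or force_tenant_isolation:
        return "IsolatedCreds"
    if locking_credentials_provider:
        return "Accounts"
    return "NotLockingAccounts"


assert choose_provider(True, False, False) == "IsolatedCreds"
assert choose_provider(False, True, True) == "IsolatedCreds"
assert choose_provider(False, False, True) == "Accounts"
assert choose_provider(False, False, False) == "NotLockingAccounts"
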
diff --git a/tempest/common/generate_sample_tempest.py b/tempest/common/generate_sample_tempest.py
deleted file mode 100644
index ceb3394..0000000
--- a/tempest/common/generate_sample_tempest.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import sys
-
-import tempest.config
-from tempest.openstack.common.config import generator
-
-# NOTE(mtreinish): This hack is needed because of how oslo config is used in
-# tempest. Tempest is run from inside a test runner and so we can't rely on the
-# global CONF object being fully populated when we run a test. (test runners
-# don't init every file for running a test) So to get around that we manually
-# load the config file in tempest for each test class to ensure that every
-# config option is set. However, the tool expects the CONF object to be fully
-# populated when it inits all the files in the project. This just works around
-# the issue by manually loading the config file (which may or may not exist)
-# which will populate all the options before running the generator.
-
-
-if __name__ == "__main__":
-    tempest.config.register_opts()
-    generator.generate(sys.argv[1:])
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 0398af1..3f405b1 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -13,6 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
 import functools
 
 import jsonschema
@@ -30,9 +31,11 @@
     return expected_result
 
 
-def generator_type(*args):
+def generator_type(*args, **kwargs):
     def wrapper(func):
         func.types = args
+        for key in kwargs:
+            setattr(func, key, kwargs[key])
         return func
     return wrapper
 
@@ -106,37 +109,74 @@
             jsonschema.Draft4Validator.check_schema(schema['json-schema'])
         jsonschema.validate(schema, self.schema)
 
-    def generate(self, schema):
+    def generate_scenarios(self, schema, path=None):
         """
-        Generate an json dictionary based on a schema.
-        Only one value is mis-generated for each dictionary created.
+        Generates the scenarios (all possible test cases) out of the given
+        schema.
 
-        Any generator must return a list of tuples or a single tuple.
-        The values of this tuple are:
-          result[0]: Name of the test
-          result[1]: json schema for the test
-          result[2]: expected result of the test (can be None)
+        :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
+        :param path: the schema path if the given schema is a subschema
         """
-        LOG.debug("generate_invalid: %s" % schema)
-        schema_type = schema["type"]
-        if isinstance(schema_type, list):
+        schema_type = schema['type']
+        scenarios = []
+
+        if schema_type == 'object':
+            properties = schema["properties"]
+            for attribute, definition in properties.iteritems():
+                current_path = copy.copy(path)
+                if path is not None:
+                    current_path.append(attribute)
+                else:
+                    current_path = [attribute]
+                scenarios.extend(
+                    self.generate_scenarios(definition, current_path))
+        elif isinstance(schema_type, list):
             if "integer" in schema_type:
                 schema_type = "integer"
             else:
                 raise Exception("non-integer list types not supported")
-        result = []
-        if schema_type not in self.types_dict:
-            raise TypeError("generator (%s) doesn't support type: %s"
-                            % (self.__class__.__name__, schema_type))
         for generator in self.types_dict[schema_type]:
-            ret = generator(schema)
-            if ret is not None:
-                if isinstance(ret, list):
-                    result.extend(ret)
-                elif isinstance(ret, tuple):
-                    result.append(ret)
-                else:
-                    raise Exception("generator (%s) returns invalid result: %s"
-                                    % (generator, ret))
-        LOG.debug("result: %s" % result)
-        return result
+            if hasattr(generator, "needed_property"):
+                prop = generator.needed_property
+                if (prop not in schema or
+                    schema[prop] is None or
+                    schema[prop] is False):
+                    continue
+
+            name = generator.__name__
+            if ("exclude_tests" in schema and
+               name in schema["exclude_tests"]):
+                continue
+            if path is not None:
+                name = "%s_%s" % ("_".join(path), name)
+            scenarios.append({
+                "_negtest_name": name,
+                "_negtest_generator": generator,
+                "_negtest_schema": schema,
+                "_negtest_path": path})
+        return scenarios
+
+    def generate_payload(self, test, schema):
+        """
+        Generates one jsonschema out of the given test. generate_scenarios
+        must be run beforehand to register all needed variables on the test.
+
+        :param test: A test object (scenario) with all _negtest variables on it
+        :param schema: schema for the test
+        """
+        generator = test._negtest_generator
+        ret = generator(test._negtest_schema)
+        path = copy.copy(test._negtest_path)
+        expected_result = None
+
+        if ret is not None:
+            generator_result = generator(test._negtest_schema)
+            invalid_snippet = generator_result[1]
+            expected_result = generator_result[2]
+            element = path.pop()
+            if len(path) > 0:
+                schema_snip = reduce(dict.get, path, schema)
+                schema_snip[element] = invalid_snippet
+            else:
+                schema[element] = invalid_snippet
+        return expected_result
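
The split into generate_scenarios and generate_payload means a scenario only records which generator to run and where in the schema it applies; the payload is mutated later by walking the recorded path. The path handling can be illustrated independently of the tempest classes (reduce comes from functools on Python 3):

import copy
from functools import reduce


def apply_invalid_snippet(payload, path, invalid_value):
    # Walk to the parent dict of the target attribute and overwrite it,
    # the way generate_payload applies a generator result along
    # _negtest_path.
    payload = copy.deepcopy(payload)
    path = list(path)
    element = path.pop()
    parent = reduce(dict.get, path, payload) if path else payload
    parent[element] = invalid_value
    return payload


valid = {"flavor": {"name": "m1.tiny", "ram": 512}}
print(apply_invalid_snippet(valid, ["flavor", "ram"], -1))
# {'flavor': {'name': 'm1.tiny', 'ram': -1}}
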
diff --git a/tempest/common/generator/negative_generator.py b/tempest/common/generator/negative_generator.py
index 4f3d2cd..1d5ed43 100644
--- a/tempest/common/generator/negative_generator.py
+++ b/tempest/common/generator/negative_generator.py
@@ -47,65 +47,32 @@
         if min_length > 0:
             return "x" * (min_length - 1)
 
-    @base.generator_type("string")
+    @base.generator_type("string", needed_property="maxLength")
     @base.simple_generator
     def gen_str_max_length(self, schema):
         max_length = schema.get("maxLength", -1)
-        if max_length > -1:
-            return "x" * (max_length + 1)
+        return "x" * (max_length + 1)
 
-    @base.generator_type("integer")
+    @base.generator_type("integer", needed_property="minimum")
     @base.simple_generator
     def gen_int_min(self, schema):
-        if "minimum" in schema:
-            minimum = schema["minimum"]
-            if "exclusiveMinimum" not in schema:
-                minimum -= 1
-            return minimum
+        minimum = schema["minimum"]
+        if "exclusiveMinimum" not in schema:
+            minimum -= 1
+        return minimum
 
-    @base.generator_type("integer")
+    @base.generator_type("integer", needed_property="maximum")
     @base.simple_generator
     def gen_int_max(self, schema):
-        if "maximum" in schema:
-            maximum = schema["maximum"]
-            if "exclusiveMaximum" not in schema:
-                maximum += 1
-            return maximum
+        maximum = schema["maximum"]
+        if "exclusiveMaximum" not in schema:
+            maximum += 1
+        return maximum
 
-    @base.generator_type("object")
-    def gen_obj_remove_attr(self, schema):
-        invalids = []
-        valid_schema = valid.ValidTestGenerator().generate_valid(schema)
-        required = schema.get("required", [])
-        for r in required:
-            new_valid = copy.deepcopy(valid_schema)
-            del new_valid[r]
-            invalids.append(("gen_obj_remove_attr", new_valid, None))
-        return invalids
-
-    @base.generator_type("object")
+    @base.generator_type("object", needed_property="additionalProperties")
     @base.simple_generator
     def gen_obj_add_attr(self, schema):
         valid_schema = valid.ValidTestGenerator().generate_valid(schema)
-        if not schema.get("additionalProperties", True):
-            new_valid = copy.deepcopy(valid_schema)
-            new_valid["$$$$$$$$$$"] = "xxx"
-            return new_valid
-
-    @base.generator_type("object")
-    def gen_inv_prop_obj(self, schema):
-        LOG.debug("generate_invalid_object: %s" % schema)
-        valid_schema = valid.ValidTestGenerator().generate_valid(schema)
-        invalids = []
-        properties = schema["properties"]
-
-        for k, v in properties.iteritems():
-            for invalid in self.generate(v):
-                LOG.debug(v)
-                new_valid = copy.deepcopy(valid_schema)
-                new_valid[k] = invalid[1]
-                name = "prop_%s_%s" % (k, invalid[0])
-                invalids.append((name, new_valid, invalid[2]))
-
-        LOG.debug("generate_invalid_object return: %s" % invalids)
-        return invalids
+        new_valid = copy.deepcopy(valid_schema)
+        new_valid["$$$$$$$$$$"] = "xxx"
+        return new_valid
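
With generator_type now accepting keyword metadata, each negative generator declares the schema property it needs (needed_property) instead of re-checking for it in its body; generate_scenarios filters on that attribute before ever calling the generator. The decorator mechanics on their own, outside the tempest classes:

def generator_type(*args, **kwargs):
    # Attach the handled schema types plus any extra metadata, e.g.
    # needed_property, to the decorated function.
    def wrapper(func):
        func.types = args
        for key, value in kwargs.items():
            setattr(func, key, value)
        return func
    return wrapper


@generator_type("integer", needed_property="minimum")
def gen_int_min(schema):
    minimum = schema["minimum"]
    if "exclusiveMinimum" not in schema:
        minimum -= 1
    return minimum


print(gen_int_min.types)              # ('integer',)
print(gen_int_min.needed_property)    # 'minimum'
print(gen_int_min({"minimum": 1}))    # 0
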
diff --git a/tempest/common/generator/valid_generator.py b/tempest/common/generator/valid_generator.py
index 0d7b398..7b80afc 100644
--- a/tempest/common/generator/valid_generator.py
+++ b/tempest/common/generator/valid_generator.py
@@ -54,5 +54,28 @@
             obj[k] = self.generate_valid(v)
         return obj
 
+    def generate(self, schema):
+        schema_type = schema["type"]
+        if isinstance(schema_type, list):
+            if "integer" in schema_type:
+                schema_type = "integer"
+            else:
+                raise Exception("non-integer list types not supported")
+        result = []
+        if schema_type not in self.types_dict:
+            raise TypeError("generator (%s) doesn't support type: %s"
+                            % (self.__class__.__name__, schema_type))
+        for generator in self.types_dict[schema_type]:
+            ret = generator(schema)
+            if ret is not None:
+                if isinstance(ret, list):
+                    result.extend(ret)
+                elif isinstance(ret, tuple):
+                    result.append(ret)
+                else:
+                    raise Exception("generator (%s) returns invalid result: %s"
+                                    % (generator, ret))
+        return result
+
     def generate_valid(self, schema):
         return self.generate(schema)[0][1]
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index b2edfee..2d16107 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -351,3 +351,6 @@
             except exceptions.NotFound:
                 LOG.warn("tenant with name: %s not found for delete" %
                          creds.tenant_name)
+
+    def is_multi_user(self):
+        return True
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 00fe8d2..42e4f56 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -568,9 +568,10 @@
             if self.is_resource_deleted(id):
                 return
             if int(time.time()) - start_time >= self.build_timeout:
-                message = ('Failed to delete resource %(id)s within the '
-                           'required time (%(timeout)s s).' %
-                           {'id': id, 'timeout': self.build_timeout})
+                message = ('Failed to delete %(resource_type)s %(id)s within '
+                           'the required time (%(timeout)s s).' %
+                           {'resource_type': self.resource_type, 'id': id,
+                            'timeout': self.build_timeout})
                 caller = misc_utils.find_test_caller()
                 if caller:
                     message = '(%s) %s' % (caller, message)
@@ -585,6 +586,11 @@
                    % self.__class__.__name__)
         raise NotImplementedError(message)
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'resource'
+
     @classmethod
     def validate_response(cls, schema, resp, body):
         # Only check the response if the status code is a success code
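
The resource_type property exists so the deletion-timeout message can name what kind of resource failed to delete; subclasses override it while the base class keeps a generic default. A minimal version of the override pattern (class names here are illustrative):

class BaseClient(object):
    @property
    def resource_type(self):
        """Generic fallback used in the timeout message."""
        return "resource"

    def deletion_timeout_message(self, res_id, timeout):
        return ("Failed to delete %(resource_type)s %(id)s within the "
                "required time (%(timeout)s s)." %
                {"resource_type": self.resource_type, "id": res_id,
                 "timeout": timeout})


class EncryptionTypesClient(BaseClient):
    @property
    def resource_type(self):
        return "encryption-type"


print(EncryptionTypesClient().deletion_timeout_message("abc123", 300))
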
diff --git a/tempest/config.py b/tempest/config.py
index 174a895..d8f22d4 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -20,6 +20,7 @@
 
 from oslo.config import cfg
 
+from tempest.openstack.common import lockutils
 from tempest.openstack.common import log as logging
 
 
@@ -38,9 +39,28 @@
                default='etc/accounts.yaml',
                help="Path to the yaml file that contains the list of "
                     "credentials to use for running tests"),
+    cfg.BoolOpt('allow_tenant_isolation',
+                default=False,
+                help="Allows test cases to create/destroy tenants and "
+                     "users. This option requires that OpenStack Identity "
+                     "API admin credentials are known. If false, isolated "
+                     "test cases and parallel execution, can still be "
+                     "achieved configuring a list of test accounts",
+                deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
+                                                   group='compute'),
+                                 cfg.DeprecatedOpt('allow_tenant_isolation',
+                                                   group='orchestration')]),
+    cfg.BoolOpt('locking_credentials_provider',
+                default=False,
+                help="If set to True it enables the Accounts provider, "
+                     "which locks credentials to allow for parallel execution "
+                     "with pre-provisioned accounts. It can only be used to "
+                     "run tests that ensure credentials cleanup happens. "
+                     "It requires at least `2 * CONC` distinct accounts "
+                     "configured in `test_accounts_file`, with CONC == the "
+                     "number of concurrent test processes."),
 ]
 
-
 identity_group = cfg.OptGroup(name='identity',
                               title="Keystone Configuration Options")
 
@@ -129,12 +149,6 @@
                              title='Compute Service Options')
 
 ComputeGroup = [
-    cfg.BoolOpt('allow_tenant_isolation',
-                default=False,
-                help="Allows test cases to create/destroy tenants and "
-                     "users. This option enables isolated test cases and "
-                     "better parallel execution, but also requires that "
-                     "OpenStack Identity API admin credentials are known."),
     cfg.StrOpt('image_ref',
                help="Valid primary image reference to be used in tests. "
                     "This is a required option"),
@@ -514,7 +528,7 @@
                help='Time in seconds between volume availability checks.'),
     cfg.IntOpt('build_timeout',
                default=300,
-               help='Timeout in seconds to wait for a volume to become'
+               help='Timeout in seconds to wait for a volume to become '
                     'available.'),
     cfg.StrOpt('catalog_type',
                default='volume',
@@ -666,12 +680,6 @@
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the orchestration service."),
-    cfg.BoolOpt('allow_tenant_isolation',
-                default=False,
-                help="Allows test cases to create/destroy tenants and "
-                     "users. This option enables isolated test cases and "
-                     "better parallel execution, but also requires that "
-                     "OpenStack Identity API admin credentials are known."),
     cfg.IntOpt('build_interval',
                default=1,
                help="Time in seconds between build status checks."),
@@ -1020,44 +1028,60 @@
                help="Test generator class for all negative tests"),
 ]
 
+_opts = [
+    (auth_group, AuthGroup),
+    (compute_group, ComputeGroup),
+    (compute_features_group, ComputeFeaturesGroup),
+    (identity_group, IdentityGroup),
+    (identity_feature_group, IdentityFeatureGroup),
+    (image_group, ImageGroup),
+    (image_feature_group, ImageFeaturesGroup),
+    (network_group, NetworkGroup),
+    (network_feature_group, NetworkFeaturesGroup),
+    (messaging_group, MessagingGroup),
+    (volume_group, VolumeGroup),
+    (volume_feature_group, VolumeFeaturesGroup),
+    (object_storage_group, ObjectStoreGroup),
+    (object_storage_feature_group, ObjectStoreFeaturesGroup),
+    (database_group, DatabaseGroup),
+    (orchestration_group, OrchestrationGroup),
+    (telemetry_group, TelemetryGroup),
+    (dashboard_group, DashboardGroup),
+    (data_processing_group, DataProcessingGroup),
+    (boto_group, BotoGroup),
+    (compute_admin_group, ComputeAdminGroup),
+    (stress_group, StressGroup),
+    (scenario_group, ScenarioGroup),
+    (service_available_group, ServiceAvailableGroup),
+    (debug_group, DebugGroup),
+    (baremetal_group, BaremetalGroup),
+    (input_scenario_group, InputScenarioGroup),
+    (cli_group, CLIGroup),
+    (negative_group, NegativeGroup)
+]
+
 
 def register_opts():
-    register_opt_group(cfg.CONF, auth_group, AuthGroup)
-    register_opt_group(cfg.CONF, compute_group, ComputeGroup)
-    register_opt_group(cfg.CONF, compute_features_group,
-                       ComputeFeaturesGroup)
-    register_opt_group(cfg.CONF, identity_group, IdentityGroup)
-    register_opt_group(cfg.CONF, identity_feature_group,
-                       IdentityFeatureGroup)
-    register_opt_group(cfg.CONF, image_group, ImageGroup)
-    register_opt_group(cfg.CONF, image_feature_group, ImageFeaturesGroup)
-    register_opt_group(cfg.CONF, network_group, NetworkGroup)
-    register_opt_group(cfg.CONF, network_feature_group,
-                       NetworkFeaturesGroup)
-    register_opt_group(cfg.CONF, messaging_group, MessagingGroup)
-    register_opt_group(cfg.CONF, volume_group, VolumeGroup)
-    register_opt_group(cfg.CONF, volume_feature_group,
-                       VolumeFeaturesGroup)
-    register_opt_group(cfg.CONF, object_storage_group, ObjectStoreGroup)
-    register_opt_group(cfg.CONF, object_storage_feature_group,
-                       ObjectStoreFeaturesGroup)
-    register_opt_group(cfg.CONF, database_group, DatabaseGroup)
-    register_opt_group(cfg.CONF, orchestration_group, OrchestrationGroup)
-    register_opt_group(cfg.CONF, telemetry_group, TelemetryGroup)
-    register_opt_group(cfg.CONF, dashboard_group, DashboardGroup)
-    register_opt_group(cfg.CONF, data_processing_group,
-                       DataProcessingGroup)
-    register_opt_group(cfg.CONF, boto_group, BotoGroup)
-    register_opt_group(cfg.CONF, compute_admin_group, ComputeAdminGroup)
-    register_opt_group(cfg.CONF, stress_group, StressGroup)
-    register_opt_group(cfg.CONF, scenario_group, ScenarioGroup)
-    register_opt_group(cfg.CONF, service_available_group,
-                       ServiceAvailableGroup)
-    register_opt_group(cfg.CONF, debug_group, DebugGroup)
-    register_opt_group(cfg.CONF, baremetal_group, BaremetalGroup)
-    register_opt_group(cfg.CONF, input_scenario_group, InputScenarioGroup)
-    register_opt_group(cfg.CONF, cli_group, CLIGroup)
-    register_opt_group(cfg.CONF, negative_group, NegativeGroup)
+    for g, o in _opts:
+        register_opt_group(cfg.CONF, g, o)
+
+
+def list_opts():
+    """Return a list of oslo.config options available.
+
+    The purpose of this is to allow tools like the Oslo sample config file
+    generator to discover the options exposed to users.
+    """
+    optlist = [(g.name, o) for g, o in _opts]
+
+    # NOTE(jgrimm): Can be removed once oslo-incubator/oslo changes happen.
+    optlist.append((None, lockutils.util_opts))
+    optlist.append((None, logging.common_cli_opts))
+    optlist.append((None, logging.logging_cli_opts))
+    optlist.append((None, logging.generic_log_opts))
+    optlist.append((None, logging.log_opts))
+
+    return optlist
 
 
 # this should never be called outside of this class
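
Gathering every (group, option list) pair into _opts keeps register_opts a two-line loop and gives list_opts a single source of truth to hand to a sample-config generator. The shape of that arrangement, with plain dictionaries standing in for the oslo.config groups and options:

# Stand-ins for oslo.config OptGroup / Opt objects, just to show the shape.
auth_group = {"name": "auth"}
AuthGroup = [{"opt": "allow_tenant_isolation"}]
volume_group = {"name": "volume"}
VolumeGroup = [{"opt": "build_timeout"}]

_opts = [
    (auth_group, AuthGroup),
    (volume_group, VolumeGroup),
]


def register_opts(register_opt_group, conf):
    for g, o in _opts:
        register_opt_group(conf, g, o)


def list_opts():
    # Same data, reshaped as (group name, options) for a config generator.
    return [(g["name"], o) for g, o in _opts]


print(list_opts())
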
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 55cc89b..6014cff 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -24,7 +24,7 @@
 
 PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
 TEST_DEFINITION = re.compile(r'^\s*def test.*')
-SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
+SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
 SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
 VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
 mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
@@ -58,15 +58,15 @@
                         "T104: Scenario tests require a service decorator")
 
 
-def no_setupclass_for_unit_tests(physical_line, filename):
+def no_setup_teardown_class_for_tests(physical_line, filename):
 
     if pep8.noqa(physical_line):
         return
 
-    if 'tempest/tests' in filename:
-        if SETUPCLASS_DEFINITION.match(physical_line):
+    if 'tempest/test.py' not in filename:
+        if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
             return (physical_line.find('def'),
-                    "T105: setUpClass can not be used with unit tests")
+                    "T105: (setUp|tearDown)Class can not be used in tests")
 
 
 def no_vi_headers(physical_line, line_number, lines):
@@ -119,7 +119,7 @@
 def factory(register):
     register(import_no_clients_in_api)
     register(scenario_tests_need_service_tags)
-    register(no_setupclass_for_unit_tests)
+    register(no_setup_teardown_class_for_tests)
     register(no_vi_headers)
     register(service_tags_not_in_module_path)
     register(no_mutable_default_args)
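
The widened check now flags both setUpClass and tearDownClass anywhere outside tempest/test.py; because the regex requires leading whitespace, only method definitions inside a class body match. A quick check of the regex behaviour:

import re

SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')

samples = [
    "    def setUpClass(cls):",      # flagged
    "    def tearDownClass(cls):",   # flagged
    "def setUpClass(cls):",          # module level: \s+ does not match
    "    def setUp(self):",          # plain setUp is fine
]
for line in samples:
    print(bool(SETUP_TEARDOWN_CLASS_DEFINITION.match(line)), line.strip())
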
diff --git a/tempest/openstack/common/config/generator.py b/tempest/openstack/common/config/generator.py
deleted file mode 100644
index 664200e..0000000
--- a/tempest/openstack/common/config/generator.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright 2012 SINA Corporation
-# Copyright 2014 Cisco Systems, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Extracts OpenStack config option info from module(s)."""
-
-from __future__ import print_function
-
-import argparse
-import imp
-import os
-import re
-import socket
-import sys
-import textwrap
-
-from oslo.config import cfg
-import six
-import stevedore.named
-
-from tempest.openstack.common import gettextutils
-from tempest.openstack.common import importutils
-
-gettextutils.install('tempest')
-
-STROPT = "StrOpt"
-BOOLOPT = "BoolOpt"
-INTOPT = "IntOpt"
-FLOATOPT = "FloatOpt"
-LISTOPT = "ListOpt"
-DICTOPT = "DictOpt"
-MULTISTROPT = "MultiStrOpt"
-
-OPT_TYPES = {
-    STROPT: 'string value',
-    BOOLOPT: 'boolean value',
-    INTOPT: 'integer value',
-    FLOATOPT: 'floating point value',
-    LISTOPT: 'list value',
-    DICTOPT: 'dict value',
-    MULTISTROPT: 'multi valued',
-}
-
-OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
-                                              FLOATOPT, LISTOPT, DICTOPT,
-                                              MULTISTROPT]))
-
-PY_EXT = ".py"
-BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                       "../../../../"))
-WORDWRAP_WIDTH = 60
-
-
-def raise_extension_exception(extmanager, ep, err):
-    raise
-
-
-def generate(argv):
-    parser = argparse.ArgumentParser(
-        description='generate sample configuration file',
-    )
-    parser.add_argument('-m', dest='modules', action='append')
-    parser.add_argument('-l', dest='libraries', action='append')
-    parser.add_argument('srcfiles', nargs='*')
-    parsed_args = parser.parse_args(argv)
-
-    mods_by_pkg = dict()
-    for filepath in parsed_args.srcfiles:
-        pkg_name = filepath.split(os.sep)[1]
-        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
-                            os.path.basename(filepath).split('.')[0]])
-        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
-    # NOTE(lzyeval): place top level modules before packages
-    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
-    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
-    pkg_names.extend(ext_names)
-
-    # opts_by_group is a mapping of group name to an options list
-    # The options list is a list of (module, options) tuples
-    opts_by_group = {'DEFAULT': []}
-
-    if parsed_args.modules:
-        for module_name in parsed_args.modules:
-            module = _import_module(module_name)
-            if module:
-                for group, opts in _list_opts(module):
-                    opts_by_group.setdefault(group, []).append((module_name,
-                                                                opts))
-
-    # Look for entry points defined in libraries (or applications) for
-    # option discovery, and include their return values in the output.
-    #
-    # Each entry point should be a function returning an iterable
-    # of pairs with the group name (or None for the default group)
-    # and the list of Opt instances for that group.
-    if parsed_args.libraries:
-        loader = stevedore.named.NamedExtensionManager(
-            'oslo.config.opts',
-            names=list(set(parsed_args.libraries)),
-            invoke_on_load=False,
-            on_load_failure_callback=raise_extension_exception
-        )
-        for ext in loader:
-            for group, opts in ext.plugin():
-                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
-                opt_list.append((ext.name, opts))
-
-    for pkg_name in pkg_names:
-        mods = mods_by_pkg.get(pkg_name)
-        mods.sort()
-        for mod_str in mods:
-            if mod_str.endswith('.__init__'):
-                mod_str = mod_str[:mod_str.rfind(".")]
-
-            mod_obj = _import_module(mod_str)
-            if not mod_obj:
-                raise RuntimeError("Unable to import module %s" % mod_str)
-
-            for group, opts in _list_opts(mod_obj):
-                opts_by_group.setdefault(group, []).append((mod_str, opts))
-
-    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
-    for group in sorted(opts_by_group.keys()):
-        print_group_opts(group, opts_by_group[group])
-
-
-def _import_module(mod_str):
-    try:
-        if mod_str.startswith('bin.'):
-            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
-            return sys.modules[mod_str[4:]]
-        else:
-            return importutils.import_module(mod_str)
-    except Exception as e:
-        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
-        return None
-
-
-def _is_in_group(opt, group):
-    """Check if opt is in group."""
-    for value in group._opts.values():
-        # NOTE(llu): Temporary workaround for bug #1262148, wait until
-        # newly released oslo.config support '==' operator.
-        if not(value['opt'] != opt):
-            return True
-    return False
-
-
-def _guess_groups(opt):
-    # is it in the DEFAULT group?
-    if _is_in_group(opt, cfg.CONF):
-        return 'DEFAULT'
-
-    # what other groups is it in?
-    for value in cfg.CONF.values():
-        if isinstance(value, cfg.CONF.GroupAttr):
-            if _is_in_group(opt, value._group):
-                return value._group.name
-
-    raise RuntimeError(
-        "Unable to find group for option %s, "
-        "maybe it's defined twice in the same group?"
-        % opt.name
-    )
-
-
-def _list_opts(obj):
-    def is_opt(o):
-        return (isinstance(o, cfg.Opt) and
-                not isinstance(o, cfg.SubCommandOpt))
-
-    opts = list()
-    for attr_str in dir(obj):
-        attr_obj = getattr(obj, attr_str)
-        if is_opt(attr_obj):
-            opts.append(attr_obj)
-        elif (isinstance(attr_obj, list) and
-              all(map(lambda x: is_opt(x), attr_obj))):
-            opts.extend(attr_obj)
-
-    ret = {}
-    for opt in opts:
-        ret.setdefault(_guess_groups(opt), []).append(opt)
-    return ret.items()
-
-
-def print_group_opts(group, opts_by_module):
-    print("[%s]" % group)
-    print('')
-    for mod, opts in opts_by_module:
-        print('#')
-        print('# Options defined in %s' % mod)
-        print('#')
-        print('')
-        for opt in opts:
-            _print_opt(opt)
-        print('')
-
-
-def _get_my_ip():
-    try:
-        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        csock.connect(('8.8.8.8', 80))
-        (addr, port) = csock.getsockname()
-        csock.close()
-        return addr
-    except socket.error:
-        return None
-
-
-def _sanitize_default(name, value):
-    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
-    hostname = socket.gethostname()
-    fqdn = socket.getfqdn()
-    if value.startswith(sys.prefix):
-        # NOTE(jd) Don't use os.path.join, because it is likely to think the
-        # second part is an absolute pathname and therefore drop the first
-        # part.
-        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
-    elif value.startswith(BASEDIR):
-        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
-    elif BASEDIR in value:
-        return value.replace(BASEDIR, '')
-    elif value == _get_my_ip():
-        return '10.0.0.1'
-    elif value in (hostname, fqdn):
-        if 'host' in name:
-            return 'tempest'
-    elif value.endswith(hostname):
-        return value.replace(hostname, 'tempest')
-    elif value.endswith(fqdn):
-        return value.replace(fqdn, 'tempest')
-    elif value.strip() != value:
-        return '"%s"' % value
-    return value
-
-
-def _print_opt(opt):
-    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
-    if not opt_help:
-        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
-        opt_help = ""
-    try:
-        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
-    except (ValueError, AttributeError) as err:
-        sys.stderr.write("%s\n" % str(err))
-        sys.exit(1)
-    opt_help = u'%s (%s)' % (opt_help,
-                             OPT_TYPES[opt_type])
-    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
-    if opt.deprecated_opts:
-        for deprecated_opt in opt.deprecated_opts:
-            if deprecated_opt.name:
-                deprecated_group = (deprecated_opt.group if
-                                    deprecated_opt.group else "DEFAULT")
-                print('# Deprecated group/name - [%s]/%s' %
-                      (deprecated_group,
-                       deprecated_opt.name))
-    try:
-        if opt_default is None:
-            print('#%s=<None>' % opt_name)
-        elif opt_type == STROPT:
-            assert(isinstance(opt_default, six.string_types))
-            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
-                                                          opt_default)))
-        elif opt_type == BOOLOPT:
-            assert(isinstance(opt_default, bool))
-            print('#%s=%s' % (opt_name, str(opt_default).lower()))
-        elif opt_type == INTOPT:
-            assert(isinstance(opt_default, int) and
-                   not isinstance(opt_default, bool))
-            print('#%s=%s' % (opt_name, opt_default))
-        elif opt_type == FLOATOPT:
-            assert(isinstance(opt_default, float))
-            print('#%s=%s' % (opt_name, opt_default))
-        elif opt_type == LISTOPT:
-            assert(isinstance(opt_default, list))
-            print('#%s=%s' % (opt_name, ','.join(opt_default)))
-        elif opt_type == DICTOPT:
-            assert(isinstance(opt_default, dict))
-            opt_default_strlist = [str(key) + ':' + str(value)
-                                   for (key, value) in opt_default.items()]
-            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
-        elif opt_type == MULTISTROPT:
-            assert(isinstance(opt_default, list))
-            if not opt_default:
-                opt_default = ['']
-            for default in opt_default:
-                print('#%s=%s' % (opt_name, default))
-        print('')
-    except Exception:
-        sys.stderr.write('Error in option "%s"\n' % opt_name)
-        sys.exit(1)
-
-
-def main():
-    generate(sys.argv[1:])
-
-if __name__ == '__main__':
-    main()
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 79207cd..928a8e1 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -23,8 +23,8 @@
 
 from tempest import auth
 from tempest import clients
+from tempest.common import credentials
 from tempest.common import debug
-from tempest.common import isolated_creds
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
 from tempest import config
@@ -51,8 +51,9 @@
     @classmethod
     def resource_setup(cls):
         super(ScenarioTest, cls).resource_setup()
-        # Using tempest client for isolated credentials as well
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
+        # TODO(andreaf) Some of the code from this resource_setup could be
+        # moved into `BaseTestCase`
+        cls.isolated_creds = credentials.get_isolated_credentials(
             cls.__name__, network_resources=cls.network_resources)
         cls.manager = clients.Manager(
             credentials=cls.credentials()
@@ -79,27 +80,19 @@
         cls.orchestration_client = cls.manager.orchestration_client
 
     @classmethod
-    def _get_credentials(cls, get_creds, ctype):
-        if CONF.compute.allow_tenant_isolation:
-            creds = get_creds()
-        else:
-            creds = auth.get_default_credentials(ctype)
-        return creds
-
-    @classmethod
     def credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_primary_creds,
-                                    'user')
+        return cls.isolated_creds.get_primary_creds()
 
     @classmethod
     def alt_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_alt_creds,
-                                    'alt_user')
+        return cls.isolated_creds.get_alt_creds()
 
     @classmethod
     def admin_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_admin_creds,
-                                    'identity_admin')
+        try:
+            return cls.isolated_creds.get_admin_creds()
+        except NotImplementedError:
+            raise cls.skipException('Admin Credentials are not available')
 
     # ## Methods to handle sync and async deletes
 
@@ -382,9 +375,16 @@
             _, servers = self.servers_client.list_servers()
             servers = servers['servers']
         for server in servers:
-            LOG.debug('Console output for %s', server['id'])
-            LOG.debug(self.servers_client.get_console_output(server['id'],
-                                                             length=None))
+            console_output = self.servers_client.get_console_output(
+                server['id'], length=None)
+            LOG.debug('Console output for %s\nhead=%s\nbody=\n%s',
+                      server['id'], console_output[0], console_output[1])
+
+    def _log_net_info(self, exc):
+        # network debug is called as part of ssh init
+        if not isinstance(exc, exceptions.SSHTimeout):
+            LOG.debug('Network information on a devstack host')
+            debug.log_net_debug()
 
     def create_server_snapshot(self, server, name=None):
         # Glance client
@@ -443,7 +443,9 @@
         if wait:
             self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
-    def ping_ip_address(self, ip_address, should_succeed=True):
+    def ping_ip_address(self, ip_address, should_succeed=True,
+                        ping_timeout=None):
+        timeout = ping_timeout or CONF.compute.ping_timeout
         cmd = ['ping', '-c1', '-w1', ip_address]
 
         def ping():
@@ -453,8 +455,7 @@
             proc.communicate()
             return (proc.returncode == 0) == should_succeed
 
-        return tempest.test.call_until_true(
-            ping, CONF.compute.ping_timeout, 1)
+        return tempest.test.call_until_true(ping, timeout, 1)
 
 
 class NetworkScenarioTest(ScenarioTest):
@@ -622,6 +623,23 @@
         self.assertIsNone(floating_ip.port_id)
         return floating_ip
 
+    def check_floating_ip_status(self, floating_ip, status):
+        """Verifies floatingip has reached given status. without waiting
+
+        :param floating_ip: net_resources.DeletableFloatingIp floating IP to
+        to check status
+        :param status: target status
+        :raises: AssertionError if status doesn't match
+        """
+        floating_ip.refresh()
+        self.assertEqual(status, floating_ip.status,
+                         message="FloatingIP: {fp} is at status: {cst}. "
+                                 "Failed to reach status: {st}"
+                         .format(fp=floating_ip, cst=floating_ip.status,
+                                 st=status))
+        LOG.info("FloatingIP: {fp} is at status: {st}"
+                 .format(fp=floating_ip, st=status))
+
     def _check_vm_connectivity(self, ip_address,
                                username=None,
                                private_key=None,
@@ -666,9 +684,7 @@
                 ex_msg += ": " + msg
             LOG.exception(ex_msg)
             self._log_console_output(servers)
-            # network debug is called as part of ssh init
-            if not isinstance(e, exceptions.SSHTimeout):
-                debug.log_net_debug()
+            self._log_net_info(e)
             raise
 
     def _check_tenant_network_connectivity(self, server,
@@ -692,9 +708,7 @@
         except Exception as e:
             LOG.exception('Tenant network connectivity check failed')
             self._log_console_output(servers_for_debug)
-            # network debug is called as part of ssh init
-            if not isinstance(e, exceptions.SSHTimeout):
-                debug.log_net_debug()
+            self._log_net_info(e)
             raise
 
     def _check_remote_connectivity(self, source, dest, should_succeed=True):
@@ -924,8 +938,8 @@
         router_id = CONF.network.public_router_id
         network_id = CONF.network.public_network_id
         if router_id:
-            result = client.show_router(router_id)
-            return net_resources.AttributeDict(**result['router'])
+            resp, body = client.show_router(router_id)
+            return net_resources.AttributeDict(**body['router'])
         elif network_id:
             router = self._create_router(client, tenant_id)
             router.set_gateway(network_id)
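Note: the check_floating_ip_status helper added above asserts the current status via refresh() rather than polling, and ping_ip_address now accepts a per-call ping_timeout. A minimal usage sketch follows; the test class, the create_floating_ip fixture and the timeout value are assumptions for illustration, not part of the patch.

    # Sketch only: a NetworkScenarioTest subclass exercising the two helpers.
    from tempest.scenario import manager


    class FloatingIpSmokeTest(manager.NetworkScenarioTest):

        def test_ip_reachable_and_active(self):
            fip = self.create_floating_ip(self.server)  # assumed fixture
            # ping_timeout overrides CONF.compute.ping_timeout for this call
            self.assertTrue(self.ping_ip_address(fip.floating_ip_address,
                                                 ping_timeout=120))
            # refresh()-based check: asserts status without extra waiting
            self.check_floating_ip_status(fip, 'ACTIVE')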
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index abda1f8..791c564 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -24,7 +24,6 @@
 
 class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
 
-    @test.skip_because(bug="1374175")
     def setUp(self):
         super(CfnInitScenarioTest, self).setUp()
         if not CONF.orchestration.image_ref:
@@ -84,7 +83,8 @@
         server_ip =\
             server['addresses'][CONF.compute.network_for_ssh][0]['addr']
 
-        if not self.ping_ip_address(server_ip):
+        if not self.ping_ip_address(
+            server_ip, ping_timeout=CONF.orchestration.build_timeout):
             self._log_console_output(servers=[server])
             self.fail(
                 "(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 58a028f..0c48334 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -87,6 +87,7 @@
         self._check_public_network_connectivity(floating_ip, username,
                                                 private_key, should_connect,
                                                 servers=[self.server])
+        self.check_floating_ip_status(self.floating_ip, 'ACTIVE')
 
     def _wait_server_status_and_check_network_connectivity(self):
         self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index de60745..5d75b64 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -176,16 +176,31 @@
 
     def _check_public_network_connectivity(self, should_connect=True,
                                            msg=None):
+        """Verifies connectivty to a VM via public network and floating IP,
+        and verifies floating IP has resource status is correct.
+
+        Floating IP status is verified after connectivity test in order to
+        not add extra waiting and mask racing conditions.
+
+        :param should_connect: bool. determines if connectivity check is
+        negative or positive.
+        :param msg: Failure message to add to Error message. Should describe
+        the place in the test scenario where the method was called,
+        to indicate the context of the failure
+        """
         ssh_login = CONF.compute.image_ssh_user
         floating_ip, server = self.floating_ip_tuple
         ip_address = floating_ip.floating_ip_address
         private_key = None
+        floatingip_status = 'DOWN'
         if should_connect:
             private_key = self._get_server_key(server)
+            floatingip_status = 'ACTIVE'
         # call the common method in the parent class
         super(TestNetworkBasicOps, self)._check_public_network_connectivity(
             ip_address, ssh_login, private_key, should_connect, msg,
             self.servers)
+        self.check_floating_ip_status(floating_ip, floatingip_status)
 
     def _disassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
@@ -350,6 +365,8 @@
         VMs are created with unique keypair so connectivity also asserts that
         floating IP is associated with the new VM instead of the old one
 
+        Verifies that floating IP status is updated correctly after each change
+
 
         """
         self._setup_network_and_servers()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 188dea8..6c36034 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -241,7 +241,11 @@
             'security_groups': security_groups,
             'tenant_id': tenant.creds.tenant_id
         }
-        return self.create_server(name=name, create_kwargs=create_kwargs)
+        server = self.create_server(name=name, create_kwargs=create_kwargs)
+        self.assertEqual(
+            sorted([s['name'] for s in security_groups]),
+            sorted([s['name'] for s in server['security_groups']]))
+        return server
 
     def _create_tenant_servers(self, tenant, num=1):
         for i in range(num):
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 1cb010d..09927d3 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -79,6 +79,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'aggregate'
+
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
         post_body = {
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 5452f3a..8faf8a7 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -99,6 +99,11 @@
                 return False
         return True
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'flavor'
+
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
         post_body = json.dumps({'extra_specs': specs})
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 8b020d0..0ed1720 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -102,6 +102,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'floating_ip'
+
     def list_floating_ip_pools(self, params=None):
         """Returns a list of all floating IP Pools."""
         url = 'os-floating-ip-pools'
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 9877391..079a91e 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -76,7 +76,7 @@
     def get_image(self, image_id):
         """Returns the details of a single image."""
         resp, body = self.get("images/%s" % str(image_id))
-        self.expected_success(200, resp)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         self.validate_response(schema.get_image, resp, body)
         return resp, body['image']
@@ -143,3 +143,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image'
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 29859a9..733a50b 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -143,3 +143,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'security_group'
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 673e365..309dc5b 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -116,3 +116,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
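Note: the resource_type property is added uniformly across the compute, image and volume clients in this change. One plausible consumer is generic wait/cleanup code that wants to report which kind of resource it is polling; the sketch below is an assumption about such a consumer, not code from this patch (wait_for_deletion and its parameters are made-up names).

    import time


    def wait_for_deletion(client, resource_id, timeout=60, interval=1):
        # Poll the client's own is_resource_deleted() check and use the new
        # resource_type property purely for a readable error message.
        start = time.time()
        while not client.is_resource_deleted(resource_id):
            if time.time() - start > timeout:
                raise RuntimeError("%s %s was not deleted within %ss"
                                   % (client.resource_type, resource_id,
                                      timeout))
            time.sleep(interval)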
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 960fe05..e11ed45 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -79,6 +79,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'aggregate'
+
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
         post_body = {
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index d1eee5b..fdca6b3 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -99,6 +99,11 @@
                 return False
         return True
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'flavor'
+
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
         post_body = json.dumps({'extra_specs': specs})
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
index 9c2d4aa..47cde65 100644
--- a/tempest/services/compute/xml/aggregates_client.py
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -94,6 +94,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'aggregate'
+
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
         post_body = xml_utils.Element("add_host", host=host)
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 68ef323..63d1a4d 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -136,6 +136,11 @@
                 return False
         return True
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'flavor'
+
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
         extra_specs = xml_utils.Element("extra_specs")
diff --git a/tempest/services/compute/xml/floating_ips_client.py b/tempest/services/compute/xml/floating_ips_client.py
index 730e870..84f06ab 100644
--- a/tempest/services/compute/xml/floating_ips_client.py
+++ b/tempest/services/compute/xml/floating_ips_client.py
@@ -108,6 +108,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'floating_ip'
+
     def list_floating_ip_pools(self, params=None):
         """Returns a list of all floating IP Pools."""
         url = 'os-floating-ip-pools'
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 6b15404..ce37b07 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -127,7 +127,7 @@
     def get_image(self, image_id):
         """Returns the details of a single image."""
         resp, body = self.get("images/%s" % str(image_id))
-        self.expected_success(200, resp)
+        self.expected_success(200, resp.status)
         body = self._parse_image(etree.fromstring(body))
         return resp, body
 
@@ -204,3 +204,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image'
diff --git a/tempest/services/compute/xml/security_groups_client.py b/tempest/services/compute/xml/security_groups_client.py
index 56ac7ba..e529623 100644
--- a/tempest/services/compute/xml/security_groups_client.py
+++ b/tempest/services/compute/xml/security_groups_client.py
@@ -159,3 +159,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'security_group'
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 156d889..06f1b83 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -349,8 +349,11 @@
             networks = xml_utils.Element("networks")
             server.append(networks)
             for network in kwargs['networks']:
-                s = xml_utils.Element("network", uuid=network['uuid'],
-                                      fixed_ip=network['fixed_ip'])
+                if 'fixed_ip' in network:
+                    s = xml_utils.Element("network", uuid=network['uuid'],
+                                          fixed_ip=network['fixed_ip'])
+                else:
+                    s = xml_utils.Element("network", uuid=network['uuid'])
                 networks.append(s)
 
         if 'meta' in kwargs:
diff --git a/tempest/services/compute/xml/volumes_extensions_client.py b/tempest/services/compute/xml/volumes_extensions_client.py
index e9c5035..da1764a 100644
--- a/tempest/services/compute/xml/volumes_extensions_client.py
+++ b/tempest/services/compute/xml/volumes_extensions_client.py
@@ -141,3 +141,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index df424ca..5ad416c 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -31,14 +31,11 @@
         self.endpoint_url = 'adminURL'
         self.api_version = "v3"
 
-    def create_user(self, user_name, **kwargs):
+    def create_user(self, user_name, password=None, project_id=None,
+                    email=None, domain_id='default', **kwargs):
         """Creates a user."""
-        password = kwargs.get('password', None)
-        email = kwargs.get('email', None)
         en = kwargs.get('enabled', True)
-        project_id = kwargs.get('project_id', None)
         description = kwargs.get('description', None)
-        domain_id = kwargs.get('domain_id', 'default')
         post_body = {
             'project_id': project_id,
             'description': description,
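Note: with this change create_user takes password, project_id, email and domain_id as explicit keyword arguments, while enabled and description still travel through **kwargs. A call sketch with placeholder values and an assumed, already-authenticated identity_client:

    # Sketch: calling the reworked signature; return handling is omitted.
    identity_client.create_user('demo-user',
                                password='secret',
                                project_id='PROJECT_ID',
                                email='demo@example.com',
                                domain_id='default',
                                enabled=True,             # still via **kwargs
                                description='illustration only')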
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 5c43692..fdc0a0a 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -95,14 +95,11 @@
         _json = common.xml_to_json(body)
         return _json
 
-    def create_user(self, user_name, **kwargs):
+    def create_user(self, user_name, password=None, project_id=None,
+                    email=None, domain_id='default', **kwargs):
         """Creates a user."""
-        password = kwargs.get('password', None)
-        email = kwargs.get('email', None)
         en = kwargs.get('enabled', 'true')
-        project_id = kwargs.get('project_id', None)
         description = kwargs.get('description', None)
-        domain_id = kwargs.get('domain_id', 'default')
         post_body = common.Element("user",
                                    xmlns=XMLNS,
                                    name=user_name,
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index bc5e04a..d0d32e5 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -240,6 +240,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image_meta'
+
     def get_image_membership(self, image_id):
         url = 'v1/images/%s/members' % image_id
         resp, body = self.get(url)
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index c420df9..4865073 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -117,6 +117,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image'
+
     def store_image(self, image_id, data):
         url = 'v2/images/%s/file' % image_id
         headers = {'Content-Type': 'application/octet-stream'}
diff --git a/tempest/services/messaging/json/messaging_client.py b/tempest/services/messaging/json/messaging_client.py
index 3e82399..2794ea9 100644
--- a/tempest/services/messaging/json/messaging_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -48,22 +48,26 @@
     def create_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.put(uri, body=None)
+        self.expected_success(201, resp.status)
         return resp, body
 
     def get_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
+        self.expected_success(204, resp.status)
         return resp, body
 
     def head_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.head(uri)
+        self.expected_success(204, resp.status)
         return resp, body
 
     def delete_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
-        resp = self.delete(uri)
-        return resp
+        resp, body = self.delete(uri)
+        self.expected_success(204, resp.status)
+        return resp, body
 
     def get_queue_stats(self, queue_name):
         uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
@@ -75,12 +79,14 @@
     def get_queue_metadata(self, queue_name):
         uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body
 
     def set_queue_metadata(self, queue_name, rbody):
         uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
         resp, body = self.put(uri, body=json.dumps(rbody))
+        self.expected_success(204, resp.status)
         return resp, body
 
     def post_messages(self, queue_name, rbody):
@@ -90,6 +96,7 @@
                                headers=self.headers)
 
         body = json.loads(body)
+        self.validate_response(queues_schema.post_messages, resp, body)
         return resp, body
 
     def list_messages(self, queue_name):
@@ -126,7 +133,7 @@
 
     def delete_messages(self, message_uri):
         resp, body = self.delete(message_uri)
-        assert(resp['status'] == '204')
+        self.expected_success(204, resp.status)
         return resp, body
 
     def post_claims(self, queue_name, rbody, url_params=False):
@@ -152,10 +159,10 @@
 
     def update_claim(self, claim_uri, rbody):
         resp, body = self.patch(claim_uri, body=json.dumps(rbody))
-        assert(resp['status'] == '204')
+        self.expected_success(204, resp.status)
         return resp, body
 
     def release_claim(self, claim_uri):
         resp, body = self.delete(claim_uri)
-        assert(resp['status'] == '204')
+        self.expected_success(204, resp.status)
         return resp, body
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 16a4f5c..78ed56f 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -320,3 +320,30 @@
         self.rest_client.expected_success(201, resp.status)
         body = json.loads(body)
         return resp, body
+
+    def insert_firewall_rule_in_policy(self, firewall_policy_id,
+                                       firewall_rule_id, insert_after="",
+                                       insert_before=""):
+        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        body = {
+            "firewall_rule_id": firewall_rule_id,
+            "insert_after": insert_after,
+            "insert_before": insert_before
+        }
+        body = json.dumps(body)
+        resp, body = self.put(uri, body)
+        self.rest_client.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body
+
+    def remove_firewall_rule_from_policy(self, firewall_policy_id,
+                                         firewall_rule_id):
+        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        update_body = {"firewall_rule_id": firewall_rule_id}
+        update_body = json.dumps(update_body)
+        resp, body = self.put(uri, update_body)
+        self.rest_client.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body
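Note: the two new calls wrap Neutron's insert_rule and remove_rule actions on a firewall policy. A short usage sketch with placeholder IDs and an assumed, already-authenticated net_client (a NetworkClientJSON instance):

    # Sketch: insert a rule into a policy, then remove it again.
    resp, body = net_client.insert_firewall_rule_in_policy(
        firewall_policy_id='POLICY_ID',
        firewall_rule_id='RULE_ID')   # insert_after/insert_before default to ""
    resp, body = net_client.remove_firewall_rule_from_policy(
        firewall_policy_id='POLICY_ID',
        firewall_rule_id='RULE_ID')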
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index 2b182d0..a84b4d5 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -52,7 +52,7 @@
         return
 
     @abc.abstractmethod
-    def show(self):
+    def refresh(self):
         return
 
     def __hash__(self):
@@ -62,7 +62,11 @@
         if not hasattr(self, 'status'):
             return
 
-        return self.client.wait_for_resource_status(self.show, status)
+        def helper_get():
+            self.refresh()
+            return self
+
+        return self.client.wait_for_resource_status(helper_get, status)
 
 
 class DeletableNetwork(DeletableResource):
@@ -116,6 +120,12 @@
 
 class DeletableFloatingIp(DeletableResource):
 
+    def refresh(self, *args, **kwargs):
+        _, result = self.client.show_floatingip(self.id,
+                                                *args,
+                                                **kwargs)
+        super(DeletableFloatingIp, self).update(**result['floatingip'])
+
     def update(self, *args, **kwargs):
         _, result = self.client.update_floatingip(self.id,
                                                   *args,
@@ -172,7 +182,6 @@
     def delete(self):
         self.client.delete_vip(self.id)
 
-    def show(self):
+    def refresh(self):
         _, result = self.client.show_vip(self.id)
-        super(DeletableVip, self).update(**result['vip'])
-        return self
+        super(DeletableVip, self).update(**result['vip'])
\ No newline at end of file
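Note: after the rename from show() to refresh(), subclasses are expected to re-read the resource and fold the new attributes into the object in place; wait_for_status() then drives refresh() through its helper_get() closure. A sketch of a subclass following that contract (DeletablePort, show_port and the 'port' response key are illustrative, not taken from this hunk):

    class DeletablePort(DeletableResource):

        def delete(self):
            self.client.delete_port(self.id)

        def refresh(self):
            # Re-read the resource and update self; no return value needed,
            # since wait_for_status() re-checks self.status after each call.
            _, result = self.client.show_port(self.id)
            super(DeletablePort, self).update(**result['port'])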
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 17b1f8e..c65390e 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -25,7 +25,8 @@
     # list of plurals used for xml serialization
     PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
                'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
-               'health_monitors', 'vips', 'members', 'allowed_address_pairs']
+               'health_monitors', 'vips', 'members', 'allowed_address_pairs',
+               'firewall_rules']
 
     def get_rest_client(self, auth_provider):
         rc = rest_client.RestClient(auth_provider)
@@ -281,6 +282,27 @@
         body = _root_tag_fetcher_and_xml_to_json_parse(body)
         return resp, body
 
+    def insert_firewall_rule_in_policy(self, firewall_policy_id,
+                                       firewall_rule_id, insert_after="",
+                                       insert_before=""):
+        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        rule = common.Element("firewall_rule_id", firewall_rule_id)
+        resp, body = self.put(uri, str(common.Document(rule)))
+        self.rest_client.expected_success(200, resp.status)
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
+    def remove_firewall_rule_from_policy(self, firewall_policy_id,
+                                         firewall_rule_id):
+        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        rule = common.Element("firewall_rule_id", firewall_rule_id)
+        resp, body = self.put(uri, str(common.Document(rule)))
+        self.rest_client.expected_success(200, resp.status)
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
 
 def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
     body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index 44ef9fe..eedf880 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -23,13 +23,13 @@
 CONF = config.CONF
 
 
-class VolumeTypesClientJSON(rest_client.RestClient):
+class BaseVolumeTypesClientJSON(rest_client.RestClient):
     """
     Client class to send CRUD Volume Types API requests to a Cinder endpoint
     """
 
     def __init__(self, auth_provider):
-        super(VolumeTypesClientJSON, self).__init__(auth_provider)
+        super(BaseVolumeTypesClientJSON, self).__init__(auth_provider)
 
         self.service = CONF.volume.catalog_type
         self.build_interval = CONF.volume.build_interval
@@ -55,6 +55,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-type/encryption-type'
+
     def list_volume_types(self, params=None):
         """List all the volume_types created."""
         url = 'types'
@@ -188,3 +193,7 @@
         resp, body = self.delete(
             "/types/%s/encryption/provider" % str(vol_type_id))
         self.expected_success(202, resp.status)
+
+
+class VolumeTypesClientJSON(BaseVolumeTypesClientJSON):
+    """Volume V1 Volume Types client"""
diff --git a/tempest/services/volume/json/qos_client.py b/tempest/services/volume/json/qos_client.py
index 6e0bee9..b647bc7 100644
--- a/tempest/services/volume/json/qos_client.py
+++ b/tempest/services/volume/json/qos_client.py
@@ -38,6 +38,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'qos'
+
     def wait_for_qos_operations(self, qos_id, operation, args=None):
         """Waits for a qos operations to be completed.
 
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index 1f8065b..e9d5b83 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -138,6 +138,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-snapshot'
+
     def reset_snapshot_status(self, snapshot_id, status):
         """Reset the specified snapshot's status."""
         post_body = json.dumps({'os-reset_status': {"status": status}})
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index c3a9269..cf2837b 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -187,6 +187,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
+
     def extend_volume(self, volume_id, extend_size):
         """Extend a volume."""
         post_body = {
diff --git a/tempest/openstack/common/config/__init__.py b/tempest/services/volume/v2/json/admin/__init__.py
similarity index 100%
rename from tempest/openstack/common/config/__init__.py
rename to tempest/services/volume/v2/json/admin/__init__.py
diff --git a/tempest/services/volume/v2/json/admin/volume_types_client.py b/tempest/services/volume/v2/json/admin/volume_types_client.py
new file mode 100644
index 0000000..76fa45d
--- /dev/null
+++ b/tempest/services/volume/v2/json/admin/volume_types_client.py
@@ -0,0 +1,28 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from tempest.services.volume.json.admin import volume_types_client
+
+
+class VolumeTypesV2ClientJSON(volume_types_client.BaseVolumeTypesClientJSON):
+    """
+    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
+    """
+
+    def __init__(self, auth_provider):
+        super(VolumeTypesV2ClientJSON, self).__init__(auth_provider)
+
+        self.api_version = "v2"
diff --git a/tempest/services/volume/xml/admin/volume_types_client.py b/tempest/services/volume/xml/admin/volume_types_client.py
index 679d097..2464016 100644
--- a/tempest/services/volume/xml/admin/volume_types_client.py
+++ b/tempest/services/volume/xml/admin/volume_types_client.py
@@ -205,3 +205,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-type'
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index ce98eea..fb591b1 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -153,6 +153,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-snapshot'
+
     def reset_snapshot_status(self, snapshot_id, status):
         """Reset the specified snapshot's status."""
         post_body = common.Element("os-reset_status", status=status)
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index a8c1ae5..0fe7e0d 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -226,6 +226,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
+
     def attach_volume(self, volume_id, instance_uuid, mountpoint):
         """Attaches a volume to a given instance on a given mountpoint."""
         post_body = common.Element("os-attach",
diff --git a/tempest/stress/actions/server_create_destroy.py b/tempest/stress/actions/server_create_destroy.py
index 4a9f0d5..34e299d 100644
--- a/tempest/stress/actions/server_create_destroy.py
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -28,15 +28,13 @@
     def run(self):
         name = data_utils.rand_name("instance")
         self.logger.info("creating %s" % name)
-        resp, server = self.manager.servers_client.create_server(
+        _, server = self.manager.servers_client.create_server(
             name, self.image, self.flavor)
         server_id = server['id']
-        assert(resp.status == 202)
         self.manager.servers_client.wait_for_server_status(server_id,
                                                            'ACTIVE')
         self.logger.info("created %s" % server_id)
         self.logger.info("deleting %s" % name)
-        resp, _ = self.manager.servers_client.delete_server(server_id)
-        assert(resp.status == 204)
+        self.manager.servers_client.delete_server(server_id)
         self.manager.servers_client.wait_for_server_termination(server_id)
         self.logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index d78112c..5bc8cac 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -74,19 +74,17 @@
         self.logger.info("creating %s" % name)
         vm_args = self.vm_extra_args.copy()
         vm_args['security_groups'] = [self.sec_grp]
-        resp, server = servers_client.create_server(name, self.image,
-                                                    self.flavor,
-                                                    **vm_args)
+        _, server = servers_client.create_server(name, self.image,
+                                                 self.flavor,
+                                                 **vm_args)
         self.server_id = server['id']
-        assert(resp.status == 202)
         if self.wait_after_vm_create:
             self.manager.servers_client.wait_for_server_status(self.server_id,
                                                                'ACTIVE')
 
     def _destroy_vm(self):
         self.logger.info("deleting %s" % self.server_id)
-        resp, _ = self.manager.servers_client.delete_server(self.server_id)
-        assert(resp.status == 204)  # It cannot be 204 if I had to wait..
+        self.manager.servers_client.delete_server(self.server_id)
         self.manager.servers_client.wait_for_server_termination(self.server_id)
         self.logger.info("deleted %s" % self.server_id)
 
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
index e0238d3..9c4070f 100644
--- a/tempest/stress/actions/volume_attach_delete.py
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -28,10 +28,9 @@
         # Step 1: create volume
         name = data_utils.rand_name("volume")
         self.logger.info("creating volume: %s" % name)
-        resp, volume = self.manager.volumes_client.create_volume(
+        _, volume = self.manager.volumes_client.create_volume(
             size=1,
             display_name=name)
-        assert(resp.status == 200)
         self.manager.volumes_client.wait_for_volume_status(volume['id'],
                                                            'available')
         self.logger.info("created volume: %s" % volume['id'])
@@ -39,20 +38,18 @@
         # Step 2: create vm instance
         vm_name = data_utils.rand_name("instance")
         self.logger.info("creating vm: %s" % vm_name)
-        resp, server = self.manager.servers_client.create_server(
+        _, server = self.manager.servers_client.create_server(
             vm_name, self.image, self.flavor)
         server_id = server['id']
-        assert(resp.status == 202)
         self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
         self.logger.info("created vm %s" % server_id)
 
         # Step 3: attach volume to vm
         self.logger.info("attach volume (%s) to vm %s" %
                          (volume['id'], server_id))
-        resp, body = self.manager.servers_client.attach_volume(server_id,
-                                                               volume['id'],
-                                                               '/dev/vdc')
-        assert(resp.status == 200)
+        self.manager.servers_client.attach_volume(server_id,
+                                                  volume['id'],
+                                                  '/dev/vdc')
         self.manager.volumes_client.wait_for_volume_status(volume['id'],
                                                            'in-use')
         self.logger.info("volume (%s) attached to vm %s" %
@@ -60,14 +57,12 @@
 
         # Step 4: delete vm
         self.logger.info("deleting vm: %s" % vm_name)
-        resp, _ = self.manager.servers_client.delete_server(server_id)
-        assert(resp.status == 204)
+        self.manager.servers_client.delete_server(server_id)
         self.manager.servers_client.wait_for_server_termination(server_id)
         self.logger.info("deleted vm: %s" % server_id)
 
         # Step 5: delete volume
         self.logger.info("deleting volume: %s" % volume['id'])
-        resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
-        assert(resp.status == 202)
+        self.manager.volumes_client.delete_volume(volume['id'])
         self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
         self.logger.info("deleted volume: %s" % volume['id'])
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index 0d3cb23..a13d890 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -24,12 +24,10 @@
 
     def _create_keypair(self):
         keyname = data_utils.rand_name("key")
-        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
-        assert(resp.status == 200)
+        _, self.key = self.manager.keypairs_client.create_keypair(keyname)
 
     def _delete_keypair(self):
-        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
-        assert(resp.status == 202)
+        self.manager.keypairs_client.delete_keypair(self.key['name'])
 
     def _create_vm(self):
         self.name = name = data_utils.rand_name("instance")
@@ -38,18 +36,16 @@
         vm_args = self.vm_extra_args.copy()
         vm_args['security_groups'] = [self.sec_grp]
         vm_args['key_name'] = self.key['name']
-        resp, server = servers_client.create_server(name, self.image,
-                                                    self.flavor,
-                                                    **vm_args)
+        _, server = servers_client.create_server(name, self.image,
+                                                 self.flavor,
+                                                 **vm_args)
         self.server_id = server['id']
-        assert(resp.status == 202)
         self.manager.servers_client.wait_for_server_status(self.server_id,
                                                            'ACTIVE')
 
     def _destroy_vm(self):
         self.logger.info("deleting server: %s" % self.server_id)
-        resp, _ = self.manager.servers_client.delete_server(self.server_id)
-        assert(resp.status == 204)  # It cannot be 204 if I had to wait..
+        self.manager.servers_client.delete_server(self.server_id)
         self.manager.servers_client.wait_for_server_termination(self.server_id)
         self.logger.info("deleted server: %s" % self.server_id)
 
@@ -81,10 +77,9 @@
         name = data_utils.rand_name("volume")
         self.logger.info("creating volume: %s" % name)
         volumes_client = self.manager.volumes_client
-        resp, self.volume = volumes_client.create_volume(
+        _, self.volume = volumes_client.create_volume(
             size=1,
             display_name=name)
-        assert(resp.status == 200)
         volumes_client.wait_for_volume_status(self.volume['id'],
                                               'available')
         self.logger.info("created volume: %s" % self.volume['id'])
@@ -92,8 +87,7 @@
     def _delete_volume(self):
         self.logger.info("deleting volume: %s" % self.volume['id'])
         volumes_client = self.manager.volumes_client
-        resp, _ = volumes_client.delete_volume(self.volume['id'])
-        assert(resp.status == 202)
+        volumes_client.delete_volume(self.volume['id'])
         volumes_client.wait_for_resource_deletion(self.volume['id'])
         self.logger.info("deleted volume: %s" % self.volume['id'])
 
@@ -193,10 +187,9 @@
         servers_client = self.manager.servers_client
         self.logger.info("attach volume (%s) to vm %s" %
                          (self.volume['id'], self.server_id))
-        resp, body = servers_client.attach_volume(self.server_id,
-                                                  self.volume['id'],
-                                                  self.part_name)
-        assert(resp.status == 200)
+        servers_client.attach_volume(self.server_id,
+                                     self.volume['id'],
+                                     self.part_name)
         self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                            'in-use')
         if self.enable_ssh_verify:
@@ -204,9 +197,8 @@
                              % self.server_id)
             self.part_wait(self.attach_match_count)
 
-        resp, body = servers_client.detach_volume(self.server_id,
-                                                  self.volume['id'])
-        assert(resp.status == 202)
+        servers_client.detach_volume(self.server_id,
+                                     self.volume['id'])
         self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                            'available')
         if self.enable_ssh_verify:
diff --git a/tempest/stress/actions/volume_create_delete.py b/tempest/stress/actions/volume_create_delete.py
index 4e75be0..b1c5bb7 100644
--- a/tempest/stress/actions/volume_create_delete.py
+++ b/tempest/stress/actions/volume_create_delete.py
@@ -20,14 +20,12 @@
         name = data_utils.rand_name("volume")
         self.logger.info("creating %s" % name)
         volumes_client = self.manager.volumes_client
-        resp, volume = volumes_client.create_volume(size=1,
-                                                    display_name=name)
-        assert(resp.status == 200)
+        _, volume = volumes_client.create_volume(size=1,
+                                                 display_name=name)
         vol_id = volume['id']
         volumes_client.wait_for_volume_status(vol_id, 'available')
         self.logger.info("created %s" % volume['id'])
         self.logger.info("deleting %s" % name)
-        resp, _ = volumes_client.delete_volume(vol_id)
-        assert(resp.status == 202)
+        volumes_client.delete_volume(vol_id)
         volumes_client.wait_for_resource_deletion(vol_id)
         self.logger.info("deleted %s" % vol_id)
diff --git a/tempest/test.py b/tempest/test.py
index 4a22b1b..1c6265d 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -29,8 +29,8 @@
 import testtools
 
 from tempest import clients
+from tempest.common import credentials
 import tempest.common.generator.valid_generator as valid
-from tempest.common import isolated_creds
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import importutils
@@ -66,35 +66,6 @@
     return decorator
 
 
-def safe_setup(f):
-    """A decorator used to wrap the setUpClass for cleaning up resources
-       when setUpClass failed.
-
-    Deprecated, see:
-    http://specs.openstack.org/openstack/qa-specs/specs/resource-cleanup.html
-    """
-    @functools.wraps(f)
-    def decorator(cls):
-            try:
-                f(cls)
-            except Exception as se:
-                etype, value, trace = sys.exc_info()
-                if etype is cls.skipException:
-                    LOG.info("setUpClass skipped: %s:" % se)
-                else:
-                    LOG.exception("setUpClass failed: %s" % se)
-                try:
-                    cls.tearDownClass()
-                except Exception as te:
-                    LOG.exception("tearDownClass failed: %s" % te)
-                try:
-                    raise etype(value), None, trace
-                finally:
-                    del trace  # for avoiding circular refs
-
-    return decorator
-
-
 def get_service_list():
     service_list = {
         'compute': CONF.service_available.nova,
@@ -123,7 +94,7 @@
     def decorator(f):
         services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
                     'network', 'identity', 'object_storage', 'dashboard',
-                    'ceilometer', 'data_processing']
+                    'telemetry', 'data_processing']
         for service in args:
             if service not in services:
                 raise exceptions.InvalidServiceTag('%s is not a valid '
@@ -299,7 +270,14 @@
             try:
                 cls.tearDownClass()
             except Exception as te:
-                LOG.exception("tearDownClass failed: %s" % te)
+                tetype, _, _ = sys.exc_info()
+                # TODO(gmann): Until we split up resource_setup &
+                # resource_cleanup in a more structured way, log
+                # AttributeError as info instead of as an exception.
+                if tetype is AttributeError:
+                    LOG.info("tearDownClass failed: %s" % te)
+                else:
+                    LOG.exception("tearDownClass failed: %s" % te)
             try:
                 raise etype(value), None, trace
             finally:
@@ -362,31 +340,20 @@
         """
         Returns an OpenStack client manager
         """
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
-            cls.__name__, network_resources=cls.network_resources)
-
         force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
-        if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
-            creds = cls.isolated_creds.get_primary_creds()
-            if getattr(cls, '_interface', None):
-                os = clients.Manager(credentials=creds,
-                                     interface=cls._interface,
-                                     service=cls._service)
-            elif interface:
-                os = clients.Manager(credentials=creds,
-                                     interface=interface,
-                                     service=cls._service)
-            else:
-                os = clients.Manager(credentials=creds,
-                                     service=cls._service)
-        else:
-            if getattr(cls, '_interface', None):
-                os = clients.Manager(interface=cls._interface,
-                                     service=cls._service)
-            elif interface:
-                os = clients.Manager(interface=interface, service=cls._service)
-            else:
-                os = clients.Manager(service=cls._service)
+
+        cls.isolated_creds = credentials.get_isolated_credentials(
+            name=cls.__name__, network_resources=cls.network_resources,
+            force_tenant_isolation=force_tenant_isolation,
+        )
+
+        creds = cls.isolated_creds.get_primary_creds()
+        params = dict(credentials=creds, service=cls._service)
+        if getattr(cls, '_interface', None):
+            interface = cls._interface
+        if interface:
+            params['interface'] = interface
+        os = clients.Manager(**params)
         return os
 
     @classmethod
@@ -510,13 +477,9 @@
                                              "expected_result": expected_result
                                              }))
         if schema is not None:
-            for name, schema, expected_result in generator.generate(schema):
-                if (expected_result is None and
-                    "default_result_code" in description):
-                    expected_result = description["default_result_code"]
-                scenario_list.append((name,
-                                      {"schema": schema,
-                                       "expected_result": expected_result}))
+            for scenario in generator.generate_scenarios(schema):
+                scenario_list.append((scenario['_negtest_name'],
+                                      scenario))
         LOG.debug(scenario_list)
         return scenario_list
 
@@ -546,8 +509,14 @@
         """
         LOG.info("Executing %s" % description["name"])
         LOG.debug(description)
+        generator = importutils.import_class(
+            CONF.negative.test_generator)()
+        schema = description.get("json-schema", None)
         method = description["http-method"]
         url = description["url"]
+        expected_result = None
+        if "default_result_code" in description:
+            expected_result = description["default_result_code"]
 
         resources = [self.get_resource(r) for
                      r in description.get("resources", [])]
@@ -557,13 +526,19 @@
             # entry (see get_resource).
             # We just send a valid json-schema with it
             valid_schema = None
-            schema = description.get("json-schema", None)
             if schema:
                 valid_schema = \
                     valid.ValidTestGenerator().generate_valid(schema)
             new_url, body = self._http_arguments(valid_schema, url, method)
-        elif hasattr(self, "schema"):
-            new_url, body = self._http_arguments(self.schema, url, method)
+        elif hasattr(self, "_negtest_name"):
+            schema_under_test = \
+                valid.ValidTestGenerator().generate_valid(schema)
+            local_expected_result = \
+                generator.generate_payload(self, schema_under_test)
+            if local_expected_result is not None:
+                expected_result = local_expected_result
+            new_url, body = \
+                self._http_arguments(schema_under_test, url, method)
         else:
             raise Exception("testscenarios are not active. Please make sure "
                             "that your test runner supports the load_tests "
@@ -575,7 +550,7 @@
             client = self.client
         resp, resp_body = client.send_request(method, new_url,
                                               resources, body=body)
-        self._check_negative_response(resp.status, resp_body)
+        self._check_negative_response(expected_result, resp.status, resp_body)
 
     def _http_arguments(self, json_dict, url, method):
         LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
@@ -586,8 +561,7 @@
         else:
             return url, json.dumps(json_dict)
 
-    def _check_negative_response(self, result, body):
-        expected_result = getattr(self, "expected_result", None)
+    def _check_negative_response(self, expected_result, result, body):
         self.assertTrue(result >= 400 and result < 500 and result != 413,
                         "Expected client error, got %s:%s" %
                         (result, body))
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a28684e..6679c79 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -86,6 +86,24 @@
         self.assertIn('v2.0', versions)
         self.assertIn('v3.0', versions)
 
+    def test_verify_api_versions(self):
+        api_services = ['cinder', 'glance', 'keystone', 'nova']
+        fake_os = mock.MagicMock()
+        for svc in api_services:
+            m = 'verify_%s_api_versions' % svc
+            with mock.patch.object(verify_tempest_config, m) as verify_mock:
+                verify_tempest_config.verify_api_versions(fake_os, svc, True)
+                verify_mock.assert_called_once_with(fake_os, True)
+
+    def test_verify_api_versions_not_implemented(self):
+        api_services = ['cinder', 'glance', 'keystone', 'nova']
+        fake_os = mock.MagicMock()
+        for svc in api_services:
+            m = 'verify_%s_api_versions' % svc
+            with mock.patch.object(verify_tempest_config, m) as verify_mock:
+                verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
+                self.assertFalse(verify_mock.called)
+
     def test_verify_keystone_api_versions_no_v3(self):
         self.useFixture(mockpatch.PatchObject(
             verify_tempest_config, '_get_unversioned_endpoint',
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
index aee9805..554027f 100644
--- a/tempest/tests/common/utils/test_misc.py
+++ b/tempest/tests/common/utils/test_misc.py
@@ -82,7 +82,7 @@
         self.assertEqual(':tearDown', tearDown())
 
     def test_find_test_caller_teardown_class(self):
-        def tearDownClass(cls):
+        def tearDownClass(cls):  # noqa
             return misc.find_test_caller()
         self.assertEqual('TestMisc:tearDownClass',
                          tearDownClass(self.__class__))
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
index dddd083..fb1da43 100644
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -43,9 +43,9 @@
     def _check_prop_entries(self, result, entry):
         entries = [a for a in result if entry in a[0]]
         self.assertIsNotNone(entries)
-        self.assertIs(len(entries), 2)
+        self.assertGreater(len(entries), 1)
         for entry in entries:
-            self.assertIsNotNone(entry[1]['schema'])
+            self.assertIsNotNone(entry[1]['_negtest_name'])
 
     def _check_resource_entries(self, result, entry):
         entries = [a for a in result if entry in a[0]]
@@ -57,12 +57,11 @@
     def test_generate_scenario(self):
         scenarios = test.NegativeAutoTest.\
             generate_scenario(self.fake_input_desc)
-
         self.assertIsInstance(scenarios, list)
         for scenario in scenarios:
             self.assertIsInstance(scenario, tuple)
             self.assertIsInstance(scenario[0], str)
             self.assertIsInstance(scenario[1], dict)
-        self._check_prop_entries(scenarios, "prop_minRam")
-        self._check_prop_entries(scenarios, "prop_minDisk")
+        self._check_prop_entries(scenarios, "minRam")
+        self._check_prop_entries(scenarios, "minDisk")
         self._check_resource_entries(scenarios, "inv_res")
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index a7af619..2fa6933 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 import jsonschema
 import mock
 
@@ -86,15 +88,6 @@
 class BaseNegativeGenerator(object):
     types = ['string', 'integer', 'object']
 
-    fake_input_str = {"type": "string",
-                      "minLength": 2,
-                      "maxLength": 8,
-                      'results': {'gen_int': 404}}
-
-    fake_input_int = {"type": "integer",
-                      "maximum": 255,
-                      "minimum": 1}
-
     fake_input_obj = {"type": "object",
                       "properties": {"minRam": {"type": "integer"},
                                      "diskName": {"type": "string"},
@@ -106,31 +99,21 @@
         "type": "not_defined"
     }
 
-    def _validate_result(self, data):
-        self.assertTrue(isinstance(data, list))
-        for t in data:
-            self.assertIsInstance(t, tuple)
-            self.assertEqual(3, len(t))
-            self.assertIsInstance(t[0], str)
+    class fake_test_class(object):
+        def __init__(self, scenario):
+            for k, v in scenario.iteritems():
+                setattr(self, k, v)
 
-    def test_generate_string(self):
-        result = self.generator.generate(self.fake_input_str)
-        self._validate_result(result)
-
-    def test_generate_integer(self):
-        result = self.generator.generate(self.fake_input_int)
-        self._validate_result(result)
-
-    def test_generate_obj(self):
-        result = self.generator.generate(self.fake_input_obj)
-        self._validate_result(result)
+    def _validate_result(self, valid_schema, invalid_schema):
+        for k, v in valid_schema.iteritems():
+            self.assertTrue(k in invalid_schema)
 
     def test_generator_mandatory_functions(self):
         for data_type in self.types:
             self.assertIn(data_type, self.generator.types_dict)
 
     def test_generate_with_unknown_type(self):
-        self.assertRaises(TypeError, self.generator.generate,
+        self.assertRaises(TypeError, self.generator.generate_payload,
                           self.unknown_type_schema)
 
 
@@ -151,3 +134,16 @@
     def setUp(self):
         super(TestNegativeNegativeGenerator, self).setUp()
         self.generator = negative_generator.NegativeTestGenerator()
+
+    def test_generate_obj(self):
+        schema = self.fake_input_obj
+        scenarios = self.generator.generate_scenarios(schema)
+        for scenario in scenarios:
+            test = self.fake_test_class(scenario)
+            valid_schema = \
+                valid_generator.ValidTestGenerator().generate_valid(schema)
+            schema_under_test = copy.copy(valid_schema)
+            expected_result = \
+                self.generator.generate_payload(test, schema_under_test)
+            self.assertEqual(expected_result, None)
+            self._validate_result(valid_schema, schema_under_test)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 12104ec..32cefd0 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -97,6 +97,28 @@
                           self._test_services_helper, 'compute',
                           'volume')
 
+    def test_services_list(self):
+        service_list = test.get_service_list()
+        for service in service_list:
+            try:
+                self._test_services_helper(service)
+            except exceptions.InvalidServiceTag:
+                self.fail('%s is not listed in the valid service tag list'
+                          % service)
+            except KeyError:
+                # NOTE(mtreinish): This condition is to test for an entry in
+                # the outer decorator list but not in the service_list dict.
+                # However, because we're looping over the service_list dict
+                # it's unlikely we'll trigger this. So manual review is still
+                # needed for the list in the outer decorator.
+                self.fail('%s is in the list of valid service tags but there '
+                          'is no corresponding entry in the dict returned from'
+                          ' get_service_list()' % service)
+            except testtools.TestCase.skipException:
+                # Test didn't raise an exception because of an incorrect list
+                # entry, so move on to the next entry
+                continue
+
 
 class TestStressDecorator(BaseDecoratorsTest):
     def _test_stresstest_helper(self, expected_frequency='process',
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 37ad18e..6857461 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -47,13 +47,27 @@
     just assertTrue if the check is expected to fail and assertFalse if it
     should pass.
     """
-    def test_no_setupclass_for_unit_tests(self):
-        self.assertTrue(checks.no_setupclass_for_unit_tests(
+    def test_no_setup_teardown_class_for_tests(self):
+        self.assertTrue(checks.no_setup_teardown_class_for_tests(
             "  def setUpClass(cls):", './tempest/tests/fake_test.py'))
-        self.assertIsNone(checks.no_setupclass_for_unit_tests(
+        self.assertIsNone(checks.no_setup_teardown_class_for_tests(
             "  def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
-        self.assertFalse(checks.no_setupclass_for_unit_tests(
+        self.assertTrue(checks.no_setup_teardown_class_for_tests(
             "  def setUpClass(cls):", './tempest/api/fake_test.py'))
+        self.assertTrue(checks.no_setup_teardown_class_for_tests(
+            "  def setUpClass(cls):", './tempest/scenario/fake_test.py'))
+        self.assertFalse(checks.no_setup_teardown_class_for_tests(
+            "  def setUpClass(cls):", './tempest/test.py'))
+        self.assertTrue(checks.no_setup_teardown_class_for_tests(
+            "  def tearDownClass(cls):", './tempest/tests/fake_test.py'))
+        self.assertIsNone(checks.no_setup_teardown_class_for_tests(
+            "  def tearDownClass(cls): # noqa", './tempest/tests/fake_test.py'))
+        self.assertTrue(checks.no_setup_teardown_class_for_tests(
+            "  def tearDownClass(cls):", './tempest/api/fake_test.py'))
+        self.assertTrue(checks.no_setup_teardown_class_for_tests(
+            "  def tearDownClass(cls):", './tempest/scenario/fake_test.py'))
+        self.assertFalse(checks.no_setup_teardown_class_for_tests(
+            "  def tearDownClass(cls):", './tempest/test.py'))
 
     def test_import_no_clients_in_api(self):
         for client in checks.PYTHON_CLIENTS:
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 3496dce..62073bd 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -498,7 +498,10 @@
         def _volume_state():
             volume.update(validate=True)
             try:
-                if volume.status != "available":
+                # NOTE(gmann): Make sure the volume is attached. Checking
+                # that the status is not "available" is not enough, as the
+                # volume can also be in the "error" state.
+                if volume.status == "in-use":
                     volume.detach(force=True)
             except BaseException:
                 LOG.exception("Failed to detach volume %s" % volume)
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
index 0f0d77e..7b08695 100755
--- a/tools/config/check_uptodate.sh
+++ b/tools/config/check_uptodate.sh
@@ -15,7 +15,7 @@
 TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
 trap "rm -rf $TEMPDIR" EXIT
 
-tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+oslo-config-generator --config-file tools/config/config-generator.tempest.conf --output-file ${TEMPDIR}/${CFGFILE_NAME}
 if [ $? != 0 ]
 then
     exit 1
@@ -24,6 +24,6 @@
 if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
 then
    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
-   echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+   echo "${0##*/}: Please run tox -egenconfig."
    exit 1
 fi
diff --git a/tools/config/config-generator.tempest.conf b/tools/config/config-generator.tempest.conf
new file mode 100644
index 0000000..e5a02f8
--- /dev/null
+++ b/tools/config/config-generator.tempest.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+output_file = etc/tempest.conf.sample
+namespace = tempest.config
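
A minimal usage sketch, assuming oslo-config-generator is installed in the
environment: the sample config can be regenerated from the generator config
above, mirroring the invocation used by the updated check_uptodate.sh and the
genconfig tox environment (output_file in the generator config already points
at etc/tempest.conf.sample).

    # Sketch: regenerate the sample config from the tempest.config namespace.
    oslo-config-generator --config-file tools/config/config-generator.tempest.conf
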
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
deleted file mode 100755
index d22b2f0..0000000
--- a/tools/config/generate_sample.sh
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env bash
-
-# Generate sample configuration for your project.
-#
-# Aside from the command line flags, it also respects a config file which
-# should be named oslo.config.generator.rc and be placed in the same directory.
-#
-# You can then export the following variables:
-# TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options.
-# TEMPEST_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover.
-# TEMPEST_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing.
-
-print_hint() {
-    echo "Try \`${0##*/} --help' for more information." >&2
-}
-
-PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
-                 --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
-
-if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
-
-eval set -- "$PARSED_OPTIONS"
-
-while true; do
-    case "$1" in
-        -h|--help)
-            echo "${0##*/} [options]"
-            echo ""
-            echo "options:"
-            echo "-h, --help                show brief help"
-            echo "-b, --base-dir=DIR        project base directory"
-            echo "-p, --package-name=NAME   project package name"
-            echo "-o, --output-dir=DIR      file output directory"
-            echo "-m, --module=MOD          extra python module to interrogate for options"
-            echo "-l, --library=LIB         extra library that registers options for discovery"
-            exit 0
-            ;;
-        -b|--base-dir)
-            shift
-            BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
-            shift
-            ;;
-        -p|--package-name)
-            shift
-            PACKAGENAME=`echo $1`
-            shift
-            ;;
-        -o|--output-dir)
-            shift
-            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
-            shift
-            ;;
-        -m|--module)
-            shift
-            MODULES="$MODULES -m $1"
-            shift
-            ;;
-        -l|--library)
-            shift
-            LIBRARIES="$LIBRARIES -l $1"
-            shift
-            ;;
-        --)
-            break
-            ;;
-    esac
-done
-
-BASEDIR=${BASEDIR:-`pwd`}
-if ! [ -d $BASEDIR ]
-then
-    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
-elif [[ $BASEDIR != /* ]]
-then
-    BASEDIR=$(cd "$BASEDIR" && pwd)
-fi
-
-PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
-TARGETDIR=$BASEDIR/$PACKAGENAME
-if ! [ -d $TARGETDIR ]
-then
-    echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
-fi
-
-OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
-# NOTE(bnemec): Some projects put their sample config in etc/,
-#               some in etc/$PACKAGENAME/
-if [ -d $OUTPUTDIR/$PACKAGENAME ]
-then
-    OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
-elif ! [ -d $OUTPUTDIR ]
-then
-    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
-    exit 1
-fi
-
-BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
-find $TARGETDIR -type f -name "*.pyc" -delete
-FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
-        -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
-
-RC_FILE="`dirname $0`/oslo.config.generator.rc"
-if test -r "$RC_FILE"
-then
-    source "$RC_FILE"
-fi
-
-for filename in ${TEMPEST_CONFIG_GENERATOR_EXCLUDED_FILES}; do
-    FILES="${FILES[@]/$filename/}"
-done
-
-for mod in ${TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES}; do
-    MODULES="$MODULES -m $mod"
-done
-
-for lib in ${TEMPEST_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
-    LIBRARIES="$LIBRARIES -l $lib"
-done
-
-export EVENTLET_NO_GREENDNS=yes
-
-OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
-[ "$OS_VARS" ] && eval "unset \$OS_VARS"
-DEFAULT_MODULEPATH=tempest.openstack.common.config.generator
-MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
-OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
-python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
-if [ $? != 0 ]
-then
-    echo "Can not generate $OUTPUTFILE"
-    exit 1
-fi
-
-# Hook to allow projects to append custom config file snippets
-CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
-for CONCAT_FILE in $CONCAT_FILES; do
-    cat $CONCAT_FILE >> $OUTPUTFILE
-done
diff --git a/tools/config/oslo.config.generator.rc b/tools/config/oslo.config.generator.rc
deleted file mode 100644
index 303e156..0000000
--- a/tools/config/oslo.config.generator.rc
+++ /dev/null
@@ -1 +0,0 @@
-MODULEPATH=tempest.common.generate_sample_tempest
diff --git a/tox.ini b/tox.ini
index cab59a8..c07dbbf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,79 +3,81 @@
 minversion = 1.6
 skipsdist = True
 
-[testenv]
+[tempestenv]
+sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
          OS_TEST_PATH=./tempest/test_discover
+deps = -r{toxinidir}/requirements.txt
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+         OS_TEST_PATH=./tempest/tests
 usedevelop = True
 install_command = pip install -U {opts} {packages}
-whitelist_externals = bash
+whitelist_externals = *
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands = bash tools/pretty_tox.sh '{posargs}'
 
-
-[testenv:py26]
-setenv = OS_TEST_PATH=./tempest/tests
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
-
-[testenv:py33]
-setenv = OS_TEST_PATH=./tempest/tests
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
-
-[testenv:py34]
-setenv = OS_TEST_PATH=./tempest/tests
-         PYTHONHASHSEED=0
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
-
-[testenv:py27]
-setenv = OS_TEST_PATH=./tempest/tests
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+[testenv:genconfig]
+commands = oslo-config-generator --config-file tools/config/config-generator.tempest.conf
 
 [testenv:cover]
 setenv = OS_TEST_PATH=./tempest/tests
 commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
 
 [testenv:all]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
   bash tools/pretty_tox.sh '{posargs}'
 
 [testenv:full]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
   bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
 
 [testenv:full-serial]
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
   bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
 
-[testenv:testr-full]
-sitepackages = True
-commands =
-  bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
-
 [testenv:heat-slow]
-sitepackages = True
-setenv = OS_TEST_TIMEOUT=1200
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+         OS_TEST_TIMEOUT=1200
+deps = {[tempestenv]deps}
 # The regex below is used to select heat api/scenario tests tagged as slow.
 commands =
   bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
 
 [testenv:large-ops]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
   python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}'
 
 [testenv:smoke]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
    bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}'
 
 [testenv:smoke-serial]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 # This is still serial because neutron doesn't work with parallel. See:
 # https://bugs.launchpad.net/tempest/+bug/1216076 so the neutron smoke
 # job would fail if we moved it to parallel.
@@ -83,19 +85,17 @@
    bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}'
 
 [testenv:stress]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
-    run-tempest-stress -a -d 3600 -S
+    run-tempest-stress '{posargs}'
 
 [testenv:venv]
 commands = {posargs}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
 
 [testenv:docs]
 commands = python setup.py build_sphinx {posargs}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
 
 [testenv:pep8]
 setenv = PYTHONHASHSEED=0
@@ -103,9 +103,6 @@
    flake8 {posargs}
    {toxinidir}/tools/config/check_uptodate.sh
 
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-
 [hacking]
 local-check-factory = tempest.hacking.checks.factory
 import_exceptions = tempest.services
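
A minimal usage sketch, assuming a standard tox setup: the environments above
are invoked in the usual way, and any regex supplied after "--" is passed to
tools/pretty_tox.sh through {posargs}. The selector string below is
illustrative only.

    # Sketch: run the smoke set, a filtered subset of the full run, and
    # regenerate the sample config via the new genconfig environment.
    tox -esmoke
    tox -eall -- '^tempest\.api\.compute'
    tox -egenconfig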