Merge "Fixed a comment on dirty logs list"
diff --git a/HACKING.rst b/HACKING.rst
index 29d5bf4..fd63d64 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -259,16 +259,16 @@
 docstrings for the workflow in each test method can be used instead. A good
 example of this would be::
 
-    class TestVolumeBootPattern(manager.OfficialClientTest):
-    """
-    This test case attempts to reproduce the following steps:
+    class TestVolumeBootPattern(manager.ScenarioTest):
+        """
+        This test case attempts to reproduce the following steps:
 
-     * Create in Cinder some bootable volume importing a Glance image
-     * Boot an instance from the bootable volume
-     * Write content to the volume
-     * Delete an instance and Boot a new instance from the volume
-     * Check written content in the instance
-     * Create a volume snapshot while the instance is running
-     * Boot an additional instance from the new snapshot based volume
-     * Check written content in the instance booted from snapshot
-    """
+         * Create in Cinder some bootable volume importing a Glance image
+         * Boot an instance from the bootable volume
+         * Write content to the volume
+         * Delete an instance and Boot a new instance from the volume
+         * Check written content in the instance
+         * Create a volume snapshot while the instance is running
+         * Boot an additional instance from the new snapshot based volume
+         * Check written content in the instance booted from snapshot
+        """
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index dfcbaba..9a9952d 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,1172 +1,1117 @@
 [DEFAULT]
 
 #
-# Options defined in tempest.openstack.common.lockutils
+# From tempest.config
 #
 
 # Whether to disable inter-process locks (boolean value)
-#disable_process_locking=false
+#disable_process_locking = false
 
 # Directory to use for lock files. (string value)
-#lock_path=<None>
-
+#lock_path = <None>
 
 #
-# Options defined in tempest.openstack.common.log
+# From tempest.config
 #
 
-# Print debugging output (set logging level to DEBUG instead
-# of default WARNING level). (boolean value)
-#debug=false
+# Print debugging output (set logging level to DEBUG instead of
+# default WARNING level). (boolean value)
+#debug = false
 
-# Print more verbose output (set logging level to INFO instead
-# of default WARNING level). (boolean value)
-#verbose=false
+# Print more verbose output (set logging level to INFO instead of
+# default WARNING level). (boolean value)
+#verbose = false
 
-# Log output to standard error. (boolean value)
-#use_stderr=true
+#
+# From tempest.config
+#
 
-# Format string to use for log messages with context. (string
-# value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages without context.
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
 # (string value)
-#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Data to append to log format when level is DEBUG. (string
-# value)
-#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format.
-# (string value)
-#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
-
-# List of logger=LEVEL pairs. (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN
-
-# Enables or disables publication of error events. (boolean
-# value)
-#publish_errors=false
-
-# Enables or disables fatal status of deprecations. (boolean
-# value)
-#fatal_deprecations=false
-
-# The format for an instance that is passed with the log
-# message. (string value)
-#instance_format="[instance: %(uuid)s] "
-
-# The format for an instance UUID that is passed with the log
-# message. (string value)
-#instance_uuid_format="[instance: %(uuid)s] "
-
-# The name of a logging configuration file. This file is
-# appended to any existing logging configuration files. For
-# details about logging configuration files, see the Python
-# logging module documentation. (string value)
 # Deprecated group/name - [DEFAULT]/log_config
-#log_config_append=<None>
+#log_config_append = <None>
 
-# DEPRECATED. A logging.Formatter log message format string
-# which may use any of the available logging.LogRecord
-# attributes. This option is deprecated.  Please use
-# logging_context_format_string and
-# logging_default_format_string instead. (string value)
-#log_format=<None>
+# Format string for %%(asctime)s in log records. Default: %(default)s.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
 
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s . (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is
-# set, logging will go to stdout. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file=<None>
-
-# (Optional) The base directory used for relative --log-file
-# paths. (string value)
+# (Optional) The base directory used for relative --log-file paths.
+# (string value)
 # Deprecated group/name - [DEFAULT]/logdir
-#log_dir=<None>
+#log_dir = <None>
 
-# Use syslog for logging. Existing syslog format is DEPRECATED
-# during I, and will change in J to honor RFC5424. (boolean
-# value)
-#use_syslog=false
+# (Optional) Name of log file to output to. If no default is set,
+# logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
 
-# (Optional) Enables or disables syslog rfc5424 format for
-# logging. If enabled, prefixes the MSG part of the syslog
-# message with APP-NAME (RFC5424). The format without the APP-
-# NAME is deprecated in I, and will be removed in J. (boolean
-# value)
-#use_syslog_rfc_format=false
+# DEPRECATED. A logging.Formatter log message format string which may
+# use any of the available logging.LogRecord attributes. This option
+# is deprecated.  Please use logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format = <None>
 
 # Syslog facility to receive log lines. (string value)
-#syslog_log_facility=LOG_USER
+#syslog_log_facility = LOG_USER
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during
+# I, and will change in J to honor RFC5424. (boolean value)
+#use_syslog = false
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If
+# enabled, prefixes the MSG part of the syslog message with APP-NAME
+# (RFC5424). The format without the APP-NAME is deprecated in I, and
+# will be removed in J. (boolean value)
+#use_syslog_rfc_format = false
+
+#
+# From tempest.config
+#
+
+# Log output to standard error. (boolean value)
+#use_stderr = true
+
+#
+# From tempest.config
+#
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Format string to use for log messages without context. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
 
 
 [auth]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Path to the yaml file that contains the list of credentials
-# to use for running tests (string value)
-#test_accounts_file=etc/accounts.yaml
+# Allows test cases to create/destroy tenants and users. This option
+# requires that OpenStack Identity API admin credentials are known. If
+# false, isolated test cases and parallel execution can still be
+# achieved by configuring a list of test accounts (boolean value)
+# Deprecated group/name - [compute]/allow_tenant_isolation
+# Deprecated group/name - [orchestration]/allow_tenant_isolation
+#allow_tenant_isolation = false
+
+# If set to True it enables the Accounts provider, which locks
+# credentials to allow for parallel execution with pre-provisioned
+# accounts. It can only be used to run tests that ensure credentials
+# cleanup happens. It requires at least `2 * CONC` distinct accounts
+# configured in `test_accounts_file`, with CONC == the number of
+# concurrent test processes. (boolean value)
+#locking_credentials_provider = false
+
+# Path to the yaml file that contains the list of credentials to use
+# for running tests (string value)
+#test_accounts_file = etc/accounts.yaml
 
 
 [baremetal]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Catalog type of the baremetal provisioning service (string
-# value)
-#catalog_type=baremetal
+# Timeout for Ironic node to completely provision (integer value)
+#active_timeout = 300
 
-# Whether the Ironic nova-compute driver is enabled (boolean
+# Timeout for association of Nova instance and Ironic node (integer
 # value)
-#driver_enabled=false
+#association_timeout = 30
+
+# Catalog type of the baremetal provisioning service (string value)
+#catalog_type = baremetal
 
 # Driver name which Ironic uses (string value)
-#driver=fake
+#driver = fake
 
-# The endpoint type to use for the baremetal provisioning
-# service (string value)
-#endpoint_type=publicURL
+# Whether the Ironic nova-compute driver is enabled (boolean value)
+#driver_enabled = false
 
-# Timeout for Ironic node to completely provision (integer
-# value)
-#active_timeout=300
-
-# Timeout for association of Nova instance and Ironic node
-# (integer value)
-#association_timeout=30
+# The endpoint type to use for the baremetal provisioning service
+# (string value)
+#endpoint_type = publicURL
 
 # Timeout for Ironic power transitions. (integer value)
-#power_timeout=60
+#power_timeout = 60
 
 # Timeout for unprovisioning an Ironic node. (integer value)
-#unprovision_timeout=60
+#unprovision_timeout = 60
 
 
 [boto]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# EC2 URL (string value)
-#ec2_url=http://localhost:8773/services/Cloud
-
-# S3 URL (string value)
-#s3_url=http://localhost:8080
-
-# AWS Secret Key (string value)
-#aws_secret=<None>
-
-# AWS Access Key (string value)
-#aws_access=<None>
-
-# AWS Zone for EC2 tests (string value)
-#aws_zone=nova
-
-# S3 Materials Path (string value)
-#s3_materials_path=/opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
-
-# ARI Ramdisk Image manifest (string value)
-#ari_manifest=cirros-0.3.0-x86_64-initrd.manifest.xml
+# AKI Kernel Image manifest (string value)
+#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml
 
 # AMI Machine Image manifest (string value)
-#ami_manifest=cirros-0.3.0-x86_64-blank.img.manifest.xml
+#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml
 
-# AKI Kernel Image manifest (string value)
-#aki_manifest=cirros-0.3.0-x86_64-vmlinuz.manifest.xml
+# ARI Ramdisk Image manifest (string value)
+#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml
 
-# Instance type (string value)
-#instance_type=m1.tiny
+# AWS Access Key (string value)
+#aws_access = <None>
 
-# boto Http socket timeout (integer value)
-#http_socket_timeout=3
+# AWS Secret Key (string value)
+#aws_secret = <None>
 
-# boto num_retries on error (integer value)
-#num_retries=1
-
-# Status Change Timeout (integer value)
-#build_timeout=60
+# AWS Zone for EC2 tests (string value)
+#aws_zone = nova
 
 # Status Change Test Interval (integer value)
-#build_interval=1
+#build_interval = 1
+
+# Status Change Timeout (integer value)
+#build_timeout = 60
+
+# EC2 URL (string value)
+#ec2_url = http://localhost:8773/services/Cloud
+
+# boto Http socket timeout (integer value)
+#http_socket_timeout = 3
+
+# Instance type (string value)
+#instance_type = m1.tiny
+
+# boto num_retries on error (integer value)
+#num_retries = 1
+
+# S3 Materials Path (string value)
+#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
+
+# S3 URL (string value)
+#s3_url = http://localhost:8080
 
 
 [cli]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# enable cli tests (boolean value)
-#enabled=true
+# directory where python client binaries are located (string value)
+#cli_dir = /usr/local/bin
 
-# directory where python client binaries are located (string
-# value)
-#cli_dir=/usr/local/bin
+# enable cli tests (boolean value)
+#enabled = true
 
 # Whether the tempest run location has access to the *-manage
-# commands. In a pure blackbox environment it will not.
-# (boolean value)
-#has_manage=true
+# commands. In a pure blackbox environment it will not. (boolean
+# value)
+#has_manage = true
 
 # Number of seconds to wait on a CLI timeout (integer value)
-#timeout=15
+#timeout = 15
 
 
 [compute]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Allows test cases to create/destroy tenants and users. This
-# option enables isolated test cases and better parallel
-# execution, but also requires that OpenStack Identity API
-# admin credentials are known. (boolean value)
-#allow_tenant_isolation=false
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for an instance to build. (integer value)
+#build_timeout = 300
+
+# Catalog type of the Compute service. (string value)
+#catalog_type = compute
+
+# Catalog type of the Compute v3 service. (string value)
+#catalog_v3_type = computev3
+
+# The endpoint type to use for the compute service. (string value)
+#endpoint_type = publicURL
+
+# Visible fixed network name  (string value)
+#fixed_network_name = private
+
+# Valid primary flavor to use in tests. (string value)
+#flavor_ref = 1
+
+# Valid secondary flavor to be used in tests. (string value)
+#flavor_ref_alt = 2
+
+# Unallocated floating IP range, which will be used to test the
+# floating IP bulk feature for CRUD operation. (string value)
+#floating_ip_range = 10.0.0.0/29
+
+# Password used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate
+# image. (string value)
+#image_alt_ssh_user = root
 
 # Valid primary image reference to be used in tests. This is a
 # required option (string value)
-#image_ref=<None>
+#image_ref = <None>
 
-# Valid secondary image reference to be used in tests. This is
-# a required option, but if only one image is available
-# duplicate the value of image_ref above (string value)
-#image_ref_alt=<None>
-
-# Valid primary flavor to use in tests. (string value)
-#flavor_ref=1
-
-# Valid secondary flavor to be used in tests. (string value)
-#flavor_ref_alt=2
-
-# User name used to authenticate to an instance. (string
-# value)
-#image_ssh_user=root
+# Valid secondary image reference to be used in tests. This is a
+# required option, but if only one image is available duplicate the
+# value of image_ref above (string value)
+#image_ref_alt = <None>
 
 # Password used to authenticate to an instance. (string value)
-#image_ssh_password=password
+#image_ssh_password = password
 
-# User name used to authenticate to an instance using the
-# alternate image. (string value)
-#image_alt_ssh_user=root
-
-# Password used to authenticate to an instance using the
-# alternate image. (string value)
-#image_alt_ssh_password=password
-
-# Time in seconds between build status checks. (integer value)
-#build_interval=1
-
-# Timeout in seconds to wait for an instance to build.
-# (integer value)
-#build_timeout=300
-
-# Should the tests ssh to instances? (boolean value)
-#run_ssh=false
-
-# Auth method used for authenticate to the instance. Valid
-# choices are: keypair, configured, adminpass. keypair: start
-# the servers with an ssh keypair. configured: use the
-# configured user and password. adminpass: use the injected
-# adminPass. disabled: avoid using ssh when it is an option.
-# (string value)
-#ssh_auth_method=keypair
-
-# How to connect to the instance? fixed: using the first ip
-# belongs the fixed network floating: creating and using a
-# floating ip (string value)
-#ssh_connect_method=fixed
-
-# User name used to authenticate to an instance. (string
-# value)
-#ssh_user=root
-
-# Timeout in seconds to wait for ping to succeed. (integer
-# value)
-#ping_timeout=120
-
-# Timeout in seconds to wait for authentication to succeed.
-# (integer value)
-#ssh_timeout=300
-
-# Additional wait time for clean state, when there is no OS-
-# EXT-STS extension available (integer value)
-#ready_wait=0
-
-# Timeout in seconds to wait for output from ssh channel.
-# (integer value)
-#ssh_channel_timeout=60
-
-# Visible fixed network name  (string value)
-#fixed_network_name=private
-
-# Network used for SSH connections. (string value)
-#network_for_ssh=public
+# User name used to authenticate to an instance. (string value)
+#image_ssh_user = root
 
 # IP version used for SSH connections. (integer value)
-#ip_version_for_ssh=4
+#ip_version_for_ssh = 4
 
-# Does SSH use Floating IPs? (boolean value)
-#use_floatingip_for_ssh=true
+# Network used for SSH connections. (string value)
+#network_for_ssh = public
 
-# Catalog type of the Compute service. (string value)
-#catalog_type=compute
+# Path to a private key file for SSH access to remote hosts (string
+# value)
+#path_to_private_key = <None>
+
+# Timeout in seconds to wait for ping to succeed. (integer value)
+#ping_timeout = 120
+
+# Additional wait time for clean state, when there is no OS-EXT-STS
+# extension available (integer value)
+#ready_wait = 0
 
 # The compute region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Should the tests ssh to instances? (boolean value)
+#run_ssh = false
+
+# Time in seconds before a shelved instance is eligible for removal
+# from a host. -1: never offload; 0: offload when shelved. This time
+# should be the same as the value in nova.conf, and some tests will
+# run for as long as this time. (integer value)
+#shelved_offload_time = 0
+
+# Auth method used to authenticate to the instance. Valid choices
+# are: keypair, configured, adminpass. keypair: start the servers with
+# an ssh keypair. configured: use the configured user and password.
+# adminpass: use the injected adminPass. disabled: avoid using ssh
+# when it is an option. (string value)
+#ssh_auth_method = keypair
+
+# Timeout in seconds to wait for output from ssh channel. (integer
 # value)
-#region=
+#ssh_channel_timeout = 60
 
-# The endpoint type to use for the compute service. (string
+# How to connect to the instance? fixed: using the first ip that
+# belongs to the fixed network. floating: creating and using a
+# floating ip (string value)
-#endpoint_type=publicURL
+#ssh_connect_method = fixed
 
-# Catalog type of the Compute v3 service. (string value)
-#catalog_v3_type=computev3
+# Timeout in seconds to wait for authentication to succeed. (integer
+# value)
+#ssh_timeout = 300
 
-# Path to a private key file for SSH access to remote hosts
+# User name used to authenticate to an instance. (string value)
+#ssh_user = root
+
+# Does SSH use Floating IPs? (boolean value)
+#use_floatingip_for_ssh = true
+
+# Expected device name when a volume is attached to an instance
 # (string value)
-#path_to_private_key=<None>
-
-# Expected device name when a volume is attached to an
-# instance (string value)
-#volume_device_name=vdb
-
-# Time in seconds before a shelved instance is eligible for
-# removing from a host.  -1 never offload, 0 offload when
-# shelved. This time should be the same as the time of
-# nova.conf, and some tests will run for as long as the time.
-# (integer value)
-#shelved_offload_time=0
-
-# Unallocated floating IP range, which will be used to test
-# the floating IP bulk feature for CRUD operation. (string
-# value)
-#floating_ip_range=10.0.0.0/29
-
-# Allows test cases to create/destroy tenants and users. This
-# option enables isolated test cases and better parallel
-# execution, but also requires that OpenStack Identity API
-# admin credentials are known. (boolean value)
-#allow_tenant_isolation=false
-
-# Time in seconds between build status checks. (integer value)
-#build_interval=1
+#volume_device_name = vdb
 
 
 [compute-admin]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Administrative Username to use for Nova API requests.
-# (string value)
-#username=<None>
-
-# Administrative Tenant name to use for Nova API requests.
-# (string value)
-#tenant_name=<None>
+# Domain name for authentication as admin (Keystone V3). The same
+# domain applies to user and project (string value)
+#domain_name = <None>
 
 # API key to use when authenticating as admin. (string value)
-#password=<None>
+#password = <None>
 
-# Domain name for authentication as admin (Keystone V3).The
-# same domain applies to user and project (string value)
-#domain_name=<None>
+# Administrative Tenant name to use for Nova API requests. (string
+# value)
+#tenant_name = <None>
+
+# Administrative Username to use for Nova API requests. (string value)
+#username = <None>
 
 
 [compute-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# If false, skip all nova v3 tests. (boolean value)
-#api_v3=false
+# A list of enabled compute extensions with a special entry all which
+# indicates every extension is enabled. Each extension should be
+# specified with alias name. Empty list indicates all extensions are
+# disabled (list value)
+#api_extensions = all
 
-# If false skip all v2 api tests with xml (boolean value)
-#xml_api_v2=true
+# If false, skip all nova v3 tests. (boolean value)
+#api_v3 = false
+
+# A list of enabled v3 extensions with a special entry all which
+# indicates every extension is enabled. Each extension should be
+# specified with alias name. Empty list indicates all extensions are
+# disabled (list value)
+#api_v3_extensions = all
+
+# Does the test environment's block migration support cinder iSCSI
+# volumes (boolean value)
+#block_migrate_cinder_iscsi = false
+
+# Does the test environment use block devices for live migration
+# (boolean value)
+#block_migration_for_live_migration = false
+
+# Does the test environment support changing the admin password?
+# (boolean value)
+#change_password = false
+
+# Does the test environment support obtaining instance serial console
+# output? (boolean value)
+#console_output = true
 
 # If false, skip disk config tests (boolean value)
-#disk_config=true
+#disk_config = true
 
-# A list of enabled compute extensions with a special entry
-# all which indicates every extension is enabled. Each
-# extension should be specified with alias name. Empty list
-# indicates all extensions are disabled (list value)
-#api_extensions=all
-
-# A list of enabled v3 extensions with a special entry all
-# which indicates every extension is enabled. Each extension
-# should be specified with alias name. Empty list indicates
-# all extensions are disabled (list value)
-#api_v3_extensions=all
-
-# Does the test environment support changing the admin
-# password? (boolean value)
-#change_password=false
-
-# Does the test environment support obtaining instance serial
-# console output? (boolean value)
-#console_output=true
-
-# Does the test environment support resizing? (boolean value)
-#resize=false
-
-# Does the test environment support pausing? (boolean value)
-#pause=true
-
-# Does the test environment support shelving/unshelving?
-# (boolean value)
-#shelve=true
-
-# Does the test environment support suspend/resume? (boolean
-# value)
-#suspend=true
-
-# Does the test environment support live migration available?
-# (boolean value)
-#live_migration=false
-
-# Does the test environment use block devices for live
-# migration (boolean value)
-#block_migration_for_live_migration=false
-
-# Does the test environment block migration support cinder
-# iSCSI volumes (boolean value)
-#block_migrate_cinder_iscsi=false
-
-# Enable VNC console. This configuration value should be same
-# as [nova.vnc]->vnc_enabled in nova.conf (boolean value)
-#vnc_console=false
-
-# Enable Spice console. This configuration value should be
-# same as [nova.spice]->enabled in nova.conf (boolean value)
-#spice_console=false
-
-# Enable RDP console. This configuration value should be same
-# as [nova.rdp]->enabled in nova.conf (boolean value)
-#rdp_console=false
-
-# Does the test environment support instance rescue mode?
-# (boolean value)
-#rescue=true
-
-# Enables returning of the instance password by the relevant
-# server API calls such as create, rebuild or rescue. (boolean
-# value)
-#enable_instance_password=true
+# Enables returning of the instance password by the relevant server
+# API calls such as create, rebuild or rescue. (boolean value)
+#enable_instance_password = true
 
 # Does the test environment support dynamic network interface
 # attachment? (boolean value)
-#interface_attach=true
+#interface_attach = true
 
-# Does the test environment support creating snapshot images
-# of running instances? (boolean value)
-#snapshot=true
+# Does the test environment support live migration available? (boolean
+# value)
+#live_migration = false
+
+# Does the test environment support pausing? (boolean value)
+#pause = true
+
+# Enable RDP console. This configuration value should be same as
+# [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console = false
+
+# Does the test environment support instance rescue mode? (boolean
+# value)
+#rescue = true
+
+# Does the test environment support resizing? (boolean value)
+#resize = false
+
+# Does the test environment support shelving/unshelving? (boolean
+# value)
+#shelve = true
+
+# Does the test environment support creating snapshot images of
+# running instances? (boolean value)
+#snapshot = true
+
+# Enable Spice console. This configuration value should be same as
+# [nova.spice]->enabled in nova.conf (boolean value)
+#spice_console = false
+
+# Does the test environment support suspend/resume? (boolean value)
+#suspend = true
+
+# Enable VNC console. This configuration value should be same as
+# [nova.vnc]->vnc_enabled in nova.conf (boolean value)
+#vnc_console = false
+
+# If false skip all v2 api tests with xml (boolean value)
+#xml_api_v2 = true
 
 
 [dashboard]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Where the dashboard can be found (string value)
-#dashboard_url=http://localhost/
+#dashboard_url = http://localhost/
 
 # Login page for the dashboard (string value)
-#login_url=http://localhost/auth/login/
+#login_url = http://localhost/auth/login/
 
 
 [data_processing]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the data processing service. (string value)
-#catalog_type=data_processing
+#catalog_type = data_processing
 
-# The endpoint type to use for the data processing service.
-# (string value)
-#endpoint_type=publicURL
+# The endpoint type to use for the data processing service. (string
+# value)
+#endpoint_type = publicURL
 
 
 [database]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Database service. (string value)
-#catalog_type=database
+#catalog_type = database
 
-# Valid primary flavor to use in database tests. (string
-# value)
-#db_flavor_ref=1
+# Current database version to use in database tests. (string value)
+#db_current_version = v1.0
 
-# Current database version to use in database tests. (string
-# value)
-#db_current_version=v1.0
+# Valid primary flavor to use in database tests. (string value)
+#db_flavor_ref = 1
 
 
 [debug]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Enable diagnostic commands (boolean value)
-#enable=true
+#enable = true
 
-# A regex to determine which requests should be traced.  This
-# is a regex to match the caller for rest client requests to
-# be able to selectively trace calls out of specific classes
-# and methods. It largely exists for test development, and is
-# not expected to be used in a real deploy of tempest. This
-# will be matched against the discovered ClassName:method in
-# the test environment.  Expected values for this field are:
-# * ClassName:test_method_name - traces one test_method  *
-# ClassName:setUp(Class) - traces specific setup functions  *
-# ClassName:tearDown(Class) - traces specific teardown
-# functions  * ClassName:_run_cleanups - traces the cleanup
-# functions  If nothing is specified, this feature is not
-# enabled. To trace everything specify .* as the regex.
-# (string value)
-#trace_requests=
+# A regex to determine which requests should be traced.  This is a
+# regex to match the caller for rest client requests to be able to
+# selectively trace calls out of specific classes and methods. It
+# largely exists for test development, and is not expected to be used
+# in a real deploy of tempest. This will be matched against the
+# discovered ClassName:method in the test environment.  Expected
+# values for this field are:   * ClassName:test_method_name - traces
+# one test_method  * ClassName:setUp(Class) - traces specific setup
+# functions  * ClassName:tearDown(Class) - traces specific teardown
+# functions  * ClassName:_run_cleanups - traces the cleanup functions
+# If nothing is specified, this feature is not enabled. To trace
+# everything specify .* as the regex.  (string value)
+#trace_requests =
 
 
 [identity]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Catalog type of the Identity service. (string value)
-#catalog_type=identity
-
-# Set to True if using self-signed SSL certificates. (boolean
-# value)
-#disable_ssl_certificate_validation=false
-
-# Full URI of the OpenStack Identity API (Keystone), v2
-# (string value)
-#uri=<None>
-
-# Full URI of the OpenStack Identity API (Keystone), v3
-# (string value)
-#uri_v3=<None>
-
-# Identity API version to be used for authentication for API
-# tests. (string value)
-#auth_version=v2
-
-# The identity region name to use. Also used as the other
-# services' region name unless they are set explicitly. If no
-# such region is found in the service catalog, the first found
-# one is used. (string value)
-#region=RegionOne
-
-# The endpoint type to use for the identity service. (string
-# value)
-#endpoint_type=publicURL
-
-# Username to use for Nova API requests. (string value)
-#username=<None>
-
-# Tenant name to use for Nova API requests. (string value)
-#tenant_name=<None>
-
-# Role required to administrate keystone. (string value)
-#admin_role=admin
-
-# API key to use when authenticating. (string value)
-#password=<None>
-
-# Domain name for authentication (Keystone V3).The same domain
+# Admin domain name for authentication (Keystone V3). The same domain
 # applies to user and project (string value)
-#domain_name=<None>
-
-# Username of alternate user to use for Nova API requests.
-# (string value)
-#alt_username=<None>
-
-# Alternate user's Tenant name to use for Nova API requests.
-# (string value)
-#alt_tenant_name=<None>
-
-# API key to use when authenticating as alternate user.
-# (string value)
-#alt_password=<None>
-
-# Alternate domain name for authentication (Keystone V3).The
-# same domain applies to user and project (string value)
-#alt_domain_name=<None>
-
-# Administrative Username to use for Keystone API requests.
-# (string value)
-#admin_username=<None>
-
-# Administrative Tenant name to use for Keystone API requests.
-# (string value)
-#admin_tenant_name=<None>
+#admin_domain_name = <None>
 
 # API key to use when authenticating as admin. (string value)
-#admin_password=<None>
+#admin_password = <None>
 
-# Admin domain name for authentication (Keystone V3).The same
+# Role required to administrate keystone. (string value)
+#admin_role = admin
+
+# Administrative Tenant name to use for Keystone API requests. (string
+# value)
+#admin_tenant_name = <None>
+
+# Administrative Username to use for Keystone API requests. (string
+# value)
+#admin_username = <None>
+
+# Alternate domain name for authentication (Keystone V3). The same
 # domain applies to user and project (string value)
-#admin_domain_name=<None>
+#alt_domain_name = <None>
+
+# API key to use when authenticating as alternate user. (string value)
+#alt_password = <None>
+
+# Alternate user's Tenant name to use for Nova API requests. (string
+# value)
+#alt_tenant_name = <None>
+
+# Username of alternate user to use for Nova API requests. (string
+# value)
+#alt_username = <None>
+
+# Identity API version to be used for authentication for API tests.
+# (string value)
+#auth_version = v2
+
+# Catalog type of the Identity service. (string value)
+#catalog_type = identity
+
+# Set to True if using self-signed SSL certificates. (boolean value)
+#disable_ssl_certificate_validation = false
+
+# Domain name for authentication (Keystone V3). The same domain applies
+# to user and project (string value)
+#domain_name = <None>
+
+# The endpoint type to use for the identity service. (string value)
+#endpoint_type = publicURL
+
+# API key to use when authenticating. (string value)
+#password = <None>
+
+# The identity region name to use. Also used as the other services'
+# region name unless they are set explicitly. If no such region is
+# found in the service catalog, the first found one is used. (string
+# value)
+#region = RegionOne
+
+# Tenant name to use for Nova API requests. (string value)
+#tenant_name = <None>
+
+# Full URI of the OpenStack Identity API (Keystone), v2 (string value)
+#uri = <None>
+
+# Full URI of the OpenStack Identity API (Keystone), v3 (string value)
+#uri_v3 = <None>
+
+# Username to use for Nova API requests. (string value)
+#username = <None>
 
 
 [identity-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Does the identity service have delegation and impersonation
-# enabled (boolean value)
-#trust=true
-
 # Is the v2 identity API enabled (boolean value)
-#api_v2=true
+#api_v2 = true
 
 # Is the v3 identity API enabled (boolean value)
-#api_v3=true
+#api_v3 = true
+
+# Does the identity service have delegation and impersonation enabled
+# (boolean value)
+#trust = true
 
 
 [image]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Image service. (string value)
-#catalog_type=image
+#catalog_type = image
 
-# The image region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
-# value)
-#region=
-
-# The endpoint type to use for the image service. (string
-# value)
-#endpoint_type=publicURL
+# The endpoint type to use for the image service. (string value)
+#endpoint_type = publicURL
 
 # http accessible image (string value)
-#http_image=http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+#http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+
+# The image region name to use. If empty, the value of identity.region
+# is used instead. If no such region is found in the service catalog,
+# the first found one is used. (string value)
+#region =
 
 
 [image-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Is the v2 image API enabled (boolean value)
-#api_v2=true
-
 # Is the v1 image API enabled (boolean value)
-#api_v1=true
+#api_v1 = true
+
+# Is the v2 image API enabled (boolean value)
+#api_v2 = true
 
 
 [input-scenario]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Matching images become parameters for scenario tests (string
+# Matching flavors become parameters for scenario tests (string value)
+#flavor_regex = ^m1.nano$
+
+# Matching images become parameters for scenario tests (string value)
+#image_regex = ^cirros-0.3.1-x86_64-uec$
+
+# SSH verification in tests is skipped for matching images (string
 # value)
-#image_regex=^cirros-0.3.1-x86_64-uec$
+#non_ssh_image_regex = ^.*[Ww]in.*$
 
-# Matching flavors become parameters for scenario tests
-# (string value)
-#flavor_regex=^m1.nano$
-
-# SSH verification in tests is skippedfor matching images
-# (string value)
-#non_ssh_image_regex=^.*[Ww]in.*$
-
-# List of user mapped to regex to matching image names.
-# (string value)
-#ssh_user_regex=[["^.*[Cc]irros.*$", "root"]]
+# List of users mapped to regexes matching image names. (string value)
+#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]]
 
 
 [messaging]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Messaging service. (string value)
-#catalog_type=messaging
-
-# The maximum number of queue records per page when listing
-# queues (integer value)
-#max_queues_per_page=20
-
-# The maximum metadata size for a queue (integer value)
-#max_queue_metadata=65536
-
-# The maximum number of queue message per page when listing
-# (or) posting messages (integer value)
-#max_messages_per_page=20
-
-# The maximum size of a message body (integer value)
-#max_message_size=262144
-
-# The maximum number of messages per claim (integer value)
-#max_messages_per_claim=20
-
-# The maximum ttl for a message (integer value)
-#max_message_ttl=1209600
-
-# The maximum ttl for a claim (integer value)
-#max_claim_ttl=43200
+#catalog_type = messaging
 
 # The maximum grace period for a claim (integer value)
-#max_claim_grace=43200
+#max_claim_grace = 43200
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl = 43200
+
+# The maximum size of a message body (integer value)
+#max_message_size = 262144
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl = 1209600
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim = 20
+
+# The maximum number of queue message per page when listing (or)
+# posting messages (integer value)
+#max_messages_per_page = 20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata = 65536
+
+# The maximum number of queue records per page when listing queues
+# (integer value)
+#max_queues_per_page = 20
 
 
 [negative]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Test generator class for all negative tests (string value)
-#test_generator=tempest.common.generator.negative_generator.NegativeTestGenerator
+#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator
 
 
 [network]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# Time in seconds between network operation status checks. (integer
+# value)
+#build_interval = 1
+
+# Timeout in seconds to wait for network operation to complete.
+# (integer value)
+#build_timeout = 300
+
 # Catalog type of the Neutron service. (string value)
-#catalog_type=network
+#catalog_type = network
+
+# List of dns servers which should be used for subnet creation (list
+# value)
+#dns_servers = 8.8.8.8,8.8.4.4
+
+# The endpoint type to use for the network service. (string value)
+#endpoint_type = publicURL
+
+# Id of the public network that provides external connectivity (string
+# value)
+#public_network_id =
+
+# Id of the public router that provides external connectivity (string
+# value)
+#public_router_id =
 
 # The network region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
-# value)
-#region=
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
 
-# The endpoint type to use for the network service. (string
-# value)
-#endpoint_type=publicURL
-
-# The cidr block to allocate tenant ipv4 subnets from (string
-# value)
-#tenant_network_cidr=10.100.0.0/16
+# The cidr block to allocate tenant ipv4 subnets from (string value)
+#tenant_network_cidr = 10.100.0.0/16
 
 # The mask bits for tenant ipv4 subnets (integer value)
-#tenant_network_mask_bits=28
+#tenant_network_mask_bits = 28
 
-# The cidr block to allocate tenant ipv6 subnets from (string
-# value)
-#tenant_network_v6_cidr=2003::/48
+# The cidr block to allocate tenant ipv6 subnets from (string value)
+#tenant_network_v6_cidr = 2003::/48
 
 # The mask bits for tenant ipv6 subnets (integer value)
-#tenant_network_v6_mask_bits=64
+#tenant_network_v6_mask_bits = 64
 
-# Whether tenant network connectivity should be evaluated
-# directly (boolean value)
-#tenant_networks_reachable=false
-
-# Id of the public network that provides external connectivity
-# (string value)
-#public_network_id=
-
-# Id of the public router that provides external connectivity
-# (string value)
-#public_router_id=
-
-# Timeout in seconds to wait for network operation to
-# complete. (integer value)
-#build_timeout=300
-
-# Time in seconds between network operation status checks.
-# (integer value)
-#build_interval=1
-
-# List of dns servers whichs hould be used for subnet creation
-# (list value)
-#dns_servers=8.8.8.8,8.8.4.4
+# Whether tenant network connectivity should be evaluated directly
+# (boolean value)
+#tenant_networks_reachable = false
 
 
 [network-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# A list of enabled network extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
+
 # Allow the execution of IPv6 tests (boolean value)
-#ipv6=true
+#ipv6 = true
 
-# A list of enabled network extensions with a special entry
-# all which indicates every extension is enabled. Empty list
-# indicates all extensions are disabled (list value)
-#api_extensions=all
+# Allow the execution of IPv6 subnet tests that use the extended IPv6
+# attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
+#ipv6_subnet_attributes = false
 
-# Allow the execution of IPv6 subnet tests that use the
-# extended IPv6 attributes ipv6_ra_mode and ipv6_address_mode
-# (boolean value)
-#ipv6_subnet_attributes=false
+# If false, skip all network api tests with xml (boolean value)
+#xml_api = false
 
 
 [object-storage]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Object-Storage service. (string value)
-#catalog_type=object-store
+#catalog_type = object-store
 
-# The object-storage region name to use. If empty, the value
-# of identity.region is used instead. If no such region is
-# found in the service catalog, the first found one is used.
-# (string value)
-#region=
+# Number of seconds to wait while looping to check the status of a
+# container to container synchronization (integer value)
+#container_sync_interval = 5
 
-# The endpoint type to use for the object-store service.
-# (string value)
-#endpoint_type=publicURL
+# Number of seconds to wait for a container to container
+# synchronization to complete. (integer value)
+#container_sync_timeout = 120
 
-# Number of seconds to time on waiting for a container to
-# container synchronization complete. (integer value)
-#container_sync_timeout=120
+# The endpoint type to use for the object-store service. (string
+# value)
+#endpoint_type = publicURL
 
-# Number of seconds to wait while looping to check the status
-# of a container to container synchronization (integer value)
-#container_sync_interval=5
+# Role to add to users created for swift tests to enable creating
+# containers (string value)
+#operator_role = Member
 
-# Role to add to users created for swift tests to enable
-# creating containers (string value)
-#operator_role=Member
+# The object-storage region name to use. If empty, the value of
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
 
 # User role that has reseller admin (string value)
-#reseller_admin_role=ResellerAdmin
+#reseller_admin_role = ResellerAdmin
 
 
 [object-storage-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# A list of the enabled optional discoverable apis. A single
-# entry, all, indicates that all of these features are
-# expected to be enabled (list value)
-#discoverable_apis=all
-
 # Execute (old style) container-sync tests (boolean value)
-#container_sync=true
-
-# Execute object-versioning tests (boolean value)
-#object_versioning=true
+#container_sync = true
 
 # Execute discoverability tests (boolean value)
-#discoverability=true
+#discoverability = true
+
+# A list of the enabled optional discoverable apis. A single entry,
+# all, indicates that all of these features are expected to be enabled
+# (list value)
+#discoverable_apis = all
+
+# Execute object-versioning tests (boolean value)
+#object_versioning = true
 
 
 [orchestration]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# Time in seconds between build status checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a stack to build. (integer value)
+#build_timeout = 1200
+
 # Catalog type of the Orchestration service. (string value)
-#catalog_type=orchestration
+#catalog_type = orchestration
+
+# The endpoint type to use for the orchestration service. (string
+# value)
+#endpoint_type = publicURL
+
+# Name of heat-cfntools enabled image to use when launching test
+# instances. (string value)
+#image_ref = <None>
+
+# Instance type for tests. Needs to be big enough for a full OS plus
+# the test workload (string value)
+#instance_type = m1.micro
+
+# Name of existing keypair to launch servers with. (string value)
+#keypair_name = <None>
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_resources_per_stack = 1000
+
+# Value must match heat configuration of the same name. (integer
+# value)
+#max_template_size = 524288
 
 # The orchestration region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
-# value)
-#region=
-
-# The endpoint type to use for the orchestration service.
-# (string value)
-#endpoint_type=publicURL
-
-# Timeout in seconds to wait for a stack to build. (integer
-# value)
-#build_timeout=1200
-
-# Instance type for tests. Needs to be big enough for a full
-# OS plus the test workload (string value)
-#instance_type=m1.micro
-
-# Name of heat-cfntools enabled image to use when launching
-# test instances. (string value)
-#image_ref=<None>
-
-# Name of existing keypair to launch servers with. (string
-# value)
-#keypair_name=<None>
-
-# Value must match heat configuration of the same name.
-# (integer value)
-#max_template_size=524288
-
-# Value must match heat configuration of the same name.
-# (integer value)
-#max_resources_per_stack=1000
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
 
 
 [scenario]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
+# AKI image file name (string value)
+#aki_img_file = cirros-0.3.1-x86_64-vmlinuz
+
+# AMI image file name (string value)
+#ami_img_file = cirros-0.3.1-x86_64-blank.img
+
+# ARI image file name (string value)
+#ari_img_file = cirros-0.3.1-x86_64-initrd
+
+# Image container format (string value)
+#img_container_format = bare
+
 # Directory containing image files (string value)
-#img_dir=/opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+
+# Image disk format (string value)
+#img_disk_format = qcow2
 
 # Image file name (string value)
 # Deprecated group/name - [DEFAULT]/qcow2_img_file
-#img_file=cirros-0.3.1-x86_64-disk.img
+#img_file = cirros-0.3.1-x86_64-disk.img
 
-# Image disk format (string value)
-#img_disk_format=qcow2
-
-# Image container format (string value)
-#img_container_format=bare
-
-# AMI image file name (string value)
-#ami_img_file=cirros-0.3.1-x86_64-blank.img
-
-# ARI image file name (string value)
-#ari_img_file=cirros-0.3.1-x86_64-initrd
-
-# AKI image file name (string value)
-#aki_img_file=cirros-0.3.1-x86_64-vmlinuz
+# specifies how many resources to request at once. Used for large
+# operations testing. (integer value)
+#large_ops_number = 0
 
 # ssh username for the image file (string value)
-#ssh_user=cirros
-
-# specifies how many resources to request at once. Used for
-# large operations testing. (integer value)
-#large_ops_number=0
+#ssh_user = cirros
 
 
 [service_available]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Whether or not cinder is expected to be available (boolean
+# Whether or not Ceilometer is expected to be available (boolean
 # value)
-#cinder=true
+#ceilometer = true
 
-# Whether or not neutron is expected to be available (boolean
-# value)
-#neutron=false
+# Whether or not cinder is expected to be available (boolean value)
+#cinder = true
 
-# Whether or not glance is expected to be available (boolean
-# value)
-#glance=true
+# Whether or not glance is expected to be available (boolean value)
+#glance = true
 
-# Whether or not swift is expected to be available (boolean
-# value)
-#swift=true
+# Whether or not Heat is expected to be available (boolean value)
+#heat = false
 
-# Whether or not nova is expected to be available (boolean
-# value)
-#nova=true
+# Whether or not Horizon is expected to be available (boolean value)
+#horizon = true
 
-# Whether or not Heat is expected to be available (boolean
-# value)
-#heat=false
+# Whether or not Ironic is expected to be available (boolean value)
+#ironic = false
 
-# Whether or not Ceilometer is expected to be available
-# (boolean value)
-#ceilometer=true
+# Whether or not neutron is expected to be available (boolean value)
+#neutron = false
 
-# Whether or not Horizon is expected to be available (boolean
-# value)
-#horizon=true
+# Whether or not nova is expected to be available (boolean value)
+#nova = true
 
-# Whether or not Sahara is expected to be available (boolean
-# value)
-#sahara=false
+# Whether or not Sahara is expected to be available (boolean value)
+#sahara = false
 
-# Whether or not Ironic is expected to be available (boolean
-# value)
-#ironic=false
+# Whether or not swift is expected to be available (boolean value)
+#swift = true
 
-# Whether or not Trove is expected to be available (boolean
-# value)
-#trove=false
+# Whether or not Trove is expected to be available (boolean value)
+#trove = false
 
-# Whether or not Zaqar is expected to be available (boolean
-# value)
-#zaqar=false
+# Whether or not Zaqar is expected to be available (boolean value)
+#zaqar = false
 
 
 [stress]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Directory containing log files on the compute nodes (string
-# value)
-#nova_logdir=<None>
+# Controller host. (string value)
+#controller = <None>
 
-# Maximum number of instances to create during test. (integer
-# value)
-#max_instances=16
+# The number of threads created while stress test. (integer value)
+#default_thread_number_per_action = 4
+
+# Allows a full cleaning process after a stress test. Caution: this
+# cleanup will remove every object of every tenant. (boolean value)
+#full_clean_stack = false
+
+# Prevent the cleaning (tearDownClass()) between each stress test run
+# if an exception occurs during this run. (boolean value)
+#leave_dirty_stack = false
+
+# time (in seconds) between log file error checks. (integer value)
+#log_check_interval = 60
+
+# Maximum number of instances to create during test. (integer value)
+#max_instances = 16
+
+# Directory containing log files on the compute nodes (string value)
+#nova_logdir = <None>
 
 # Controller host. (string value)
-#controller=<None>
-
-# Controller host. (string value)
-#target_controller=<None>
-
-# ssh user. (string value)
-#target_ssh_user=<None>
-
-# Path to private key. (string value)
-#target_private_key_path=<None>
+#target_controller = <None>
 
 # regexp for list of log files. (string value)
-#target_logfiles=<None>
+#target_logfiles = <None>
 
-# time (in seconds) between log file error checks. (integer
-# value)
-#log_check_interval=60
+# Path to private key. (string value)
+#target_private_key_path = <None>
 
-# The number of threads created while stress test. (integer
-# value)
-#default_thread_number_per_action=4
-
-# Prevent the cleaning (tearDownClass()) between each stress
-# test run if an exception occurs during this run. (boolean
-# value)
-#leave_dirty_stack=false
-
-# Allows a full cleaning process after a stress test. Caution
-# : this cleanup will remove every objects of every tenant.
-# (boolean value)
-#full_clean_stack=false
+# ssh user. (string value)
+#target_ssh_user = <None>
 
 
 [telemetry]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
 # Catalog type of the Telemetry service. (string value)
-#catalog_type=metering
+#catalog_type = metering
 
-# The endpoint type to use for the telemetry service. (string
+# The endpoint type to use for the telemetry service. (string value)
+#endpoint_type = publicURL
+
+# This variable is used as a flag to enable notification tests
+# (boolean value)
-#endpoint_type=publicURL
-
-# This variable is used as flag to enable notification tests
-# (boolean value)
-#too_slow_to_test=true
+#too_slow_to_test = true
 
 
 [volume]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Time in seconds between volume availability checks. (integer
+# Name of the backend1 (must be declared in cinder.conf) (string
 # value)
-#build_interval=1
+#backend1_name = BACKEND_1
 
-# Timeout in seconds to wait for a volume to becomeavailable.
+# Name of the backend2 (must be declared in cinder.conf) (string
+# value)
+#backend2_name = BACKEND_2
+
+# Time in seconds between volume availability checks. (integer value)
+#build_interval = 1
+
+# Timeout in seconds to wait for a volume to become available.
 # (integer value)
-#build_timeout=300
+#build_timeout = 300
 
 # Catalog type of the Volume Service (string value)
-#catalog_type=volume
+#catalog_type = volume
+
+# Disk format to use when copying a volume to image (string value)
+#disk_format = raw
+
+# The endpoint type to use for the volume service. (string value)
+#endpoint_type = publicURL
 
 # The volume region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found
-# in the service catalog, the first found one is used. (string
+# identity.region is used instead. If no such region is found in the
+# service catalog, the first found one is used. (string value)
+#region =
+
+# Backend protocol to target when creating volume types (string value)
+#storage_protocol = iSCSI
+
+# Backend vendor to target when creating volume types (string value)
+#vendor_name = Open Source
+
+# Default size in GB for volumes created by volumes tests (integer
 # value)
-#region=
-
-# The endpoint type to use for the volume service. (string
-# value)
-#endpoint_type=publicURL
-
-# Name of the backend1 (must be declared in cinder.conf)
-# (string value)
-#backend1_name=BACKEND_1
-
-# Name of the backend2 (must be declared in cinder.conf)
-# (string value)
-#backend2_name=BACKEND_2
-
-# Backend protocol to target when creating volume types
-# (string value)
-#storage_protocol=iSCSI
-
-# Backend vendor to target when creating volume types (string
-# value)
-#vendor_name=Open Source
-
-# Disk format to use when copying a volume to image (string
-# value)
-#disk_format=raw
-
-# Default size in GB for volumes created by volumes tests
-# (integer value)
-#volume_size=1
+#volume_size = 1
 
 
 [volume-feature-enabled]
 
 #
-# Options defined in tempest.config
+# From tempest.config
 #
 
-# Runs Cinder multi-backend test (requires 2 backends)
-# (boolean value)
-#multi_backend=false
-
-# Runs Cinder volumes backup test (boolean value)
-#backup=true
-
-# Runs Cinder volume snapshot test (boolean value)
-#snapshot=true
-
-# A list of enabled volume extensions with a special entry all
-# which indicates every extension is enabled. Empty list
-# indicates all extensions are disabled (list value)
-#api_extensions=all
+# A list of enabled volume extensions with a special entry all which
+# indicates every extension is enabled. Empty list indicates all
+# extensions are disabled (list value)
+#api_extensions = all
 
 # Is the v1 volume API enabled (boolean value)
-#api_v1=true
+#api_v1 = true
 
 # Is the v2 volume API enabled (boolean value)
-#api_v2=true
+#api_v2 = true
 
+# Runs Cinder volumes backup test (boolean value)
+#backup = true
 
+# Runs Cinder multi-backend test (requires 2 backends) (boolean value)
+#multi_backend = false
+
+# Runs Cinder volume snapshot test (boolean value)
+#snapshot = true
diff --git a/openstack-common.conf b/openstack-common.conf
index a9a6b0b..5ae2089 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,6 @@
 [DEFAULT]
 
 # The list of modules to copy from openstack-common
-module=config
 module=install_venv_common
 module=lockutils
 module=log
diff --git a/requirements.txt b/requirements.txt
index 708ede3..ac72017 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,3 +27,4 @@
 iso8601>=0.1.9
 fixtures>=0.3.14
 testscenarios>=0.4
+tempest-lib
diff --git a/run_tests.sh b/run_tests.sh
index a12bf46..971f89b 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -103,8 +103,6 @@
       echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2
   fi
   ${wrapper} flake8
-  export MODULEPATH=tempest.common.generate_sample_tempest
-  ${wrapper} tools/config/check_uptodate.sh
 }
 
 if [ $never_venv -eq 0 ]
diff --git a/setup.cfg b/setup.cfg
index 2e25ace..90ea944 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = tempest
-version = 2
+version = 3
 summary = OpenStack Integration Testing
 description-file =
     README.rst
@@ -24,6 +24,9 @@
     run-tempest-stress = tempest.cmd.run_stress:main
     tempest-cleanup = tempest.cmd.cleanup:main
 
+oslo.config.opts =
+    tempest.config = tempest.config:list_opts
+
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index 8ccd36b..41c12c6 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -130,9 +130,7 @@
 
     @test.attr(type='smoke')
     def test_set_node_boot_device(self):
-        body = self.client.set_node_boot_device(self.node['uuid'], 'pxe')
-        # No content
-        self.assertEqual('', body)
+        self.client.set_node_boot_device(self.node['uuid'], 'pxe')
 
     @test.attr(type='smoke')
     def test_get_node_boot_device(self):
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 90be820..8d6a7fc 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -68,7 +68,10 @@
         # NOTE(maurosr): since this exercises the same code snippet, we do it
         # only for reserve action
         body = {"reserve": "None"}
-        self.assertRaises(exceptions.NotFound,
+        # NOTE(eliqiao): in Juno the exception is NotFound, but in master the
+        # error code was changed to BadRequest; both exceptions should be
+        # accepted by tempest
+        self.assertRaises((exceptions.NotFound, exceptions.BadRequest),
                           self.client.reserve_fixed_ip,
                           "my.invalid.ip", body)
 
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index d365f3a..3307159 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -296,7 +296,7 @@
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
-        ram = " 1024 "
+        ram = "1024"
         resp, flavor = self.client.create_flavor(flavor_name,
                                                  ram, self.vcpus,
                                                  self.disk,
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 6c93d33..2f53a0b 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -44,9 +44,9 @@
 
         # TODO(andreaf) WE should care also for the alt_manager here
         # but only once client lazy load in the manager is done
-        os = cls.get_client_manager()
+        cls.os = cls.get_client_manager()
+        cls.multi_user = cls.check_multi_user()
 
-        cls.os = os
         cls.build_interval = CONF.compute.build_interval
         cls.build_timeout = CONF.compute.build_timeout
         cls.ssh_user = CONF.compute.ssh_user
@@ -58,7 +58,6 @@
         cls.image_ssh_password = CONF.compute.image_ssh_password
         cls.servers = []
         cls.images = []
-        cls.multi_user = cls.get_multi_user()
         cls.security_groups = []
         cls.server_groups = []
 
@@ -73,6 +72,8 @@
             cls.quotas_client = cls.os.quotas_client
             # NOTE(mriedem): os-quota-class-sets is v2 API only
             cls.quota_classes_client = cls.os.quota_classes_client
+            # NOTE(mriedem): os-networks is v2 API only
+            cls.networks_client = cls.os.networks_client
             cls.limits_client = cls.os.limits_client
             cls.volumes_extensions_client = cls.os.volumes_extensions_client
             cls.volumes_client = cls.os.volumes_client
@@ -118,30 +119,17 @@
             raise exceptions.InvalidConfiguration(message=msg)
 
     @classmethod
-    def get_multi_user(cls):
-        multi_user = True
-        # Determine if there are two regular users that can be
-        # used in testing. If the test cases are allowed to create
-        # users (config.compute.allow_tenant_isolation is true,
-        # then we allow multi-user.
-        if not CONF.compute.allow_tenant_isolation:
-            user1 = CONF.identity.username
-            user2 = CONF.identity.alt_username
-            if not user2 or user1 == user2:
-                multi_user = False
-            else:
-                user2_password = CONF.identity.alt_password
-                user2_tenant_name = CONF.identity.alt_tenant_name
-                if not user2_password or not user2_tenant_name:
-                    msg = ("Alternate user specified but not alternate "
-                           "tenant or password: alt_tenant_name=%s "
-                           "alt_password=%s"
-                           % (user2_tenant_name, user2_password))
-                    raise exceptions.InvalidConfiguration(msg)
-        return multi_user
+    def check_multi_user(cls):
+        # We now have a list of accounts; check that at least two users exist
+        if not cls.isolated_creds.is_multi_user():
+            msg = "Not enough users available for multi-user testing"
+            raise exceptions.InvalidConfiguration(msg)
+        return True
 
     @classmethod
     def clear_servers(cls):
+        LOG.debug('Clearing servers: %s', ','.join(
+            server['id'] for server in cls.servers))
         for server in cls.servers:
             try:
                 cls.servers_client.delete_server(server['id'])
@@ -181,6 +169,7 @@
 
     @classmethod
     def clear_images(cls):
+        LOG.debug('Clearing images: %s', ','.join(cls.images))
         for image_id in cls.images:
             try:
                 cls.images_client.delete_image(image_id)
@@ -192,6 +181,8 @@
 
     @classmethod
     def clear_security_groups(cls):
+        LOG.debug('Clearing security groups: %s', ','.join(
+            str(sg['id']) for sg in cls.security_groups))
         for sg in cls.security_groups:
             try:
                 resp, body =\
@@ -206,6 +197,7 @@
 
     @classmethod
     def clear_server_groups(cls):
+        LOG.debug('Clearing server groups: %s', ','.join(cls.server_groups))
         for server_group_id in cls.server_groups:
             try:
                 cls.client.delete_server_group(server_group_id)
@@ -390,19 +382,14 @@
     @classmethod
     def resource_setup(cls):
         super(BaseComputeAdminTest, cls).resource_setup()
-        if (CONF.compute.allow_tenant_isolation or
-            cls.force_tenant_isolation is True):
+        try:
             creds = cls.isolated_creds.get_admin_creds()
-            cls.os_adm = clients.Manager(credentials=creds,
-                                         interface=cls._interface)
-        else:
-            try:
-                cls.os_adm = clients.ComputeAdminManager(
-                    interface=cls._interface)
-            except exceptions.InvalidCredentials:
-                msg = ("Missing Compute Admin API credentials "
-                       "in configuration.")
-                raise cls.skipException(msg)
+            cls.os_adm = clients.Manager(
+                credentials=creds, interface=cls._interface)
+        except NotImplementedError:
+            msg = ("Missing Compute Admin API credentials in configuration.")
+            raise cls.skipException(msg)
+
         if cls._api_version == 2:
             cls.availability_zone_admin_client = (
                 cls.os_adm.availability_zone_client)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index c0b6730..459d78b 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -105,7 +105,11 @@
             raise self.skipException("Not testable in XML")
         # prefix character is:
         # http://www.fileformat.info/info/unicode/char/1F4A9/index.htm
-        utf8_name = data_utils.rand_name(u'\xF0\x9F\x92\xA9')
+
+        # We use a string with a 3-byte utf-8 character due to bug
+        # #1370954 in glance, which will return a 500 if mysql is used
+        # as the backend and it attempts to store a 4-byte utf-8 character
+        utf8_name = data_utils.rand_name('\xe2\x82\xa1')
         resp, body = self.client.create_image(self.server_id, utf8_name)
         image_id = data_utils.parse_image_id(resp['location'])
         self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 45b913a..4fd5c02 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import six
+
 from tempest.api.compute.security_groups import base
 from tempest import config
 from tempest import test
@@ -31,22 +33,54 @@
         cls.from_port = 22
         cls.to_port = 22
 
+    def setUp(self):
+        super(SecurityGroupRulesTestJSON, self).setUp()
+
+        from_port = self.from_port
+        to_port = self.to_port
+        group = {}
+        ip_range = {}
+        if self._interface == 'xml':
+            # NOTE: An XML response differs from the JSON one in the
+            # following fields.
+            from_port = six.text_type(from_port)
+            to_port = six.text_type(to_port)
+            group = {'tenant_id': 'None', 'name': 'None'}
+            ip_range = {'cidr': 'None'}
+        self.expected = {
+            'id': None,
+            'parent_group_id': None,
+            'ip_protocol': self.ip_protocol,
+            'from_port': from_port,
+            'to_port': to_port,
+            'ip_range': ip_range,
+            'group': group
+        }
+
+    def _check_expected_response(self, actual_rule):
+        for key in self.expected:
+            if key == 'id':
+                continue
+            self.assertEqual(self.expected[key], actual_rule[key],
+                             "Mismatched key is %s" % key)
+
     @test.attr(type='smoke')
     @test.services('network')
     def test_security_group_rules_create(self):
         # Positive test: Creation of Security Group rule
         # should be successful
         # Creating a Security Group to add rules to it
-        resp, security_group = self.create_security_group()
+        _, security_group = self.create_security_group()
         securitygroup_id = security_group['id']
         # Adding rules to the created Security Group
-        resp, rule = \
+        _, rule = \
             self.client.create_security_group_rule(securitygroup_id,
                                                    self.ip_protocol,
                                                    self.from_port,
                                                    self.to_port)
-        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
-        self.assertEqual(200, resp.status)
+        self.expected['parent_group_id'] = securitygroup_id
+        self.expected['ip_range'] = {'cidr': '0.0.0.0/0'}
+        self._check_expected_response(rule)
 
     @test.attr(type='smoke')
     @test.services('network')
@@ -56,16 +90,20 @@
         # should be successful
 
         # Creating a Security Group to add rules to it
-        resp, security_group = self.create_security_group()
+        _, security_group = self.create_security_group()
         parent_group_id = security_group['id']
 
         # Adding rules to the created Security Group with optional cidr
         cidr = '10.2.3.124/24'
-        self.client.create_security_group_rule(parent_group_id,
-                                               self.ip_protocol,
-                                               self.from_port,
-                                               self.to_port,
-                                               cidr=cidr)
+        _, rule = \
+            self.client.create_security_group_rule(parent_group_id,
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port,
+                                                   cidr=cidr)
+        self.expected['parent_group_id'] = parent_group_id
+        self.expected['ip_range'] = {'cidr': cidr}
+        self._check_expected_response(rule)
 
     @test.attr(type='smoke')
     @test.services('network')
@@ -75,21 +113,25 @@
         # should be successful
 
         # Creating a Security Group to add rules to it
-        resp, security_group = self.create_security_group()
-        secgroup1 = security_group['id']
+        _, security_group = self.create_security_group()
+        parent_group_id = security_group['id']
 
         # Creating a Security Group so as to assign group_id to the rule
-        resp, security_group = self.create_security_group()
-        secgroup2 = security_group['id']
+        _, security_group = self.create_security_group()
+        group_id = security_group['id']
+        group_name = security_group['name']
 
         # Adding rules to the created Security Group with optional group_id
-        parent_group_id = secgroup1
-        group_id = secgroup2
-        self.client.create_security_group_rule(parent_group_id,
-                                               self.ip_protocol,
-                                               self.from_port,
-                                               self.to_port,
-                                               group_id=group_id)
+        _, rule = \
+            self.client.create_security_group_rule(parent_group_id,
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port,
+                                                   group_id=group_id)
+        self.expected['parent_group_id'] = parent_group_id
+        self.expected['group'] = {'tenant_id': self.client.tenant_id,
+                                  'name': group_name}
+        self._check_expected_response(rule)
 
     @test.attr(type='smoke')
     @test.services('network')
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 5df8d82..d954c01 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -42,6 +42,7 @@
         personality = [{'path': '/test.txt',
                        'contents': base64.b64encode(file_contents)}]
         cls.client = cls.servers_client
+        cls.network_client = cls.os.network_client
         cli_resp = cls.create_test_server(name=cls.name,
                                           meta=cls.meta,
                                           accessIPv4=cls.accessIPv4,
@@ -124,6 +125,65 @@
         self.assertEqual(200, resp.status)
         self.assertIn(server['id'], server_group['members'])
 
+    @testtools.skipUnless(CONF.service_available.neutron,
+                          'Neutron service must be available.')
+    def test_verify_multiple_nics_order(self):
+        if getattr(self, '_interface',
+                   None) == 'xml' and not CONF.network_feature_enabled.xml_api:
+            raise self.skipException('Neutron XML API is not enabled')
+        # Verify that the networks order given at the server creation is
+        # preserved within the server.
+        name_net1 = data_utils.rand_name(self.__class__.__name__)
+        _, net1 = self.network_client.create_network(name=name_net1)
+        self.addCleanup(self.network_client.delete_network,
+                        net1['network']['id'])
+
+        name_net2 = data_utils.rand_name(self.__class__.__name__)
+        _, net2 = self.network_client.create_network(name=name_net2)
+        self.addCleanup(self.network_client.delete_network,
+                        net2['network']['id'])
+
+        _, subnet1 = self.network_client.create_subnet(
+            network_id=net1['network']['id'],
+            cidr='19.80.0.0/24',
+            ip_version=4)
+        self.addCleanup(self.network_client.delete_subnet,
+                        subnet1['subnet']['id'])
+
+        _, subnet2 = self.network_client.create_subnet(
+            network_id=net2['network']['id'],
+            cidr='19.86.0.0/24',
+            ip_version=4)
+        self.addCleanup(self.network_client.delete_subnet,
+                        subnet2['subnet']['id'])
+
+        networks = [{'uuid': net1['network']['id']},
+                    {'uuid': net2['network']['id']}]
+
+        _, server_multi_nics = self.create_test_server(
+            networks=networks, wait_until='ACTIVE')
+
+        # Cleanup server; this is needed in the test case because with the LIFO
+        # nature of the cleanups, if we don't delete the server first, the port
+        # will still be part of the subnet and we'll get a 409 from Neutron
+        # when trying to delete the subnet. The tear down in the base class
+        # will try to delete the server and get a 404 but it's ignored so
+        # we're OK.
+        def cleanup_server():
+            self.client.delete_server(server_multi_nics['id'])
+            self.client.wait_for_server_termination(server_multi_nics['id'])
+
+        self.addCleanup(cleanup_server)
+
+        _, addresses = self.client.list_addresses(server_multi_nics['id'])
+
+        expected_addr = ['19.80.0.2', '19.86.0.2']
+
+        addr = [addresses[name_net1][0]['addr'],
+                addresses[name_net2][0]['addr']]
+
+        self.assertEqual(expected_addr, addr)
+
 
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
     disk_config = 'AUTO'
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 98fe387..e660f00 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -69,12 +69,12 @@
         resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
-        if (CONF.service_available.neutron and
-                CONF.compute.allow_tenant_isolation):
-            network = cls.isolated_creds.get_primary_network()
-            cls.fixed_network_name = network['name']
-        else:
-            cls.fixed_network_name = CONF.compute.fixed_network_name
+
+        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if CONF.service_available.neutron:
+            if hasattr(cls.isolated_creds, 'get_primary_network'):
+                network = cls.isolated_creds.get_primary_network()
+                cls.fixed_network_name = network['name']
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 3aacf2a..b51b46e 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -358,6 +358,25 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     @test.attr(type='gate')
+    def test_get_console_output_with_unlimited_size(self):
+        _, server = self.create_test_server(wait_until='ACTIVE')
+
+        def _check_full_length_console_log():
+            _, output = self.servers_client.get_console_output(server['id'],
+                                                               None)
+            self.assertTrue(output, "Console output was empty.")
+            lines = len(output.split('\n'))
+
+            # NOTE: This test tries to get full length console log, and the
+            # length should be bigger than the one of test_get_console_output.
+            self.assertTrue(lines > 10, "Console log is not long enough."
+                                        " (lines: %s)" % lines)
+
+        self.wait_for(_check_full_length_console_log)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
+                          'Console output not supported.')
+    @test.attr(type='gate')
     def test_get_console_output_server_id_in_shutoff_status(self):
         # Positive test:Should be able to GET the console output
         # for a given server_id in SHUTOFF status
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index b86ee06..0349260 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -45,10 +45,7 @@
     def resource_setup(cls):
         super(ServersNegativeTestJSON, cls).resource_setup()
         cls.client = cls.servers_client
-        if CONF.compute.allow_tenant_isolation:
-            cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.alt_os = clients.AltManager()
+        cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
         cls.alt_client = cls.alt_os.servers_client
         resp, server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 015d9f5..175f008 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -45,12 +45,8 @@
         cls.keypairs_client = cls.os.keypairs_client
         cls.security_client = cls.os.security_groups_client
 
-        if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_alt_creds()
-            cls.alt_manager = clients.Manager(credentials=creds)
-        else:
-            # Use the alt_XXX credentials in the config file
-            cls.alt_manager = clients.AltManager()
+        creds = cls.isolated_creds.get_alt_creds()
+        cls.alt_manager = clients.Manager(credentials=creds)
 
         cls.alt_client = cls.alt_manager.servers_client
         cls.alt_images_client = cls.alt_manager.images_client
diff --git a/tempest/api/compute/test_networks.py b/tempest/api/compute/test_networks.py
new file mode 100644
index 0000000..86779b3
--- /dev/null
+++ b/tempest/api/compute/test_networks.py
@@ -0,0 +1,33 @@
+# Copyright 2014 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class NetworksTestJSON(base.BaseV2ComputeTest):
+    @classmethod
+    def resource_setup(cls):
+        if CONF.service_available.neutron:
+            raise cls.skipException('nova-network is not available.')
+        super(NetworksTestJSON, cls).resource_setup()
+        cls.client = cls.os.networks_client
+
+    @test.attr(type='gate')
+    def test_list_networks(self):
+        _, networks = self.client.list_networks()
+        self.assertNotEmpty(networks, "No networks found.")
diff --git a/tempest/api/compute/v3/servers/test_list_server_filters.py b/tempest/api/compute/v3/servers/test_list_server_filters.py
index 209d293..73844cf 100644
--- a/tempest/api/compute/v3/servers/test_list_server_filters.py
+++ b/tempest/api/compute/v3/servers/test_list_server_filters.py
@@ -70,12 +70,11 @@
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
 
-        if (CONF.service_available.neutron and
-                CONF.compute.allow_tenant_isolation):
-            network = cls.isolated_creds.get_primary_network()
-            cls.fixed_network_name = network['name']
-        else:
-            cls.fixed_network_name = CONF.compute.fixed_network_name
+        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if CONF.service_available.neutron:
+            if hasattr(cls.isolated_creds, 'get_primary_network'):
+                network = cls.isolated_creds.get_primary_network()
+                cls.fixed_network_name = network['name']
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index 30ac0ac..4b1fe04 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -45,10 +45,7 @@
     def resource_setup(cls):
         super(ServersNegativeV3Test, cls).resource_setup()
         cls.client = cls.servers_client
-        if CONF.compute.allow_tenant_isolation:
-            cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.alt_os = clients.AltManager()
+        cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
         cls.alt_client = cls.alt_os.servers_v3_client
         resp, server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index f1f1eb6..676f101 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -81,8 +81,7 @@
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertIn(endpoint['id'], fetched_endpoints_id)
         # Deleting the endpoint created in this method
-        _, body = self.client.delete_endpoint(endpoint['id'])
-        self.assertEqual(body, '')
+        self.client.delete_endpoint(endpoint['id'])
         # Checking whether endpoint is deleted successfully
         resp, fetched_endpoints = self.client.list_endpoints()
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 08767e3..74baba6 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -41,10 +41,7 @@
         if not CONF.service_available.glance:
             skip_msg = ("%s skipped as glance is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
-        if CONF.compute.allow_tenant_isolation:
-            cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
-        else:
-            cls.os = clients.Manager()
+        cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
 
     @classmethod
     def resource_cleanup(cls):
@@ -91,10 +88,7 @@
     @classmethod
     def resource_setup(cls):
         super(BaseV1ImageMembersTest, cls).resource_setup()
-        if CONF.compute.allow_tenant_isolation:
-            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.os_alt = clients.AltManager()
+        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
 
         cls.alt_img_cli = cls.os_alt.image_client
         cls.alt_tenant_id = cls.alt_img_cli.tenant_id
@@ -126,11 +120,8 @@
     @classmethod
     def resource_setup(cls):
         super(BaseV2MemberImageTest, cls).resource_setup()
-        if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_alt_creds()
-            cls.os_alt = clients.Manager(creds)
-        else:
-            cls.os_alt = clients.AltManager()
+        creds = cls.isolated_creds.get_alt_creds()
+        cls.os_alt = clients.Manager(creds)
         cls.os_img_client = cls.os.image_client_v2
         cls.alt_img_client = cls.os_alt.image_client_v2
         cls.alt_tenant_id = cls.alt_img_client.tenant_id
diff --git a/tempest/api/messaging/test_queues.py b/tempest/api/messaging/test_queues.py
index ab099ff..accbd17 100644
--- a/tempest/api/messaging/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -20,6 +20,7 @@
 
 from tempest.api.messaging import base
 from tempest.common.utils import data_utils
+from tempest import exceptions
 from tempest import test
 
 
@@ -29,16 +30,22 @@
 class TestQueues(base.BaseMessagingTest):
 
     @test.attr(type='smoke')
-    def test_create_queue(self):
-        # Create Queue
+    def test_create_delete_queue(self):
+        # Create & Delete Queue
         queue_name = data_utils.rand_name('test-')
-        resp, body = self.create_queue(queue_name)
+        _, body = self.create_queue(queue_name)
 
         self.addCleanup(self.client.delete_queue, queue_name)
-
-        self.assertEqual('201', resp['status'])
+        # NOTE(gmann): create_queue returns a 201 response status code, so
+        # we explicitly check for the expected empty response body here
+        # because it is not checked in response_checker().
         self.assertEqual('', body)
 
+        self.delete_queue(queue_name)
+        self.assertRaises(exceptions.NotFound,
+                          self.client.get_queue,
+                          queue_name)
+
 
 class TestManageQueue(base.BaseMessagingTest):
     _interface = 'json'
@@ -54,33 +61,21 @@
             cls.client.create_queue(queue_name)
 
     @test.attr(type='smoke')
-    def test_delete_queue(self):
-        # Delete Queue
-        queue_name = self.queues.pop()
-        resp, body = self.delete_queue(queue_name)
-        self.assertEqual('204', resp['status'])
-        self.assertEqual('', body)
-
-    @test.attr(type='smoke')
     def test_check_queue_existence(self):
         # Checking Queue Existence
         for queue_name in self.queues:
-            resp, body = self.check_queue_exists(queue_name)
-            self.assertEqual('204', resp['status'])
-            self.assertEqual('', body)
+            self.check_queue_exists(queue_name)
 
     @test.attr(type='smoke')
     def test_check_queue_head(self):
         # Checking Queue Existence by calling HEAD
         for queue_name in self.queues:
-            resp, body = self.check_queue_exists_head(queue_name)
-            self.assertEqual('204', resp['status'])
-            self.assertEqual('', body)
+            self.check_queue_exists_head(queue_name)
 
     @test.attr(type='smoke')
     def test_list_queues(self):
         # Listing queues
-        resp, body = self.list_queues()
+        _, body = self.list_queues()
         self.assertEqual(len(body['queues']), len(self.queues))
         for item in body['queues']:
             self.assertIn(item['name'], self.queues)
@@ -91,7 +86,7 @@
         queue_name = self.queues[data_utils.rand_int_id(0,
                                                         len(self.queues) - 1)]
         # Get Queue Stats for a newly created Queue
-        resp, body = self.get_queue_stats(queue_name)
+        _, body = self.get_queue_stats(queue_name)
         msgs = body['messages']
         for element in ('free', 'claimed', 'total'):
             self.assertEqual(0, msgs[element])
@@ -104,8 +99,7 @@
         queue_name = self.queues[data_utils.rand_int_id(0,
                                                         len(self.queues) - 1)]
         # Check the Queue has no metadata
-        resp, body = self.get_queue_metadata(queue_name)
-        self.assertEqual('200', resp['status'])
+        _, body = self.get_queue_metadata(queue_name)
         self.assertThat(body, matchers.HasLength(0))
         # Create metadata
         key3 = [0, 1, 2, 3, 4]
@@ -116,12 +110,10 @@
         req_body = dict()
         req_body[data_utils.rand_name('key1')] = req_body1
         # Set Queue Metadata
-        resp, body = self.set_queue_metadata(queue_name, req_body)
-        self.assertEqual('204', resp['status'])
-        self.assertEqual('', body)
+        self.set_queue_metadata(queue_name, req_body)
+
         # Get Queue Metadata
-        resp, body = self.get_queue_metadata(queue_name)
-        self.assertEqual('200', resp['status'])
+        _, body = self.get_queue_metadata(queue_name)
         self.assertThat(body, matchers.Equals(req_body))
 
     @classmethod
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index d9b2848..7ba68f7 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -62,6 +62,9 @@
         super(BaseNetworkTest, cls).resource_setup()
         if not CONF.service_available.neutron:
             raise cls.skipException("Neutron support is required")
+        if getattr(cls, '_interface', None) == 'xml':
+            if not CONF.network_feature_enabled.xml_api:
+                raise cls.skipException('XML API is not enabled')
 
         os = cls.get_client_manager()
 
@@ -152,33 +155,35 @@
         return network
 
     @classmethod
-    def create_subnet(cls, network, gateway=None, cidr=None, mask_bits=None,
-                      **kwargs):
+    def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
+                      ip_version=None, **kwargs):
         """Wrapper utility that returns a test subnet."""
         # The cidr and mask_bits depend on the ip version.
-        if cls._ip_version == 4:
+        ip_version = ip_version if ip_version is not None else cls._ip_version
+        gateway_not_set = gateway == ''
+        if ip_version == 4:
             cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
             mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
-        elif cls._ip_version == 6:
+        elif ip_version == 6:
             cidr = (
                 cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
             mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
         # Find a cidr that is not in use yet and create a subnet with it
         for subnet_cidr in cidr.subnet(mask_bits):
-            if not gateway:
-                gateway = str(netaddr.IPAddress(subnet_cidr) + 1)
+            if gateway_not_set:
+                gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
+            else:
+                gateway_ip = gateway
             try:
                 resp, body = cls.client.create_subnet(
                     network_id=network['id'],
                     cidr=str(subnet_cidr),
-                    ip_version=cls._ip_version,
-                    gateway_ip=gateway,
+                    ip_version=ip_version,
+                    gateway_ip=gateway_ip,
                     **kwargs)
                 break
             except exceptions.BadRequest as e:
                 is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                # Unset gateway value if there is an overlapping subnet
-                gateway = None
                 if not is_overlapping_cidr:
                     raise
         else:
@@ -366,19 +371,15 @@
     @classmethod
     def resource_setup(cls):
         super(BaseAdminNetworkTest, cls).resource_setup()
-        admin_username = CONF.compute_admin.username
-        admin_password = CONF.compute_admin.password
-        admin_tenant = CONF.compute_admin.tenant_name
-        if not (admin_username and admin_password and admin_tenant):
+
+        try:
+            creds = cls.isolated_creds.get_admin_creds()
+            cls.os_adm = clients.Manager(
+                credentials=creds, interface=cls._interface)
+        except NotImplementedError:
             msg = ("Missing Administrative Network API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
-        if (CONF.compute.allow_tenant_isolation or
-            cls.force_tenant_isolation is True):
-            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
-                                         interface=cls._interface)
-        else:
-            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
         cls.admin_client = cls.os_adm.network_client
 
     @classmethod
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 11588d6..8e2b7f5 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -36,6 +36,8 @@
         List firewall policies
         Create firewall policy
         Update firewall policy
+        Insert firewall rule to policy
+        Remove firewall rule from policy
         Delete firewall policy
         Show firewall policy
         List firewall
@@ -62,6 +64,14 @@
         except exceptions.NotFound:
             pass
 
+    def _try_delete_rule(self, rule_id):
+        # delete rule, if it exists
+        try:
+            self.client.delete_firewall_rule(rule_id)
+        # if rule is not found, this means it was deleted in the test
+        except exceptions.NotFound:
+            pass
+
     def _try_delete_firewall(self, fw_id):
         # delete firewall, if it exists
         try:
@@ -211,6 +221,40 @@
         # Delete firewall
         self.client.delete_firewall(firewall_id)
 
+    @test.attr(type='smoke')
+    def test_insert_remove_firewall_rule_from_policy(self):
+        # Create firewall rule
+        resp, body = self.client.create_firewall_rule(
+            name=data_utils.rand_name("fw-rule"),
+            action="allow",
+            protocol="tcp")
+        fw_rule_id = body['firewall_rule']['id']
+        self.addCleanup(self._try_delete_rule, fw_rule_id)
+        # Create firewall policy
+        _, body = self.client.create_firewall_policy(
+            name=data_utils.rand_name("fw-policy"))
+        fw_policy_id = body['firewall_policy']['id']
+        self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+        # Insert rule to firewall policy
+        self.client.insert_firewall_rule_in_policy(
+            fw_policy_id, fw_rule_id, '', '')
+
+        # Verify insertion of rule in policy
+        self.assertIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+        # Remove rule from the firewall policy
+        self.client.remove_firewall_rule_from_policy(
+            fw_policy_id, fw_rule_id)
+
+        # Verify removal of rule from firewall policy
+        self.assertNotIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+
+    def _get_list_fw_rule_ids(self, fw_policy_id):
+        _, fw_policy = self.client.show_firewall_policy(
+            fw_policy_id)
+        return [ruleid for ruleid in fw_policy['firewall_policy']
+                ['firewall_rules']]
+
 
 class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 986a2c8..dd81a09 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -280,6 +280,10 @@
         self.subnets.pop()
 
     @test.attr(type='smoke')
+    def test_create_delete_subnet_without_gateway(self):
+        self._create_verify_delete_subnet()
+
+    @test.attr(type='smoke')
     def test_create_delete_subnet_with_gw(self):
         self._create_verify_delete_subnet(
             **self.subnet_dict(['gateway']))
@@ -492,7 +496,7 @@
         self.assertEqual(subnet['gateway_ip'], gateway)
 
     @test.attr(type='smoke')
-    def test_create_delete_subnet_without_gw(self):
+    def test_create_delete_subnet_with_default_gw(self):
         net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
         gateway_ip = str(netaddr.IPAddress(net.first + 1))
         name = data_utils.rand_name('network-')
@@ -501,16 +505,62 @@
         # Verifies Subnet GW in IPv6
         self.assertEqual(subnet['gateway_ip'], gateway_ip)
 
+    @test.attr(type='smoke')
+    def test_create_list_subnet_with_no_gw64_one_network(self):
+        name = data_utils.rand_name('network-')
+        network = self.create_network(name)
+        ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
+        subnet1 = self.create_subnet(network,
+                                     ip_version=6,
+                                     gateway=ipv6_gateway)
+        self.assertEqual(netaddr.IPNetwork(subnet1['cidr']).version, 6,
+                         'The created subnet is not IPv6')
+        subnet2 = self.create_subnet(network,
+                                     gateway=None,
+                                     ip_version=4)
+        self.assertEqual(netaddr.IPNetwork(subnet2['cidr']).version, 4,
+                         'The created subnet is not IPv4')
+        # Verifies Subnet GW is set in IPv6
+        self.assertEqual(subnet1['gateway_ip'], ipv6_gateway)
+        # Verifies Subnet GW is None in IPv4
+        self.assertEqual(subnet2['gateway_ip'], None)
+        # Verifies all 2 subnets in the same network
+        _, body = self.client.list_subnets()
+        subnets = [sub['id'] for sub in body['subnets']
+                   if sub['network_id'] == network['id']]
+        test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
+        self.assertItemsEqual(subnets,
+                              test_subnet_ids,
+                              'Subnets are not in the same network')
+
     @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
                           "IPv6 extended attributes for subnets not "
                           "available")
     @test.attr(type='smoke')
-    def test_create_delete_subnet_with_v6_attributes(self):
+    def test_create_delete_subnet_with_v6_attributes_stateful(self):
         self._create_verify_delete_subnet(
             gateway=self._subnet_data[self._ip_version]['gateway'],
+            ipv6_ra_mode='dhcpv6-stateful',
+            ipv6_address_mode='dhcpv6-stateful')
+
+    @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
+                          "IPv6 extended attributes for subnets not "
+                          "available")
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_v6_attributes_slaac(self):
+        self._create_verify_delete_subnet(
             ipv6_ra_mode='slaac',
             ipv6_address_mode='slaac')
 
+    @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
+                          "IPv6 extended attributes for subnets not "
+                          "available")
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_v6_attributes_stateless(self):
+        self._create_verify_delete_subnet(
+            ipv6_ra_mode='dhcpv6-stateless',
+            ipv6_address_mode='dhcpv6-stateless')
+
 
 class NetworksIpV6TestXML(NetworksIpV6TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index cdd3a29..d6db64d 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -151,6 +151,41 @@
         port = self.update_port(port, fixed_ips=fixed_ip_1)
         self.assertEqual(1, len(port['fixed_ips']))
 
+    def _update_port_with_security_groups(self, security_groups_names):
+        post_body = {"network_id": self.network['id']}
+        self.create_subnet(self.network)
+        security_groups_list = list()
+        for name in security_groups_names:
+            _, group_create_body = self.client.create_security_group(
+                name=name)
+            self.addCleanup(self.client.delete_security_group,
+                            group_create_body['security_group']['id'])
+            security_groups_list.append(group_create_body['security_group']
+                                        ['id'])
+        # Create a port
+        _, body = self.client.create_port(**post_body)
+        self.addCleanup(self.client.delete_port, body['port']['id'])
+        port = body['port']
+        # Update the port with security groups
+        update_body = {"security_groups": security_groups_list}
+        _, body = self.client.update_port(
+            port['id'], **update_body)
+        # Verify the security groups updated to port
+        port_show = body['port']
+        for security_group in security_groups_list:
+            self.assertIn(security_group, port_show['security_groups'])
+
+    @test.attr(type='smoke')
+    def test_update_port_with_security_group(self):
+        self._update_port_with_security_groups(
+            [data_utils.rand_name('secgroup')])
+
+    @test.attr(type='smoke')
+    def test_update_port_with_two_security_groups(self):
+        self._update_port_with_security_groups(
+            [data_utils.rand_name('secgroup'),
+             data_utils.rand_name('secgroup')])
+
 
 class PortsTestXML(PortsTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 6a5fd3d..2e39cf9 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -36,18 +36,12 @@
             raise cls.skipException(skip_msg)
         cls.isolated_creds = isolated_creds.IsolatedCreds(
             cls.__name__, network_resources=cls.network_resources)
-        if CONF.compute.allow_tenant_isolation:
-            # Get isolated creds for normal user
-            cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
-            # Get isolated creds for admin user
-            cls.os_admin = clients.Manager(
-                cls.isolated_creds.get_admin_creds())
-            # Get isolated creds for alt user
-            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
-        else:
-            cls.os = clients.Manager()
-            cls.os_admin = clients.AdminManager()
-            cls.os_alt = clients.AltManager()
+        # Get isolated creds for normal user
+        cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
+        # Get isolated creds for admin user
+        cls.os_admin = clients.Manager(cls.isolated_creds.get_admin_creds())
+        # Get isolated creds for alt user
+        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
 
         cls.object_client = cls.os.object_client
         cls.container_client = cls.os.container_client
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 3782b70..42e2a2d 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,7 +32,6 @@
     @test.attr(type="gate")
     @testtools.skipIf(not CONF.service_available.nova,
                       "Nova is not available.")
-    @test.skip_because(bug="1336755")
     def test_check_nova_notification(self):
 
         resp, body = self.create_server()
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index db2aab5..042cde9 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -21,19 +21,19 @@
 LOG = logging.getLogger(__name__)
 
 
-class VolumeMultiBackendTest(base.BaseVolumeV1AdminTest):
+class VolumeMultiBackendV2Test(base.BaseVolumeAdminTest):
     _interface = "json"
 
     @classmethod
     def resource_setup(cls):
-        super(VolumeMultiBackendTest, cls).resource_setup()
+        super(VolumeMultiBackendV2Test, cls).resource_setup()
         if not CONF.volume_feature_enabled.multi_backend:
             raise cls.skipException("Cinder multi-backend feature disabled")
 
         cls.backend1_name = CONF.volume.backend1_name
         cls.backend2_name = CONF.volume.backend2_name
 
-        cls.volume_client = cls.os_adm.volumes_client
+        cls.name_field = cls.special_fields['name_field']
         cls.volume_type_id_list = []
         cls.volume_id_list_with_prefix = []
         cls.volume_id_list_without_prefix = []
@@ -60,12 +60,13 @@
             extra_specs = {spec_key_with_prefix: backend_name_key}
         else:
             extra_specs = {spec_key_without_prefix: backend_name_key}
-        _, self.type = self.client.create_volume_type(
+        _, self.type = self.volume_types_client.create_volume_type(
             type_name, extra_specs=extra_specs)
         self.volume_type_id_list.append(self.type['id'])
 
-        _, self.volume = self.volume_client.create_volume(
-            size=1, display_name=vol_name, volume_type=type_name)
+        params = {self.name_field: vol_name, 'volume_type': type_name}
+
+        _, self.volume = self.volume_client.create_volume(size=1, **params)
         if with_prefix:
             self.volume_id_list_with_prefix.append(self.volume['id'])
         else:
@@ -90,9 +91,9 @@
         # volume types deletion
         volume_type_id_list = getattr(cls, 'volume_type_id_list', [])
         for volume_type_id in volume_type_id_list:
-            cls.client.delete_volume_type(volume_type_id)
+            cls.volume_types_client.delete_volume_type(volume_type_id)
 
-        super(VolumeMultiBackendTest, cls).resource_cleanup()
+        super(VolumeMultiBackendV2Test, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_backend_name_reporting(self):
@@ -149,3 +150,7 @@
         msg = ("volumes %s and %s were created in the same backend" %
                (volume1_id, volume2_id))
         self.assertNotEqual(volume1_host, volume2_host, msg)
+
+
+class VolumeMultiBackendV1Test(VolumeMultiBackendV2Test):
+    _api_version = 1
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 7e24fa4..ece4299 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -71,7 +71,8 @@
 
     @test.attr(type='gate')
     def test_show_quota_usage(self):
-        _, quota_usage = self.quotas_client.get_quota_usage(self.adm_tenant)
+        _, quota_usage = self.quotas_client.get_quota_usage(
+            self.os_adm.credentials.tenant_name)
         for key in QUOTA_KEYS:
             self.assertIn(key, quota_usage)
             for usage_key in QUOTA_USAGE_KEYS:
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 070d38f..a0792f1 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -29,12 +29,12 @@
         self.volumes_client.wait_for_resource_deletion(volume_id)
 
     def _delete_volume_type(self, volume_type_id):
-        self.client.delete_volume_type(volume_type_id)
+        self.volume_types_client.delete_volume_type(volume_type_id)
 
     @test.attr(type='smoke')
     def test_volume_type_list(self):
         # List Volume types.
-        _, body = self.client.list_volume_types()
+        _, body = self.volume_types_client.list_volume_types()
         self.assertIsInstance(body, list)
 
     @test.attr(type='smoke')
@@ -48,7 +48,7 @@
         extra_specs = {"storage_protocol": proto,
                        "vendor_name": vendor}
         body = {}
-        _, body = self.client.create_volume_type(
+        _, body = self.volume_types_client.create_volume_type(
             vol_type_name,
             extra_specs=extra_specs)
         self.assertIn('id', body)
@@ -87,7 +87,7 @@
         vendor = CONF.volume.vendor_name
         extra_specs = {"storage_protocol": proto,
                        "vendor_name": vendor}
-        _, body = self.client.create_volume_type(
+        _, body = self.volume_types_client.create_volume_type(
             name,
             extra_specs=extra_specs)
         self.assertIn('id', body)
@@ -98,7 +98,8 @@
                          "to the requested name")
         self.assertTrue(body['id'] is not None,
                         "Field volume_type id is empty or not found.")
-        _, fetched_volume_type = self.client.get_volume_type(body['id'])
+        _, fetched_volume_type = self.volume_types_client.get_volume_type(
+            body['id'])
         self.assertEqual(name, fetched_volume_type['name'],
                          'The fetched Volume_type is different '
                          'from the created Volume_type')
@@ -115,11 +116,11 @@
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type-")
-        _, body = self.client.create_volume_type(name)
+        _, body = self.volume_types_client.create_volume_type(name)
         self.addCleanup(self._delete_volume_type, body['id'])
 
         # Create encryption type
-        _, encryption_type = self.client.create_encryption_type(
+        _, encryption_type = self.volume_types_client.create_encryption_type(
             body['id'], provider=provider,
             control_location=control_location)
         self.assertIn('volume_type_id', encryption_type)
@@ -131,8 +132,9 @@
                          "equal to the requested control_location")
 
         # Get encryption type
-        _, fetched_encryption_type = self.client.get_encryption_type(
-            encryption_type['volume_type_id'])
+        _, fetched_encryption_type = (
+            self.volume_types_client.get_encryption_type(
+                encryption_type['volume_type_id']))
         self.assertEqual(provider,
                          fetched_encryption_type['provider'],
                          'The fetched encryption_type provider is different '
@@ -143,11 +145,12 @@
                          'different from the created encryption_type')
 
         # Delete encryption type
-        self.client.delete_encryption_type(
+        self.volume_types_client.delete_encryption_type(
             encryption_type['volume_type_id'])
         resource = {"id": encryption_type['volume_type_id'],
                     "type": "encryption-type"}
-        self.client.wait_for_resource_deletion(resource)
-        _, deleted_encryption_type = self.client.get_encryption_type(
-            encryption_type['volume_type_id'])
+        self.volume_types_client.wait_for_resource_deletion(resource)
+        _, deleted_encryption_type = (
+            self.volume_types_client.get_encryption_type(
+                encryption_type['volume_type_id']))
         self.assertEmpty(deleted_encryption_type)
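Throughout the volume-type admin tests above, the generic ``self.client`` alias is replaced by the explicit ``self.volume_types_client``. A minimal, self-contained sketch (hypothetical classes, not the Tempest implementation) of why the explicit attribute reads better when a base class exposes several service clients::

    # Hypothetical illustration of the rename; not the actual Tempest code.
    class FakeVolumeTypesClient(object):
        def list_volume_types(self):
            return None, [{'name': 'lvm'}]


    class AdminClients(object):
        def __init__(self):
            types_client = FakeVolumeTypesClient()
            # Old style: one generic alias, readers must guess the service.
            self.client = types_client
            # New style: the attribute names the service it talks to.
            self.volume_types_client = types_client


    clients = AdminClients()
    _, body = clients.volume_types_client.list_volume_types()
    assert isinstance(body, list)
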
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index 2d72dd2..a154821 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -25,22 +25,23 @@
     def resource_setup(cls):
         super(VolumeTypesExtraSpecsTest, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type-')
-        _, cls.volume_type = cls.client.create_volume_type(vol_type_name)
+        _, cls.volume_type = cls.volume_types_client.create_volume_type(
+            vol_type_name)
 
     @classmethod
     def resource_cleanup(cls):
-        cls.client.delete_volume_type(cls.volume_type['id'])
+        cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
         super(VolumeTypesExtraSpecsTest, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_volume_type_extra_specs_list(self):
         # List Volume types extra specs.
         extra_specs = {"spec1": "val1"}
-        _, body = self.client.create_volume_type_extra_specs(
+        _, body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'], extra_specs)
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
-        _, body = self.client.list_volume_types_extra_specs(
+        _, body = self.volume_types_client.list_volume_types_extra_specs(
             self.volume_type['id'])
         self.assertIsInstance(body, dict)
         self.assertIn('spec1', body)
@@ -49,13 +50,13 @@
     def test_volume_type_extra_specs_update(self):
         # Update volume type extra specs
         extra_specs = {"spec2": "val1"}
-        _, body = self.client.create_volume_type_extra_specs(
+        _, body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'], extra_specs)
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
 
         extra_spec = {"spec2": "val2"}
-        _, body = self.client.update_volume_type_extra_specs(
+        _, body = self.volume_types_client.update_volume_type_extra_specs(
             self.volume_type['id'],
             extra_spec.keys()[0],
             extra_spec)
@@ -67,18 +68,18 @@
     def test_volume_type_extra_spec_create_get_delete(self):
         # Create/Get/Delete volume type extra spec.
         extra_specs = {"spec3": "val1"}
-        _, body = self.client.create_volume_type_extra_specs(
+        _, body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'],
             extra_specs)
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
 
-        self.client.get_volume_type_extra_specs(
+        self.volume_types_client.get_volume_type_extra_specs(
             self.volume_type['id'],
             extra_specs.keys()[0])
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly fetched")
 
-        self.client.delete_volume_type_extra_specs(
+        self.volume_types_client.delete_volume_type_extra_specs(
             self.volume_type['id'],
             extra_specs.keys()[0])
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index f3eee00..8734b16 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -29,104 +29,115 @@
         super(ExtraSpecsNegativeTest, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type-')
         cls.extra_specs = {"spec1": "val1"}
-        _, cls.volume_type = cls.client.create_volume_type(
+        _, cls.volume_type = cls.volume_types_client.create_volume_type(
             vol_type_name,
             extra_specs=cls.extra_specs)
 
     @classmethod
     def resource_cleanup(cls):
-        cls.client.delete_volume_type(cls.volume_type['id'])
+        cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
         super(ExtraSpecsNegativeTest, cls).resource_cleanup()
 
     @test.attr(type='gate')
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
         extra_spec = {"spec1": "val2"}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], extra_spec.keys()[0], None)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], extra_spec.keys()[0], None)
 
     @test.attr(type='gate')
     def test_update_nonexistent_extra_spec_id(self):
         # Should not update volume type extra specs with nonexistent id.
         extra_spec = {"spec1": "val2"}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], str(uuid.uuid4()),
-                          extra_spec)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], str(uuid.uuid4()),
+            extra_spec)
 
     @test.attr(type='gate')
     def test_update_none_extra_spec_id(self):
         # Should not update volume type extra specs with none id.
         extra_spec = {"spec1": "val2"}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], None, extra_spec)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], None, extra_spec)
 
     @test.attr(type='gate')
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
             # body.
         extra_spec = {"spec1": "val2", 'spec2': 'val1'}
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.update_volume_type_extra_specs,
-                          self.volume_type['id'], extra_spec.keys()[0],
-                          extra_spec)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.update_volume_type_extra_specs,
+            self.volume_type['id'], extra_spec.keys()[0],
+            extra_spec)
 
     @test.attr(type='gate')
     def test_create_nonexistent_type_id(self):
         # Should not create volume type extra spec for nonexistent volume
             # type id.
         extra_specs = {"spec2": "val1"}
-        self.assertRaises(exceptions.NotFound,
-                          self.client.create_volume_type_extra_specs,
-                          str(uuid.uuid4()), extra_specs)
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.create_volume_type_extra_specs,
+            str(uuid.uuid4()), extra_specs)
 
     @test.attr(type='gate')
     def test_create_none_body(self):
         # Should not create volume type extra spec for none POST body.
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.create_volume_type_extra_specs,
-                          self.volume_type['id'], None)
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.create_volume_type_extra_specs,
+            self.volume_type['id'], None)
 
     @test.attr(type='gate')
     def test_create_invalid_body(self):
         # Should not create volume type extra spec for invalid POST body.
-        self.assertRaises(exceptions.BadRequest,
-                          self.client.create_volume_type_extra_specs,
-                          self.volume_type['id'], ['invalid'])
+        self.assertRaises(
+            exceptions.BadRequest,
+            self.volume_types_client.create_volume_type_extra_specs,
+            self.volume_type['id'], ['invalid'])
 
     @test.attr(type='gate')
     def test_delete_nonexistent_volume_type_id(self):
         # Should not delete volume type extra spec for nonexistent
             # type id.
         extra_specs = {"spec1": "val1"}
-        self.assertRaises(exceptions.NotFound,
-                          self.client.delete_volume_type_extra_specs,
-                          str(uuid.uuid4()), extra_specs.keys()[0])
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.delete_volume_type_extra_specs,
+            str(uuid.uuid4()), extra_specs.keys()[0])
 
     @test.attr(type='gate')
     def test_list_nonexistent_volume_type_id(self):
         # Should not list volume type extra spec for nonexistent type id.
-        self.assertRaises(exceptions.NotFound,
-                          self.client.list_volume_types_extra_specs,
-                          str(uuid.uuid4()))
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.list_volume_types_extra_specs,
+            str(uuid.uuid4()))
 
     @test.attr(type='gate')
     def test_get_nonexistent_volume_type_id(self):
         # Should not get volume type extra spec for nonexistent type id.
         extra_specs = {"spec1": "val1"}
-        self.assertRaises(exceptions.NotFound,
-                          self.client.get_volume_type_extra_specs,
-                          str(uuid.uuid4()), extra_specs.keys()[0])
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.get_volume_type_extra_specs,
+            str(uuid.uuid4()), extra_specs.keys()[0])
 
     @test.attr(type='gate')
     def test_get_nonexistent_extra_spec_id(self):
         # Should not get volume type extra spec for nonexistent extra spec
             # id.
-        self.assertRaises(exceptions.NotFound,
-                          self.client.get_volume_type_extra_specs,
-                          self.volume_type['id'], str(uuid.uuid4()))
+        self.assertRaises(
+            exceptions.NotFound,
+            self.volume_types_client.get_volume_type_extra_specs,
+            self.volume_type['id'], str(uuid.uuid4()))
 
 
 class ExtraSpecsNegativeTestXML(ExtraSpecsNegativeTest):
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index c18e15d..a4d6431 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -35,18 +35,20 @@
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
         self.assertRaises(exceptions.BadRequest,
-                          self.client.create_volume_type, '')
+                          self.volume_types_client.create_volume_type, '')
 
     @test.attr(type='gate')
     def test_get_nonexistent_type_id(self):
         # Should not be able to get volume type with nonexistent type id.
-        self.assertRaises(exceptions.NotFound, self.client.get_volume_type,
+        self.assertRaises(exceptions.NotFound,
+                          self.volume_types_client.get_volume_type,
                           str(uuid.uuid4()))
 
     @test.attr(type='gate')
     def test_delete_nonexistent_type_id(self):
         # Should not be able to delete volume type with nonexistent type id.
-        self.assertRaises(exceptions.NotFound, self.client.delete_volume_type,
+        self.assertRaises(exceptions.NotFound,
+                          self.volume_types_client.delete_volume_type,
                           str(uuid.uuid4()))
 
 
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 7f5361d..d78ddb6 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -154,37 +154,34 @@
     @classmethod
     def resource_setup(cls):
         super(BaseVolumeAdminTest, cls).resource_setup()
-        cls.adm_user = CONF.identity.admin_username
-        cls.adm_pass = CONF.identity.admin_password
-        cls.adm_tenant = CONF.identity.admin_tenant_name
-        if not all((cls.adm_user, cls.adm_pass, cls.adm_tenant)):
-            msg = ("Missing Volume Admin API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
 
-        if CONF.compute.allow_tenant_isolation:
-            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
-                                         interface=cls._interface)
-        else:
-            cls.os_adm = clients.AdminManager(interface=cls._interface)
+        try:
+            cls.adm_creds = cls.isolated_creds.get_admin_creds()
+            cls.os_adm = clients.Manager(
+                credentials=cls.adm_creds, interface=cls._interface)
+        except NotImplementedError:
+            msg = "Missing Volume Admin API credentials in configuration."
+            raise cls.skipException(msg)
 
         cls.qos_specs = []
 
-        cls.client = cls.os_adm.volume_types_client
         cls.hosts_client = cls.os_adm.volume_hosts_client
         cls.quotas_client = cls.os_adm.volume_quotas_client
-        cls.volume_types_client = cls.os_adm.volume_types_client
 
         if cls._api_version == 1:
             if not CONF.volume_feature_enabled.api_v1:
                 msg = "Volume API v1 is disabled"
                 raise cls.skipException(msg)
             cls.volume_qos_client = cls.os_adm.volume_qos_client
+            cls.volume_types_client = cls.os_adm.volume_types_client
+            cls.volume_client = cls.os_adm.volumes_client
         elif cls._api_version == 2:
             if not CONF.volume_feature_enabled.api_v2:
                 msg = "Volume API v2 is disabled"
                 raise cls.skipException(msg)
             cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
+            cls.volume_types_client = cls.os_adm.volume_types_v2_client
+            cls.volume_client = cls.os_adm.volumes_v2_client
 
     @classmethod
     def resource_cleanup(cls):
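The rewritten ``BaseVolumeAdminTest.resource_setup`` above obtains admin credentials from the isolated-credentials provider and binds version-specific clients (``volume_types_client``, ``volume_client``) depending on ``_api_version``. A minimal sketch of that selection pattern, with illustrative stand-in objects::

    # Stand-in for the client manager; attribute names follow this diff.
    class FakeAdminManager(object):
        volume_types_client = 'v1 volume types client'
        volume_types_v2_client = 'v2 volume types client'


    def pick_volume_types_client(os_adm, api_version):
        # Mirrors the if/elif in resource_setup: one attribute per API version.
        if api_version == 1:
            return os_adm.volume_types_client
        elif api_version == 2:
            return os_adm.volume_types_v2_client
        raise ValueError('Unsupported volume API version: %s' % api_version)


    print(pick_volume_types_client(FakeAdminManager(), 2))
    # -> v2 volume types client
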
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 90ac9c1..fe217c1 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -30,15 +30,16 @@
         super(VolumesV2TransfersTest, cls).resource_setup()
 
         # Add another tenant to test volume-transfer
-        if CONF.compute.allow_tenant_isolation:
-            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
-                                         interface=cls._interface)
-            # Add admin tenant to cleanup resources
-            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
-                                         interface=cls._interface)
-        else:
-            cls.os_alt = clients.AltManager()
-            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+        cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
+                                     interface=cls._interface)
+        # Add admin tenant to cleanup resources
+        try:
+            creds = cls.isolated_creds.get_admin_creds()
+            cls.os_adm = clients.Manager(
+                credentials=creds, interface=cls._interface)
+        except NotImplementedError:
+            msg = "Missing Volume Admin API credentials in configuration."
+            raise cls.skipException(msg)
 
         cls.client = cls.volumes_client
         cls.alt_client = cls.os_alt.volumes_client
diff --git a/tempest/api_schema/response/messaging/v1/queues.py b/tempest/api_schema/response/messaging/v1/queues.py
index f0b2691..09e0147 100644
--- a/tempest/api_schema/response/messaging/v1/queues.py
+++ b/tempest/api_schema/response/messaging/v1/queues.py
@@ -105,7 +105,9 @@
 
 resource_schema = {
     'type': 'array',
-    'items': 'string',
+    'items': {
+        'type': 'string'
+    },
     'minItems': 1
 }
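This hunk fixes ``resource_schema``: in JSON Schema the ``items`` keyword must itself be a schema object, so the bare string ``'string'`` is replaced by ``{'type': 'string'}``. A small check with the ``jsonschema`` package (assumed to be available; it is not part of this change) shows the corrected schema accepting a list of names and rejecting non-strings::

    import jsonschema

    resource_schema = {
        'type': 'array',
        'items': {'type': 'string'},
        'minItems': 1,
    }

    # A non-empty list of strings validates cleanly.
    jsonschema.validate(['queue-a', 'queue-b'], resource_schema)

    # A list containing a non-string fails validation.
    try:
        jsonschema.validate([42], resource_schema)
    except jsonschema.ValidationError as exc:
        print(exc.message)
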
 
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index ca6d7fe..8dd2df2 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -14,46 +14,20 @@
 #    under the License.
 
 import functools
-import os
-import shlex
-import subprocess
 
+from tempest_lib.cli import base
 import testtools
 
-import tempest.cli.output_parser
+from tempest.common import credentials
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common import log as logging
 from tempest.openstack.common import versionutils
-import tempest.test
+from tempest import test
 
 
-LOG = logging.getLogger(__name__)
-
 CONF = config.CONF
 
 
-def execute(cmd, action, flags='', params='', fail_ok=False,
-            merge_stderr=False):
-    """Executes specified command for the given action."""
-    cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
-                    flags, action, params])
-    LOG.info("running: '%s'" % cmd)
-    cmd = shlex.split(cmd.encode('utf-8'))
-    result = ''
-    result_err = ''
-    stdout = subprocess.PIPE
-    stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
-    proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
-    result, result_err = proc.communicate()
-    if not fail_ok and proc.returncode != 0:
-        raise exceptions.CommandFailed(proc.returncode,
-                                       cmd,
-                                       result,
-                                       result_err)
-    return result
-
-
 def check_client_version(client, version):
     """Checks if the client's version is compatible with the given version
 
@@ -62,8 +36,8 @@
     @return: True if the client version is compatible with the given version
              parameter, False otherwise.
     """
-    current_version = execute(client, '', params='--version',
-                              merge_stderr=True)
+    current_version = base.execute(client, '', params='--version',
+                                   merge_stderr=True, cli_dir=CONF.cli.cli_dir)
 
     if not current_version.strip():
         raise exceptions.TempestException('"%s --version" output was empty' %
@@ -92,100 +66,19 @@
     return decorator
 
 
-class ClientTestBase(tempest.test.BaseTestCase):
+class ClientTestBase(base.ClientTestBase, test.BaseTestCase):
     @classmethod
     def resource_setup(cls):
         if not CONF.cli.enabled:
             msg = "cli testing disabled"
             raise cls.skipException(msg)
         super(ClientTestBase, cls).resource_setup()
+        cls.cred_prov = credentials.get_isolated_credentials(cls.__name__)
+        cls.creds = cls.cred_prov.get_admin_creds()
 
-    def __init__(self, *args, **kwargs):
-        self.parser = tempest.cli.output_parser
-        super(ClientTestBase, self).__init__(*args, **kwargs)
-
-    def nova(self, action, flags='', params='', admin=True, fail_ok=False):
-        """Executes nova command for the given action."""
-        flags += ' --endpoint-type %s' % CONF.compute.endpoint_type
-        return self.cmd_with_auth(
-            'nova', action, flags, params, admin, fail_ok)
-
-    def nova_manage(self, action, flags='', params='', fail_ok=False,
-                    merge_stderr=False):
-        """Executes nova-manage command for the given action."""
-        return execute(
-            'nova-manage', action, flags, params, fail_ok, merge_stderr)
-
-    def keystone(self, action, flags='', params='', admin=True, fail_ok=False):
-        """Executes keystone command for the given action."""
-        return self.cmd_with_auth(
-            'keystone', action, flags, params, admin, fail_ok)
-
-    def glance(self, action, flags='', params='', admin=True, fail_ok=False):
-        """Executes glance command for the given action."""
-        flags += ' --os-endpoint-type %s' % CONF.image.endpoint_type
-        return self.cmd_with_auth(
-            'glance', action, flags, params, admin, fail_ok)
-
-    def ceilometer(self, action, flags='', params='', admin=True,
-                   fail_ok=False):
-        """Executes ceilometer command for the given action."""
-        flags += ' --os-endpoint-type %s' % CONF.telemetry.endpoint_type
-        return self.cmd_with_auth(
-            'ceilometer', action, flags, params, admin, fail_ok)
-
-    def heat(self, action, flags='', params='', admin=True,
-             fail_ok=False):
-        """Executes heat command for the given action."""
-        flags += ' --os-endpoint-type %s' % CONF.orchestration.endpoint_type
-        return self.cmd_with_auth(
-            'heat', action, flags, params, admin, fail_ok)
-
-    def cinder(self, action, flags='', params='', admin=True, fail_ok=False):
-        """Executes cinder command for the given action."""
-        flags += ' --endpoint-type %s' % CONF.volume.endpoint_type
-        return self.cmd_with_auth(
-            'cinder', action, flags, params, admin, fail_ok)
-
-    def swift(self, action, flags='', params='', admin=True, fail_ok=False):
-        """Executes swift command for the given action."""
-        flags += ' --os-endpoint-type %s' % CONF.object_storage.endpoint_type
-        return self.cmd_with_auth(
-            'swift', action, flags, params, admin, fail_ok)
-
-    def neutron(self, action, flags='', params='', admin=True, fail_ok=False):
-        """Executes neutron command for the given action."""
-        flags += ' --endpoint-type %s' % CONF.network.endpoint_type
-        return self.cmd_with_auth(
-            'neutron', action, flags, params, admin, fail_ok)
-
-    def sahara(self, action, flags='', params='', admin=True,
-               fail_ok=False, merge_stderr=True):
-        """Executes sahara command for the given action."""
-        flags += ' --endpoint-type %s' % CONF.data_processing.endpoint_type
-        return self.cmd_with_auth(
-            'sahara', action, flags, params, admin, fail_ok, merge_stderr)
-
-    def cmd_with_auth(self, cmd, action, flags='', params='',
-                      admin=True, fail_ok=False, merge_stderr=False):
-        """Executes given command with auth attributes appended."""
-        # TODO(jogo) make admin=False work
-        creds = ('--os-username %s --os-tenant-name %s --os-password %s '
-                 '--os-auth-url %s' %
-                 (CONF.identity.admin_username,
-                  CONF.identity.admin_tenant_name,
-                  CONF.identity.admin_password,
-                  CONF.identity.uri))
-        flags = creds + ' ' + flags
-        return execute(cmd, action, flags, params, fail_ok, merge_stderr)
-
-    def assertTableStruct(self, items, field_names):
-        """Verify that all items has keys listed in field_names."""
-        for item in items:
-            for field in field_names:
-                self.assertIn(field, item)
-
-    def assertFirstLineStartsWith(self, lines, beginning):
-        self.assertTrue(lines[0].startswith(beginning),
-                        msg=('Beginning of first line has invalid content: %s'
-                             % lines[:3]))
+    def _get_clients(self):
+        clients = base.CLIClient(self.creds.username,
+                                 self.creds.password,
+                                 self.creds.tenant_name,
+                                 CONF.identity.uri, CONF.cli.cli_dir)
+        return clients
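With the switch to ``tempest_lib.cli.base``, the local ``execute()`` helper, the per-service wrapper methods and the table assertions all move out of Tempest; subclasses now build a ``tempest_lib`` ``CLIClient`` in ``_get_clients()`` and call service wrappers on ``self.clients``, as the per-service test modules below do. A rough usage sketch based only on the calls visible in this diff; the credentials, URI and CLI directory are placeholder values::

    from tempest_lib.cli import base

    # Placeholder credentials and paths; in the tests these come from the
    # credentials provider and CONF.
    clients = base.CLIClient('admin', 'secret', 'admin-tenant',
                             'http://keystone.example.com:5000/v2.0',
                             '/usr/local/bin')

    # Service wrappers replace the old self.nova()/self.cinder() helpers,
    # with the endpoint type passed explicitly by each test class.
    output = clients.nova('flavor-list', endpoint_type='publicURL')
    print(output)
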
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
deleted file mode 100644
index 80234a3..0000000
--- a/tempest/cli/output_parser.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Collection of utilities for parsing CLI clients output."""
-
-import re
-
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-delimiter_line = re.compile('^\+\-[\+\-]+\-\+$')
-
-
-def details_multiple(output_lines, with_label=False):
-    """Return list of dicts with item details from cli output tables.
-
-    If with_label is True, key '__label' is added to each items dict.
-    For more about 'label' see OutputParser.tables().
-    """
-    items = []
-    tables_ = tables(output_lines)
-    for table_ in tables_:
-        if 'Property' not in table_['headers'] \
-           or 'Value' not in table_['headers']:
-            raise exceptions.InvalidStructure()
-        item = {}
-        for value in table_['values']:
-            item[value[0]] = value[1]
-        if with_label:
-            item['__label'] = table_['label']
-        items.append(item)
-    return items
-
-
-def details(output_lines, with_label=False):
-    """Return dict with details of first item (table) found in output."""
-    items = details_multiple(output_lines, with_label)
-    return items[0]
-
-
-def listing(output_lines):
-    """Return list of dicts with basic item info parsed from cli output.
-    """
-
-    items = []
-    table_ = table(output_lines)
-    for row in table_['values']:
-        item = {}
-        for col_idx, col_key in enumerate(table_['headers']):
-            item[col_key] = row[col_idx]
-        items.append(item)
-    return items
-
-
-def tables(output_lines):
-    """Find all ascii-tables in output and parse them.
-
-    Return list of tables parsed from cli output as dicts.
-    (see OutputParser.table())
-
-    And, if found, label key (separated line preceding the table)
-    is added to each tables dict.
-    """
-    tables_ = []
-
-    table_ = []
-    label = None
-
-    start = False
-    header = False
-
-    if not isinstance(output_lines, list):
-        output_lines = output_lines.split('\n')
-
-    for line in output_lines:
-        if delimiter_line.match(line):
-            if not start:
-                start = True
-            elif not header:
-                # we are after head area
-                header = True
-            else:
-                # table ends here
-                start = header = None
-                table_.append(line)
-
-                parsed = table(table_)
-                parsed['label'] = label
-                tables_.append(parsed)
-
-                table_ = []
-                label = None
-                continue
-        if start:
-            table_.append(line)
-        else:
-            if label is None:
-                label = line
-            else:
-                LOG.warn('Invalid line between tables: %s' % line)
-    if len(table_) > 0:
-        LOG.warn('Missing end of table')
-
-    return tables_
-
-
-def table(output_lines):
-    """Parse single table from cli output.
-
-    Return dict with list of column names in 'headers' key and
-    rows in 'values' key.
-    """
-    table_ = {'headers': [], 'values': []}
-    columns = None
-
-    if not isinstance(output_lines, list):
-        output_lines = output_lines.split('\n')
-
-    if not output_lines[-1]:
-        # skip last line if empty (just newline at the end)
-        output_lines = output_lines[:-1]
-
-    for line in output_lines:
-        if delimiter_line.match(line):
-            columns = _table_columns(line)
-            continue
-        if '|' not in line:
-            LOG.warn('skipping invalid table line: %s' % line)
-            continue
-        row = []
-        for col in columns:
-            row.append(line[col[0]:col[1]].strip())
-        if table_['headers']:
-            table_['values'].append(row)
-        else:
-            table_['headers'] = row
-
-    return table_
-
-
-def _table_columns(first_table_row):
-    """Find column ranges in output line.
-
-    Return list of tuples (start,end) for each column
-    detected by plus (+) characters in delimiter line.
-    """
-    positions = []
-    start = 1  # there is '+' at 0
-    while start < len(first_table_row):
-        end = first_table_row.find('+', start)
-        if end == -1:
-            break
-        positions.append((start, end))
-        start = end + 1
-    return positions
diff --git a/tempest/cli/simple_read_only/compute/test_nova.py b/tempest/cli/simple_read_only/compute/test_nova.py
index 6e5e077..4fe4982 100644
--- a/tempest/cli/simple_read_only/compute/test_nova.py
+++ b/tempest/cli/simple_read_only/compute/test_nova.py
@@ -13,11 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest_lib import exceptions
 import testtools
 
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 from tempest.openstack.common import log as logging
 import tempest.test
 
@@ -47,6 +47,11 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyNovaClientTest, cls).resource_setup()
 
+    def nova(self, *args, **kwargs):
+        return self.clients.nova(*args,
+                                 endpoint_type=CONF.compute.endpoint_type,
+                                 **kwargs)
+
     def test_admin_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
                           self.nova,
diff --git a/tempest/cli/simple_read_only/compute/test_nova_manage.py b/tempest/cli/simple_read_only/compute/test_nova_manage.py
index cff543f..34ec671 100644
--- a/tempest/cli/simple_read_only/compute/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/compute/test_nova_manage.py
@@ -13,9 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest_lib import exceptions
+
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 from tempest.openstack.common import log as logging
 
 
@@ -46,6 +47,9 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyNovaManageTest, cls).resource_setup()
 
+    def nova_manage(self, *args, **kwargs):
+        return self.clients.nova_manage(*args, **kwargs)
+
     def test_admin_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
                           self.nova_manage,
diff --git a/tempest/cli/simple_read_only/data_processing/test_sahara.py b/tempest/cli/simple_read_only/data_processing/test_sahara.py
index 751a4ad..1f2403c 100644
--- a/tempest/cli/simple_read_only/data_processing/test_sahara.py
+++ b/tempest/cli/simple_read_only/data_processing/test_sahara.py
@@ -15,9 +15,10 @@
 import logging
 import re
 
+from tempest_lib import exceptions
+
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
@@ -40,6 +41,10 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlySaharaClientTest, cls).resource_setup()
 
+    def sahara(self, *args, **kwargs):
+        return self.clients.sahara(
+            *args, endpoint_type=CONF.data_processing.endpoint_type, **kwargs)
+
     @test.attr(type='negative')
     def test_sahara_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/identity/test_keystone.py b/tempest/cli/simple_read_only/identity/test_keystone.py
index 9218fcd..1fc7908 100644
--- a/tempest/cli/simple_read_only/identity/test_keystone.py
+++ b/tempest/cli/simple_read_only/identity/test_keystone.py
@@ -15,9 +15,10 @@
 
 import re
 
+from tempest_lib import exceptions
+
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 from tempest.openstack.common import log as logging
 
 CONF = config.CONF
@@ -34,6 +35,9 @@
     their own. They only verify the structure of output if present.
     """
 
+    def keystone(self, *args, **kwargs):
+        return self.clients.keystone(*args, **kwargs)
+
     def test_admin_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
                           self.keystone,
diff --git a/tempest/cli/simple_read_only/image/test_glance.py b/tempest/cli/simple_read_only/image/test_glance.py
index a9cbadb..03e00d7 100644
--- a/tempest/cli/simple_read_only/image/test_glance.py
+++ b/tempest/cli/simple_read_only/image/test_glance.py
@@ -15,9 +15,10 @@
 
 import re
 
+from tempest_lib import exceptions
+
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 from tempest.openstack.common import log as logging
 
 CONF = config.CONF
@@ -40,6 +41,11 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyGlanceClientTest, cls).resource_setup()
 
+    def glance(self, *args, **kwargs):
+        return self.clients.glance(*args,
+                                   endpoint_type=CONF.image.endpoint_type,
+                                   **kwargs)
+
     def test_glance_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
                           self.glance,
diff --git a/tempest/cli/simple_read_only/network/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
index f9f8906..2b3920d 100644
--- a/tempest/cli/simple_read_only/network/test_neutron.py
+++ b/tempest/cli/simple_read_only/network/test_neutron.py
@@ -15,9 +15,10 @@
 
 import re
 
+from tempest_lib import exceptions
+
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 from tempest.openstack.common import log as logging
 from tempest import test
 
@@ -41,6 +42,11 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyNeutronClientTest, cls).resource_setup()
 
+    def neutron(self, *args, **kwargs):
+        return self.clients.neutron(*args,
+                                    endpoint_type=CONF.network.endpoint_type,
+                                    **kwargs)
+
     @test.attr(type='smoke')
     def test_neutron_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/object_storage/test_swift.py b/tempest/cli/simple_read_only/object_storage/test_swift.py
index a162660..40c4c15 100644
--- a/tempest/cli/simple_read_only/object_storage/test_swift.py
+++ b/tempest/cli/simple_read_only/object_storage/test_swift.py
@@ -15,9 +15,10 @@
 
 import re
 
+from tempest_lib import exceptions
+
 from tempest import cli
 from tempest import config
-from tempest import exceptions
 
 CONF = config.CONF
 
@@ -37,6 +38,10 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlySwiftClientTest, cls).resource_setup()
 
+    def swift(self, *args, **kwargs):
+        return self.clients.swift(
+            *args, endpoint_type=CONF.object_storage.endpoint_type, **kwargs)
+
     def test_swift_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
                           self.swift,
diff --git a/tempest/cli/simple_read_only/orchestration/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
index 7d7f8c9..d3a9b01 100644
--- a/tempest/cli/simple_read_only/orchestration/test_heat.py
+++ b/tempest/cli/simple_read_only/orchestration/test_heat.py
@@ -42,6 +42,10 @@
             os.path.dirname(os.path.realpath(__file__))),
             'heat_templates/heat_minimal.yaml')
 
+    def heat(self, *args, **kwargs):
+        return self.clients.heat(
+            *args, endpoint_type=CONF.orchestration.endpoint_type, **kwargs)
+
     def test_heat_stack_list(self):
         self.heat('stack-list')
 
diff --git a/tempest/cli/simple_read_only/telemetry/test_ceilometer.py b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
index 45b793b..f9bf8b2 100644
--- a/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
+++ b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
@@ -39,6 +39,10 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyCeilometerClientTest, cls).resource_setup()
 
+    def ceilometer(self, *args, **kwargs):
+        return self.clients.ceilometer(
+            *args, endpoint_type=CONF.telemetry.endpoint_type, **kwargs)
+
     def test_ceilometer_meter_list(self):
         self.ceilometer('meter-list')
 
diff --git a/tempest/cli/simple_read_only/volume/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
index 45f6c41..6e1e7d3 100644
--- a/tempest/cli/simple_read_only/volume/test_cinder.py
+++ b/tempest/cli/simple_read_only/volume/test_cinder.py
@@ -16,11 +16,12 @@
 import logging
 import re
 
+from tempest_lib import exceptions
 import testtools
 
 from tempest import cli
 from tempest import config
-from tempest import exceptions
+
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
@@ -41,6 +42,11 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyCinderClientTest, cls).resource_setup()
 
+    def cinder(self, *args, **kwargs):
+        return self.clients.cinder(*args,
+                                   endpoint_type=CONF.volume.endpoint_type,
+                                   **kwargs)
+
     def test_cinder_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
                           self.cinder,
diff --git a/tempest/clients.py b/tempest/clients.py
index 2d07852..19b4e11 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -181,6 +181,8 @@
 from tempest.services.volume.json.qos_client import QosSpecsClientJSON
 from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
 from tempest.services.volume.json.volumes_client import VolumesClientJSON
+from tempest.services.volume.v2.json.admin.volume_types_client import \
+    VolumeTypesV2ClientJSON
 from tempest.services.volume.v2.json.availability_zone_client import \
     VolumeV2AvailabilityZoneClientJSON
 from tempest.services.volume.v2.json.extensions_client import \
@@ -332,6 +334,8 @@
             self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
             self.volume_types_client = VolumeTypesClientJSON(
                 self.auth_provider)
+            self.volume_types_v2_client = VolumeTypesV2ClientJSON(
+                self.auth_provider)
             self.identity_client = IdentityClientJSON(self.auth_provider)
             self.identity_v3_client = IdentityV3ClientJSON(
                 self.auth_provider)
@@ -443,20 +447,6 @@
             self.auth_provider)
 
 
-class AltManager(Manager):
-
-    """
-    Manager object that uses the alt_XXX credentials for its
-    managed client objects
-    """
-
-    def __init__(self, interface='json', service=None):
-        super(AltManager, self).__init__(
-            credentials=auth.get_default_credentials('alt_user'),
-            interface=interface,
-            service=service)
-
-
 class AdminManager(Manager):
 
     """
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
old mode 100644
new mode 100755
index 9ae3dfb..a305e42
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -12,7 +12,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-# @author: David Paterson
 
 """
 Utility for cleaning up environment after Tempest run
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index f5f0db3..8adfbef 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # Copyright 2014 Dell Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,11 +11,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-'''
-Created on Sep 3, 2014
 
-@author: David_Paterson
-'''
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest import test
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3c41dd9..0adc7e0 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -20,6 +20,7 @@
 """
 
 import argparse
+import collections
 import datetime
 import os
 import sys
@@ -43,7 +44,7 @@
 
 OPTS = {}
 USERS = {}
-RES = {}
+RES = collections.defaultdict(list)
 
 LOG = None
 
@@ -282,6 +283,8 @@
         If in check mode confirm that the oldest sample available is from
         before the upgrade.
         """
+        if not self.res.get('telemetry'):
+            return
         LOG.info("checking telemetry")
         for server in self.res['servers']:
             client = client_for_user(server['owner'])
@@ -508,6 +511,9 @@
 
 
 def create_volumes(volumes):
+    if not volumes:
+        return
+    LOG.info("Creating volumes")
     for volume in volumes:
         client = client_for_user(volume['owner'])
 
@@ -630,7 +636,7 @@
     global RES
     get_options()
     setup_logging()
-    RES = load_resources(OPTS.resources)
+    RES.update(load_resources(OPTS.resources))
 
     if OPTS.mode == 'create':
         create_resources()
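Turning ``RES`` into ``collections.defaultdict(list)`` and updating it in place (instead of rebinding the global) means that resource categories missing from the YAML, such as ``RES['servers']`` on a volumes-only resource file, yield an empty list rather than raising ``KeyError``. A minimal sketch of the behaviour::

    import collections

    RES = collections.defaultdict(list)
    RES.update({'volumes': [{'name': 'vol1', 'owner': 'javelin'}]})

    # A category absent from the loaded resources is simply empty, not an error.
    for server in RES['servers']:
        print(server)           # never reached, and no KeyError is raised
    print(len(RES['volumes']))  # 1
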
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 19ee6d5..2d5e686 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -57,3 +57,4 @@
     name: javelin1
     owner: javelin
     file: /etc/hosts
+telemetry: true
diff --git a/tempest/cmd/run_stress.py b/tempest/cmd/run_stress.py
index a3f185c..d21a441 100755
--- a/tempest/cmd/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -102,9 +102,11 @@
                                       call_inherited=ns.call_inherited)
 
     if ns.serial:
+        # ns.duration is the total time budget, split evenly across tests
+        duration = ns.duration / len(tests)
         for test in tests:
             step_result = driver.stress_openstack([test],
-                                                  ns.duration,
+                                                  duration,
                                                   ns.number,
                                                   ns.stop)
             # NOTE(mkoderer): we just save the last result code
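In serial mode the configured duration is now treated as a total budget split evenly across the selected stress tests instead of being applied to each test in turn; note that on Python 2 the division shown is integer division. A small sketch of the arithmetic::

    def per_test_duration(total_duration, num_tests):
        # Mirrors the serial-mode split: the total budget divided across tests.
        return total_duration // num_tests

    # With a 300 second budget and 3 stress tests, each test runs for 100 s.
    assert per_test_duration(300, 3) == 100
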
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 5046bff..f426e4d 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -122,6 +122,18 @@
                             not CONF.volume_feature_enabled.api_v2, update)
 
 
+def verify_api_versions(os, service, update):
+    verify = {
+        'cinder': verify_cinder_api_versions,
+        'glance': verify_glance_api_versions,
+        'keystone': verify_keystone_api_versions,
+        'nova': verify_nova_api_versions,
+    }
+    if service not in verify:
+        return
+    verify[service](os, update)
+
+
 def get_extension_client(os, service):
     extensions_client = {
         'nova': os.extensions_client,
@@ -337,10 +349,13 @@
         elif service not in services:
             continue
         results = verify_extensions(os, service, results)
-    verify_keystone_api_versions(os, update)
-    verify_glance_api_versions(os, update)
-    verify_nova_api_versions(os, update)
-    verify_cinder_api_versions(os, update)
+
+    # Verify API versions of all services in the keystone catalog and keystone
+    # itself.
+    services.append('keystone')
+    for service in services:
+        verify_api_versions(os, service, update)
+
     display_results(results, update, replace)
     if update:
         conf_file.close()
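The hard-coded sequence of per-service version checks is replaced by ``verify_api_versions``, a dispatch table keyed by service name that silently skips services without a verifier, so only services actually present in the keystone catalog (plus keystone itself) are checked. A minimal, self-contained sketch of the pattern; the verifier bodies are placeholders, not the real checks::

    def verify_cinder(os, update):
        print('checking cinder API versions')


    def verify_keystone(os, update):
        print('checking keystone API versions')


    VERIFIERS = {
        'cinder': verify_cinder,
        'keystone': verify_keystone,
    }


    def verify_api_versions(os, service, update):
        # Services without a registered verifier (e.g. 'swift' here) are
        # skipped rather than causing a failure.
        if service not in VERIFIERS:
            return
        VERIFIERS[service](os, update)


    for service in ['cinder', 'swift', 'keystone']:
        verify_api_versions(None, service, update=False)
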
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index 7423c17..66285e4 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -58,7 +58,15 @@
         return hash_dict
 
     def is_multi_user(self):
-        return len(self.hash_dict) > 1
+        # Default credentials are not a valid option with the locking Accounts provider
+        if self.use_default_creds:
+            raise exceptions.InvalidConfiguration(
+                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+        else:
+            return len(self.hash_dict) > 1
+
+    def is_multi_tenant(self):
+        return self.is_multi_user()
 
     def _create_hash_file(self, hash_string):
         path = os.path.join(os.path.join(self.accounts_dir, hash_string))
@@ -144,6 +152,27 @@
     to preserve the current behaviour of the serial tempest run.
     """
 
+    def _unique_creds(self, cred_arg=None):
+        """Verify that the configured credentials are valid and distinct."""
+        if self.use_default_creds:
+            try:
+                user = self.get_primary_creds()
+                alt_user = self.get_alt_creds()
+                return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
+            except exceptions.InvalidCredentials as ic:
+                msg = "At least one of the configured credentials is " \
+                      "not valid: %s" % ic.message
+                raise exceptions.InvalidConfiguration(msg)
+        else:
+            # TODO(andreaf) Add a uniqueness check here
+            return len(self.hash_dict) > 1
+
+    def is_multi_user(self):
+        return self._unique_creds('username')
+
+    def is_multi_tenant(self):
+        return self._unique_creds('tenant_id')
+
     def get_creds(self, id):
         try:
             # No need to sort the dict as within the same python process
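``NotLockingAccounts`` now decides multi-user / multi-tenant capability by comparing a single credential attribute (named by ``cred_arg``) on the primary and alt credentials when default credentials are in use. A small stand-alone sketch of that comparison, using stand-in credential objects::

    class FakeCreds(object):
        def __init__(self, username, tenant_id):
            self.username = username
            self.tenant_id = tenant_id


    def unique_creds(user, alt_user, cred_arg):
        # Two identities are usable only if the chosen attribute differs.
        return getattr(user, cred_arg) != getattr(alt_user, cred_arg)


    primary = FakeCreds('demo', 'tenant-a')
    alt = FakeCreds('alt_demo', 'tenant-a')
    print(unique_creds(primary, alt, 'username'))   # True  -> multi-user
    print(unique_creds(primary, alt, 'tenant_id'))  # False -> not multi-tenant
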
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index 56d34a5..c5be0c0 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -1,4 +1,5 @@
-# (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
 #    You may obtain a copy of the License at
@@ -43,3 +44,11 @@
     @abc.abstractmethod
     def clear_isolated_creds(self):
         return
+
+    @abc.abstractmethod
+    def is_multi_user(self):
+        return
+
+    @abc.abstractmethod
+    def is_multi_tenant(self):
+        return
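Declaring ``is_multi_user`` and ``is_multi_tenant`` as abstract methods forces every credential provider to answer those questions explicitly: the isolated-credentials provider always returns True (see below), while the account-based providers compute the answer. A minimal sketch of the abstract-base-class pattern; the class names are hypothetical and the use of ``six`` for the Python 2 compatible metaclass is assumed::

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class FakeCredentialProvider(object):
        @abc.abstractmethod
        def is_multi_user(self):
            return

        @abc.abstractmethod
        def is_multi_tenant(self):
            return


    class AlwaysIsolated(FakeCredentialProvider):
        # A provider that creates fresh accounts can always offer distinct
        # users and tenants.
        def is_multi_user(self):
            return True

        def is_multi_tenant(self):
            return True


    provider = AlwaysIsolated()
    assert provider.is_multi_user() and provider.is_multi_tenant()
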
diff --git a/tempest/common/credentials.py b/tempest/common/credentials.py
new file mode 100644
index 0000000..08b592f
--- /dev/null
+++ b/tempest/common/credentials.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common import accounts
+from tempest.common import isolated_creds
+from tempest import config
+
+CONF = config.CONF
+
+
+# Return the right implementation of CredentialProvider based on config
+# Dropping interface and password, as they are never used anyways
+# Dropping interface and password, as they are never used anyway
+def get_isolated_credentials(name, network_resources=None,
+                             force_tenant_isolation=False):
+    # If a test requires a new account to work, it can get one by forcing
+    # tenant isolation. A new account will be produced only for that test.
+    # In case admin credentials are not available for the account creation,
+    # the test should be skipped, otherwise it would fail.
+    if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
+        return isolated_creds.IsolatedCreds(
+            name=name,
+            network_resources=network_resources)
+    else:
+        if CONF.auth.locking_credentials_provider:
+            # Most params are not relevant for pre-created accounts
+            return accounts.Accounts(name=name)
+        else:
+            return accounts.NotLockingAccounts(name=name)
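The new ``get_isolated_credentials`` factory centralises the choice between tenant isolation, the locking ``Accounts`` provider and ``NotLockingAccounts``, so callers such as the CLI base class above no longer branch on configuration themselves. An illustrative use of the factory; it assumes a configured Tempest environment and uses only methods already referenced elsewhere in this diff::

    from tempest.common import credentials

    # The provider type returned depends on the CONF.auth settings.
    provider = credentials.get_isolated_credentials(
        'MyScenarioTest', force_tenant_isolation=False)
    creds = provider.get_primary_creds()
    print(creds.username)
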
diff --git a/tempest/common/generate_sample_tempest.py b/tempest/common/generate_sample_tempest.py
deleted file mode 100644
index ceb3394..0000000
--- a/tempest/common/generate_sample_tempest.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import sys
-
-import tempest.config
-from tempest.openstack.common.config import generator
-
-# NOTE(mtreinish): This hack is needed because of how oslo config is used in
-# tempest. Tempest is run from inside a test runner and so we can't rely on the
-# global CONF object being fully populated when we run a test. (test runners
-# don't init every file for running a test) So to get around that we manually
-# load the config file in tempest for each test class to ensure that every
-# config option is set. However, the tool expects the CONF object to be fully
-# populated when it inits all the files in the project. This just works around
-# the issue by manually loading the config file (which may or may not exist)
-# which will populate all the options before running the generator.
-
-
-if __name__ == "__main__":
-    tempest.config.register_opts()
-    generator.generate(sys.argv[1:])
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index b2edfee..228e47c 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -351,3 +351,9 @@
             except exceptions.NotFound:
                 LOG.warn("tenant with name: %s not found for delete" %
                          creds.tenant_name)
+
+    def is_multi_user(self):
+        return True
+
+    def is_multi_tenant(self):
+        return True
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 00fe8d2..c290dad 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -16,6 +16,7 @@
 
 import collections
 import json
+import logging as real_logging
 import re
 import time
 
@@ -36,8 +37,8 @@
 MAX_RECURSION_DEPTH = 2
 TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
 
-# All the successful HTTP status codes from RFC 2616
-HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
+# All the successful HTTP status codes from RFC 7231 & 4918
+HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206, 207)
 
 
 # convert a structure into a string safely
@@ -208,8 +209,9 @@
     @classmethod
     def expected_success(cls, expected_code, read_code):
         assert_msg = ("This function only allowed to use for HTTP status"
-                      "codes which explicitly defined in the RFC 2616. {0}"
-                      " is not a defined Success Code!").format(expected_code)
+                      " codes which are explicitly defined in RFC 7231 & 4918."
+                      " {0} is not a defined Success Code!"
+                      ).format(expected_code)
         if isinstance(expected_code, list):
             for code in expected_code:
                 assert code in HTTP_SUCCESS, assert_msg
@@ -310,14 +312,15 @@
         caller_name = misc_utils.find_test_caller()
         if secs:
             secs = " %.3fs" % secs
-        self.LOG.info(
-            'Request (%s): %s %s %s%s' % (
-                caller_name,
-                resp['status'],
-                method,
-                req_url,
-                secs),
-            extra=extra)
+        if not self.LOG.isEnabledFor(real_logging.DEBUG):
+            self.LOG.info(
+                'Request (%s): %s %s %s%s' % (
+                    caller_name,
+                    resp['status'],
+                    method,
+                    req_url,
+                    secs),
+                extra=extra)
 
         # Also look everything at DEBUG if you want to filter this
         # out, don't run at debug.
@@ -568,9 +571,10 @@
             if self.is_resource_deleted(id):
                 return
             if int(time.time()) - start_time >= self.build_timeout:
-                message = ('Failed to delete resource %(id)s within the '
-                           'required time (%(timeout)s s).' %
-                           {'id': id, 'timeout': self.build_timeout})
+                message = ('Failed to delete %(resource_type)s %(id)s within '
+                           'the required time (%(timeout)s s).' %
+                           {'resource_type': self.resource_type, 'id': id,
+                            'timeout': self.build_timeout})
                 caller = misc_utils.find_test_caller()
                 if caller:
                     message = '(%s) %s' % (caller, message)
@@ -585,6 +589,11 @@
                    % self.__class__.__name__)
         raise NotImplementedError(message)
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'resource'
+
     @classmethod
     def validate_response(cls, schema, resp, body):
         # Only check the response if the status code is a success code
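``wait_for_resource_deletion`` now names the kind of resource in its timeout message through a new ``resource_type`` property, which defaults to the generic ``'resource'`` and is intended to be overridden by service clients. A minimal sketch of how an override changes the error text; the client classes here are hypothetical::

    class FakeBaseClient(object):
        build_timeout = 60

        @property
        def resource_type(self):
            """Returns the primary type of resource this client works with."""
            return 'resource'

        def timeout_message(self, id):
            return ('Failed to delete %(resource_type)s %(id)s within '
                    'the required time (%(timeout)s s).' %
                    {'resource_type': self.resource_type, 'id': id,
                     'timeout': self.build_timeout})


    class FakeVolumeTypesClient(FakeBaseClient):
        @property
        def resource_type(self):
            return 'volume-type'


    print(FakeVolumeTypesClient().timeout_message('1234'))
    # -> Failed to delete volume-type 1234 within the required time (60 s).
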
diff --git a/tempest/config.py b/tempest/config.py
index 174a895..6e8238a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -20,6 +20,7 @@
 
 from oslo.config import cfg
 
+from tempest.openstack.common import lockutils
 from tempest.openstack.common import log as logging
 
 
@@ -38,9 +39,28 @@
                default='etc/accounts.yaml',
                help="Path to the yaml file that contains the list of "
                     "credentials to use for running tests"),
+    cfg.BoolOpt('allow_tenant_isolation',
+                default=False,
+                help="Allows test cases to create/destroy tenants and "
+                     "users. This option requires that OpenStack Identity "
+                     "API admin credentials are known. If false, isolated "
+                     "test cases and parallel execution can still be "
+                     "achieved by configuring a list of test accounts",
+                deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
+                                                   group='compute'),
+                                 cfg.DeprecatedOpt('allow_tenant_isolation',
+                                                   group='orchestration')]),
+    cfg.BoolOpt('locking_credentials_provider',
+                default=False,
+                help="If set to True it enables the Accounts provider, "
+                     "which locks credentials to allow for parallel execution "
+                     "with pre-provisioned accounts. It can only be used to "
+                     "run tests that ensure credentials cleanup happens. "
+                     "It requires at least `2 * CONC` distinct accounts "
+                     "configured in `test_accounts_file`, with CONC == the "
+                     "number of concurrent test processes."),
 ]
 
-
 identity_group = cfg.OptGroup(name='identity',
                               title="Keystone Configuration Options")
 
@@ -129,12 +149,6 @@
                              title='Compute Service Options')
 
 ComputeGroup = [
-    cfg.BoolOpt('allow_tenant_isolation',
-                default=False,
-                help="Allows test cases to create/destroy tenants and "
-                     "users. This option enables isolated test cases and "
-                     "better parallel execution, but also requires that "
-                     "OpenStack Identity API admin credentials are known."),
     cfg.StrOpt('image_ref',
                help="Valid primary image reference to be used in tests. "
                     "This is a required option"),
@@ -467,7 +481,10 @@
                 help="Allow the execution of IPv6 subnet tests that use "
                      "the extended IPv6 attributes ipv6_ra_mode "
                      "and ipv6_address_mode"
-                )
+                ),
+    cfg.BoolOpt('xml_api',
+                default=False,
+                help='If false, skip all network API tests that use XML')
 ]
 
 messaging_group = cfg.OptGroup(name='messaging',
@@ -514,7 +531,7 @@
                help='Time in seconds between volume availability checks.'),
     cfg.IntOpt('build_timeout',
                default=300,
-               help='Timeout in seconds to wait for a volume to become'
+               help='Timeout in seconds to wait for a volume to become '
                     'available.'),
     cfg.StrOpt('catalog_type',
                default='volume',
@@ -666,12 +683,6 @@
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the orchestration service."),
-    cfg.BoolOpt('allow_tenant_isolation',
-                default=False,
-                help="Allows test cases to create/destroy tenants and "
-                     "users. This option enables isolated test cases and "
-                     "better parallel execution, but also requires that "
-                     "OpenStack Identity API admin credentials are known."),
     cfg.IntOpt('build_interval',
                default=1,
                help="Time in seconds between build status checks."),
@@ -1020,44 +1031,60 @@
                help="Test generator class for all negative tests"),
 ]
 
+_opts = [
+    (auth_group, AuthGroup),
+    (compute_group, ComputeGroup),
+    (compute_features_group, ComputeFeaturesGroup),
+    (identity_group, IdentityGroup),
+    (identity_feature_group, IdentityFeatureGroup),
+    (image_group, ImageGroup),
+    (image_feature_group, ImageFeaturesGroup),
+    (network_group, NetworkGroup),
+    (network_feature_group, NetworkFeaturesGroup),
+    (messaging_group, MessagingGroup),
+    (volume_group, VolumeGroup),
+    (volume_feature_group, VolumeFeaturesGroup),
+    (object_storage_group, ObjectStoreGroup),
+    (object_storage_feature_group, ObjectStoreFeaturesGroup),
+    (database_group, DatabaseGroup),
+    (orchestration_group, OrchestrationGroup),
+    (telemetry_group, TelemetryGroup),
+    (dashboard_group, DashboardGroup),
+    (data_processing_group, DataProcessingGroup),
+    (boto_group, BotoGroup),
+    (compute_admin_group, ComputeAdminGroup),
+    (stress_group, StressGroup),
+    (scenario_group, ScenarioGroup),
+    (service_available_group, ServiceAvailableGroup),
+    (debug_group, DebugGroup),
+    (baremetal_group, BaremetalGroup),
+    (input_scenario_group, InputScenarioGroup),
+    (cli_group, CLIGroup),
+    (negative_group, NegativeGroup)
+]
+
 
 def register_opts():
-    register_opt_group(cfg.CONF, auth_group, AuthGroup)
-    register_opt_group(cfg.CONF, compute_group, ComputeGroup)
-    register_opt_group(cfg.CONF, compute_features_group,
-                       ComputeFeaturesGroup)
-    register_opt_group(cfg.CONF, identity_group, IdentityGroup)
-    register_opt_group(cfg.CONF, identity_feature_group,
-                       IdentityFeatureGroup)
-    register_opt_group(cfg.CONF, image_group, ImageGroup)
-    register_opt_group(cfg.CONF, image_feature_group, ImageFeaturesGroup)
-    register_opt_group(cfg.CONF, network_group, NetworkGroup)
-    register_opt_group(cfg.CONF, network_feature_group,
-                       NetworkFeaturesGroup)
-    register_opt_group(cfg.CONF, messaging_group, MessagingGroup)
-    register_opt_group(cfg.CONF, volume_group, VolumeGroup)
-    register_opt_group(cfg.CONF, volume_feature_group,
-                       VolumeFeaturesGroup)
-    register_opt_group(cfg.CONF, object_storage_group, ObjectStoreGroup)
-    register_opt_group(cfg.CONF, object_storage_feature_group,
-                       ObjectStoreFeaturesGroup)
-    register_opt_group(cfg.CONF, database_group, DatabaseGroup)
-    register_opt_group(cfg.CONF, orchestration_group, OrchestrationGroup)
-    register_opt_group(cfg.CONF, telemetry_group, TelemetryGroup)
-    register_opt_group(cfg.CONF, dashboard_group, DashboardGroup)
-    register_opt_group(cfg.CONF, data_processing_group,
-                       DataProcessingGroup)
-    register_opt_group(cfg.CONF, boto_group, BotoGroup)
-    register_opt_group(cfg.CONF, compute_admin_group, ComputeAdminGroup)
-    register_opt_group(cfg.CONF, stress_group, StressGroup)
-    register_opt_group(cfg.CONF, scenario_group, ScenarioGroup)
-    register_opt_group(cfg.CONF, service_available_group,
-                       ServiceAvailableGroup)
-    register_opt_group(cfg.CONF, debug_group, DebugGroup)
-    register_opt_group(cfg.CONF, baremetal_group, BaremetalGroup)
-    register_opt_group(cfg.CONF, input_scenario_group, InputScenarioGroup)
-    register_opt_group(cfg.CONF, cli_group, CLIGroup)
-    register_opt_group(cfg.CONF, negative_group, NegativeGroup)
+    for g, o in _opts:
+        register_opt_group(cfg.CONF, g, o)
+
+
+def list_opts():
+    """Return a list of oslo.config options available.
+
+    The purpose of this is to allow tools like the Oslo sample config file
+    generator to discover the options exposed to users.
+    """
+    optlist = [(g.name, o) for g, o in _opts]
+
+    # NOTE(jgrimm): Can be removed once oslo-incubator/oslo changes happen.
+    optlist.append((None, lockutils.util_opts))
+    optlist.append((None, logging.common_cli_opts))
+    optlist.append((None, logging.logging_cli_opts))
+    optlist.append((None, logging.generic_log_opts))
+    optlist.append((None, logging.log_opts))
+
+    return optlist
 
 
 # this should never be called outside of this class
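
A minimal sketch, not part of the patch, of how an external tool could consume the
new list_opts() hook (for example via an oslo.config.opts entry point, which is what
the deleted in-tree generator below relied on); it assumes only the
(group_name, [Opt, ...]) pairs built from _opts:

    from tempest import config

    for group, opts in config.list_opts():
        print('[%s]' % (group or 'DEFAULT'))
        for opt in opts:
            print('#%s = %s' % (opt.name, opt.default))
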
diff --git a/tempest/openstack/common/config/generator.py b/tempest/openstack/common/config/generator.py
deleted file mode 100644
index 664200e..0000000
--- a/tempest/openstack/common/config/generator.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright 2012 SINA Corporation
-# Copyright 2014 Cisco Systems, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-"""Extracts OpenStack config option info from module(s)."""
-
-from __future__ import print_function
-
-import argparse
-import imp
-import os
-import re
-import socket
-import sys
-import textwrap
-
-from oslo.config import cfg
-import six
-import stevedore.named
-
-from tempest.openstack.common import gettextutils
-from tempest.openstack.common import importutils
-
-gettextutils.install('tempest')
-
-STROPT = "StrOpt"
-BOOLOPT = "BoolOpt"
-INTOPT = "IntOpt"
-FLOATOPT = "FloatOpt"
-LISTOPT = "ListOpt"
-DICTOPT = "DictOpt"
-MULTISTROPT = "MultiStrOpt"
-
-OPT_TYPES = {
-    STROPT: 'string value',
-    BOOLOPT: 'boolean value',
-    INTOPT: 'integer value',
-    FLOATOPT: 'floating point value',
-    LISTOPT: 'list value',
-    DICTOPT: 'dict value',
-    MULTISTROPT: 'multi valued',
-}
-
-OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
-                                              FLOATOPT, LISTOPT, DICTOPT,
-                                              MULTISTROPT]))
-
-PY_EXT = ".py"
-BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                       "../../../../"))
-WORDWRAP_WIDTH = 60
-
-
-def raise_extension_exception(extmanager, ep, err):
-    raise
-
-
-def generate(argv):
-    parser = argparse.ArgumentParser(
-        description='generate sample configuration file',
-    )
-    parser.add_argument('-m', dest='modules', action='append')
-    parser.add_argument('-l', dest='libraries', action='append')
-    parser.add_argument('srcfiles', nargs='*')
-    parsed_args = parser.parse_args(argv)
-
-    mods_by_pkg = dict()
-    for filepath in parsed_args.srcfiles:
-        pkg_name = filepath.split(os.sep)[1]
-        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
-                            os.path.basename(filepath).split('.')[0]])
-        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
-    # NOTE(lzyeval): place top level modules before packages
-    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
-    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
-    pkg_names.extend(ext_names)
-
-    # opts_by_group is a mapping of group name to an options list
-    # The options list is a list of (module, options) tuples
-    opts_by_group = {'DEFAULT': []}
-
-    if parsed_args.modules:
-        for module_name in parsed_args.modules:
-            module = _import_module(module_name)
-            if module:
-                for group, opts in _list_opts(module):
-                    opts_by_group.setdefault(group, []).append((module_name,
-                                                                opts))
-
-    # Look for entry points defined in libraries (or applications) for
-    # option discovery, and include their return values in the output.
-    #
-    # Each entry point should be a function returning an iterable
-    # of pairs with the group name (or None for the default group)
-    # and the list of Opt instances for that group.
-    if parsed_args.libraries:
-        loader = stevedore.named.NamedExtensionManager(
-            'oslo.config.opts',
-            names=list(set(parsed_args.libraries)),
-            invoke_on_load=False,
-            on_load_failure_callback=raise_extension_exception
-        )
-        for ext in loader:
-            for group, opts in ext.plugin():
-                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
-                opt_list.append((ext.name, opts))
-
-    for pkg_name in pkg_names:
-        mods = mods_by_pkg.get(pkg_name)
-        mods.sort()
-        for mod_str in mods:
-            if mod_str.endswith('.__init__'):
-                mod_str = mod_str[:mod_str.rfind(".")]
-
-            mod_obj = _import_module(mod_str)
-            if not mod_obj:
-                raise RuntimeError("Unable to import module %s" % mod_str)
-
-            for group, opts in _list_opts(mod_obj):
-                opts_by_group.setdefault(group, []).append((mod_str, opts))
-
-    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
-    for group in sorted(opts_by_group.keys()):
-        print_group_opts(group, opts_by_group[group])
-
-
-def _import_module(mod_str):
-    try:
-        if mod_str.startswith('bin.'):
-            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
-            return sys.modules[mod_str[4:]]
-        else:
-            return importutils.import_module(mod_str)
-    except Exception as e:
-        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
-        return None
-
-
-def _is_in_group(opt, group):
-    """Check if opt is in group."""
-    for value in group._opts.values():
-        # NOTE(llu): Temporary workaround for bug #1262148, wait until
-        # newly released oslo.config support '==' operator.
-        if not(value['opt'] != opt):
-            return True
-    return False
-
-
-def _guess_groups(opt):
-    # is it in the DEFAULT group?
-    if _is_in_group(opt, cfg.CONF):
-        return 'DEFAULT'
-
-    # what other groups is it in?
-    for value in cfg.CONF.values():
-        if isinstance(value, cfg.CONF.GroupAttr):
-            if _is_in_group(opt, value._group):
-                return value._group.name
-
-    raise RuntimeError(
-        "Unable to find group for option %s, "
-        "maybe it's defined twice in the same group?"
-        % opt.name
-    )
-
-
-def _list_opts(obj):
-    def is_opt(o):
-        return (isinstance(o, cfg.Opt) and
-                not isinstance(o, cfg.SubCommandOpt))
-
-    opts = list()
-    for attr_str in dir(obj):
-        attr_obj = getattr(obj, attr_str)
-        if is_opt(attr_obj):
-            opts.append(attr_obj)
-        elif (isinstance(attr_obj, list) and
-              all(map(lambda x: is_opt(x), attr_obj))):
-            opts.extend(attr_obj)
-
-    ret = {}
-    for opt in opts:
-        ret.setdefault(_guess_groups(opt), []).append(opt)
-    return ret.items()
-
-
-def print_group_opts(group, opts_by_module):
-    print("[%s]" % group)
-    print('')
-    for mod, opts in opts_by_module:
-        print('#')
-        print('# Options defined in %s' % mod)
-        print('#')
-        print('')
-        for opt in opts:
-            _print_opt(opt)
-        print('')
-
-
-def _get_my_ip():
-    try:
-        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        csock.connect(('8.8.8.8', 80))
-        (addr, port) = csock.getsockname()
-        csock.close()
-        return addr
-    except socket.error:
-        return None
-
-
-def _sanitize_default(name, value):
-    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
-    hostname = socket.gethostname()
-    fqdn = socket.getfqdn()
-    if value.startswith(sys.prefix):
-        # NOTE(jd) Don't use os.path.join, because it is likely to think the
-        # second part is an absolute pathname and therefore drop the first
-        # part.
-        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
-    elif value.startswith(BASEDIR):
-        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
-    elif BASEDIR in value:
-        return value.replace(BASEDIR, '')
-    elif value == _get_my_ip():
-        return '10.0.0.1'
-    elif value in (hostname, fqdn):
-        if 'host' in name:
-            return 'tempest'
-    elif value.endswith(hostname):
-        return value.replace(hostname, 'tempest')
-    elif value.endswith(fqdn):
-        return value.replace(fqdn, 'tempest')
-    elif value.strip() != value:
-        return '"%s"' % value
-    return value
-
-
-def _print_opt(opt):
-    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
-    if not opt_help:
-        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
-        opt_help = ""
-    try:
-        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
-    except (ValueError, AttributeError) as err:
-        sys.stderr.write("%s\n" % str(err))
-        sys.exit(1)
-    opt_help = u'%s (%s)' % (opt_help,
-                             OPT_TYPES[opt_type])
-    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
-    if opt.deprecated_opts:
-        for deprecated_opt in opt.deprecated_opts:
-            if deprecated_opt.name:
-                deprecated_group = (deprecated_opt.group if
-                                    deprecated_opt.group else "DEFAULT")
-                print('# Deprecated group/name - [%s]/%s' %
-                      (deprecated_group,
-                       deprecated_opt.name))
-    try:
-        if opt_default is None:
-            print('#%s=<None>' % opt_name)
-        elif opt_type == STROPT:
-            assert(isinstance(opt_default, six.string_types))
-            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
-                                                          opt_default)))
-        elif opt_type == BOOLOPT:
-            assert(isinstance(opt_default, bool))
-            print('#%s=%s' % (opt_name, str(opt_default).lower()))
-        elif opt_type == INTOPT:
-            assert(isinstance(opt_default, int) and
-                   not isinstance(opt_default, bool))
-            print('#%s=%s' % (opt_name, opt_default))
-        elif opt_type == FLOATOPT:
-            assert(isinstance(opt_default, float))
-            print('#%s=%s' % (opt_name, opt_default))
-        elif opt_type == LISTOPT:
-            assert(isinstance(opt_default, list))
-            print('#%s=%s' % (opt_name, ','.join(opt_default)))
-        elif opt_type == DICTOPT:
-            assert(isinstance(opt_default, dict))
-            opt_default_strlist = [str(key) + ':' + str(value)
-                                   for (key, value) in opt_default.items()]
-            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
-        elif opt_type == MULTISTROPT:
-            assert(isinstance(opt_default, list))
-            if not opt_default:
-                opt_default = ['']
-            for default in opt_default:
-                print('#%s=%s' % (opt_name, default))
-        print('')
-    except Exception:
-        sys.stderr.write('Error in option "%s"\n' % opt_name)
-        sys.exit(1)
-
-
-def main():
-    generate(sys.argv[1:])
-
-if __name__ == '__main__':
-    main()
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 79207cd..990a392 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -23,8 +23,8 @@
 
 from tempest import auth
 from tempest import clients
+from tempest.common import credentials
 from tempest.common import debug
-from tempest.common import isolated_creds
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
 from tempest import config
@@ -51,8 +51,9 @@
     @classmethod
     def resource_setup(cls):
         super(ScenarioTest, cls).resource_setup()
-        # Using tempest client for isolated credentials as well
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
+        # TODO(andreaf) Some of the code from this resource_setup could be
+        # moved into `BaseTestCase`
+        cls.isolated_creds = credentials.get_isolated_credentials(
             cls.__name__, network_resources=cls.network_resources)
         cls.manager = clients.Manager(
             credentials=cls.credentials()
@@ -79,27 +80,19 @@
         cls.orchestration_client = cls.manager.orchestration_client
 
     @classmethod
-    def _get_credentials(cls, get_creds, ctype):
-        if CONF.compute.allow_tenant_isolation:
-            creds = get_creds()
-        else:
-            creds = auth.get_default_credentials(ctype)
-        return creds
-
-    @classmethod
     def credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_primary_creds,
-                                    'user')
+        return cls.isolated_creds.get_primary_creds()
 
     @classmethod
     def alt_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_alt_creds,
-                                    'alt_user')
+        return cls.isolated_creds.get_alt_creds()
 
     @classmethod
     def admin_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_admin_creds,
-                                    'identity_admin')
+        try:
+            return cls.isolated_creds.get_admin_creds()
+        except NotImplementedError:
+            raise cls.skipException('Admin Credentials are not available')
 
     # ## Methods to handle sync and async deletes
 
@@ -382,9 +375,16 @@
             _, servers = self.servers_client.list_servers()
             servers = servers['servers']
         for server in servers:
-            LOG.debug('Console output for %s', server['id'])
-            LOG.debug(self.servers_client.get_console_output(server['id'],
-                                                             length=None))
+            console_output = self.servers_client.get_console_output(
+                server['id'], length=None)
+            LOG.debug('Console output for %s\nhead=%s\nbody=\n%s',
+                      server['id'], console_output[0], console_output[1])
+
+    def _log_net_info(self, exc):
+        # network debug is called as part of ssh init
+        if not isinstance(exc, exceptions.SSHTimeout):
+            LOG.debug('Network information on a devstack host')
+            debug.log_net_debug()
 
     def create_server_snapshot(self, server, name=None):
         # Glance client
@@ -443,7 +443,9 @@
         if wait:
             self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
-    def ping_ip_address(self, ip_address, should_succeed=True):
+    def ping_ip_address(self, ip_address, should_succeed=True,
+                        ping_timeout=None):
+        timeout = ping_timeout or CONF.compute.ping_timeout
         cmd = ['ping', '-c1', '-w1', ip_address]
 
         def ping():
@@ -453,8 +455,7 @@
             proc.communicate()
             return (proc.returncode == 0) == should_succeed
 
-        return tempest.test.call_until_true(
-            ping, CONF.compute.ping_timeout, 1)
+        return tempest.test.call_until_true(ping, timeout, 1)
 
 
 class NetworkScenarioTest(ScenarioTest):
@@ -622,6 +623,29 @@
         self.assertIsNone(floating_ip.port_id)
         return floating_ip
 
+    def check_floating_ip_status(self, floating_ip, status):
+        """Verifies that the floating IP reaches the given status
+
+        :param floating_ip: net_resources.DeletableFloatingIp floating IP
+        to check the status of
+        :param status: target status
+        :raises: AssertionError if status doesn't match
+        """
+        def refresh():
+            floating_ip.refresh()
+            return status == floating_ip.status
+
+        tempest.test.call_until_true(refresh,
+                                     CONF.network.build_timeout,
+                                     CONF.network.build_interval)
+        self.assertEqual(status, floating_ip.status,
+                         message="FloatingIP: {fp} is at status: {cst}. "
+                                 "Failed to reach status: {st}"
+                         .format(fp=floating_ip, cst=floating_ip.status,
+                                 st=status))
+        LOG.info("FloatingIP: {fp} is at status: {st}"
+                 .format(fp=floating_ip, st=status))
+
     def _check_vm_connectivity(self, ip_address,
                                username=None,
                                private_key=None,
@@ -666,9 +690,7 @@
                 ex_msg += ": " + msg
             LOG.exception(ex_msg)
             self._log_console_output(servers)
-            # network debug is called as part of ssh init
-            if not isinstance(e, exceptions.SSHTimeout):
-                debug.log_net_debug()
+            self._log_net_info(e)
             raise
 
     def _check_tenant_network_connectivity(self, server,
@@ -692,9 +714,7 @@
         except Exception as e:
             LOG.exception('Tenant network connectivity check failed')
             self._log_console_output(servers_for_debug)
-            # network debug is called as part of ssh init
-            if not isinstance(e, exceptions.SSHTimeout):
-                debug.log_net_debug()
+            self._log_net_info(e)
             raise
 
     def _check_remote_connectivity(self, source, dest, should_succeed=True):
@@ -924,8 +944,8 @@
         router_id = CONF.network.public_router_id
         network_id = CONF.network.public_network_id
         if router_id:
-            result = client.show_router(router_id)
-            return net_resources.AttributeDict(**result['router'])
+            resp, body = client.show_router(router_id)
+            return net_resources.AttributeDict(**body['router'])
         elif network_id:
             router = self._create_router(client, tenant_id)
             router.set_gateway(network_id)
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index abda1f8..791c564 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -24,7 +24,6 @@
 
 class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
 
-    @test.skip_because(bug="1374175")
     def setUp(self):
         super(CfnInitScenarioTest, self).setUp()
         if not CONF.orchestration.image_ref:
@@ -84,7 +83,8 @@
         server_ip =\
             server['addresses'][CONF.compute.network_for_ssh][0]['addr']
 
-        if not self.ping_ip_address(server_ip):
+        if not self.ping_ip_address(
+            server_ip, ping_timeout=CONF.orchestration.build_timeout):
             self._log_console_output(servers=[server])
             self.fail(
                 "(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index 35571c6..ea10140 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -132,7 +132,6 @@
         # We expect the ephemeral partition to be mounted on /mnt and to have
         # the same size as our flavor definition.
         eph_size = self.get_flavor_ephemeral_size()
-        self.assertIsNotNone(eph_size)
         if eph_size > 0:
             preserve_ephemeral = True
 
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b111939..91b95a8 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -47,6 +47,8 @@
 
     def _wait_for_server_status(self, status):
         for server in self.servers:
+            # Make sure nova list keeps working throughout the build process
+            self.servers_client.list_servers()
             self.servers_client.wait_for_server_status(server['id'], status)
 
     def nova_boot(self):
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 8a8e387..3725477 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -16,6 +16,7 @@
 from tempest.common import custom_matchers
 from tempest.common import debug
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 from tempest import test
@@ -130,6 +131,17 @@
         self.addCleanup(self.servers_client.remove_security_group,
                         self.server['id'], secgroup['name'])
 
+        def wait_for_secgroup_add():
+            _, body = self.servers_client.get_server(self.server['id'])
+            return {'name': secgroup['name']} in body['security_groups']
+
+        if not test.call_until_true(wait_for_secgroup_add,
+                                    CONF.compute.build_timeout,
+                                    CONF.compute.build_interval):
+            msg = ('Timed out waiting for security group %s to be added to '
+                   'server %s' % (secgroup['id'], self.server['id']))
+            raise exceptions.TimeoutException(msg)
+
     @test.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
         self.glance_image_create()
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 58a028f..0c48334 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -87,6 +87,7 @@
         self._check_public_network_connectivity(floating_ip, username,
                                                 private_key, should_connect,
                                                 servers=[self.server])
+        self.check_floating_ip_status(self.floating_ip, 'ACTIVE')
 
     def _wait_server_status_and_check_network_connectivity(self):
         self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index de60745..ac4f004 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -176,16 +176,28 @@
 
     def _check_public_network_connectivity(self, should_connect=True,
                                            msg=None):
+        """Verifies connectivity to a VM via public network and floating IP
+        and verifies that the floating IP has the correct resource status.
+
+        :param should_connect: bool. Determines whether the connectivity
+        check is positive or negative.
+        :param msg: Failure message to add to the error message. Should
+        describe where in the test scenario the method was called,
+        to indicate the context of the failure
+        """
         ssh_login = CONF.compute.image_ssh_user
         floating_ip, server = self.floating_ip_tuple
         ip_address = floating_ip.floating_ip_address
         private_key = None
+        floatingip_status = 'DOWN'
         if should_connect:
             private_key = self._get_server_key(server)
+            floatingip_status = 'ACTIVE'
         # call the common method in the parent class
         super(TestNetworkBasicOps, self)._check_public_network_connectivity(
             ip_address, ssh_login, private_key, should_connect, msg,
             self.servers)
+        self.check_floating_ip_status(floating_ip, floatingip_status)
 
     def _disassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
@@ -350,6 +362,8 @@
         VMs are created with unique keypair so connectivity also asserts that
         floating IP is associated with the new VM instead of the old one
 
+        Verifies that floating IP status is updated correctly after each change
+
 
         """
         self._setup_network_and_servers()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 188dea8..6c36034 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -241,7 +241,11 @@
             'security_groups': security_groups,
             'tenant_id': tenant.creds.tenant_id
         }
-        return self.create_server(name=name, create_kwargs=create_kwargs)
+        server = self.create_server(name=name, create_kwargs=create_kwargs)
+        self.assertEqual(
+            sorted([s['name'] for s in security_groups]),
+            sorted([s['name'] for s in server['security_groups']]))
+        return server
 
     def _create_tenant_servers(self, tenant, num=1):
         for i in range(num):
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 1cb010d..09927d3 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -79,6 +79,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'aggregate'
+
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
         post_body = {
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 5452f3a..8faf8a7 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -99,6 +99,11 @@
                 return False
         return True
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'flavor'
+
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
         post_body = json.dumps({'extra_specs': specs})
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 8b020d0..0ed1720 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -102,6 +102,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'floating_ip'
+
     def list_floating_ip_pools(self, params=None):
         """Returns a list of all floating IP Pools."""
         url = 'os-floating-ip-pools'
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 4af8331..079a91e 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -143,3 +143,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image'
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 29859a9..733a50b 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -143,3 +143,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'security_group'
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 673e365..309dc5b 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -116,3 +116,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 960fe05..e11ed45 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -79,6 +79,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'aggregate'
+
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
         post_body = {
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index d1eee5b..fdca6b3 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -99,6 +99,11 @@
                 return False
         return True
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'flavor'
+
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
         post_body = json.dumps({'extra_specs': specs})
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
index 9c2d4aa..47cde65 100644
--- a/tempest/services/compute/xml/aggregates_client.py
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -94,6 +94,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'aggregate'
+
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
         post_body = xml_utils.Element("add_host", host=host)
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 68ef323..63d1a4d 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -136,6 +136,11 @@
                 return False
         return True
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'flavor'
+
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
         extra_specs = xml_utils.Element("extra_specs")
diff --git a/tempest/services/compute/xml/floating_ips_client.py b/tempest/services/compute/xml/floating_ips_client.py
index 730e870..84f06ab 100644
--- a/tempest/services/compute/xml/floating_ips_client.py
+++ b/tempest/services/compute/xml/floating_ips_client.py
@@ -108,6 +108,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'floating_ip'
+
     def list_floating_ip_pools(self, params=None):
         """Returns a list of all floating IP Pools."""
         url = 'os-floating-ip-pools'
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 94acf36..ce37b07 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -204,3 +204,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image'
diff --git a/tempest/services/compute/xml/security_groups_client.py b/tempest/services/compute/xml/security_groups_client.py
index 56ac7ba..e529623 100644
--- a/tempest/services/compute/xml/security_groups_client.py
+++ b/tempest/services/compute/xml/security_groups_client.py
@@ -159,3 +159,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'security_group'
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 156d889..06f1b83 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -349,8 +349,11 @@
             networks = xml_utils.Element("networks")
             server.append(networks)
             for network in kwargs['networks']:
-                s = xml_utils.Element("network", uuid=network['uuid'],
-                                      fixed_ip=network['fixed_ip'])
+                if 'fixed_ip' in network:
+                    s = xml_utils.Element("network", uuid=network['uuid'],
+                                          fixed_ip=network['fixed_ip'])
+                else:
+                    s = xml_utils.Element("network", uuid=network['uuid'])
                 networks.append(s)
 
         if 'meta' in kwargs:
diff --git a/tempest/services/compute/xml/volumes_extensions_client.py b/tempest/services/compute/xml/volumes_extensions_client.py
index e9c5035..da1764a 100644
--- a/tempest/services/compute/xml/volumes_extensions_client.py
+++ b/tempest/services/compute/xml/volumes_extensions_client.py
@@ -141,3 +141,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index df424ca..5ad416c 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -31,14 +31,11 @@
         self.endpoint_url = 'adminURL'
         self.api_version = "v3"
 
-    def create_user(self, user_name, **kwargs):
+    def create_user(self, user_name, password=None, project_id=None,
+                    email=None, domain_id='default', **kwargs):
         """Creates a user."""
-        password = kwargs.get('password', None)
-        email = kwargs.get('email', None)
         en = kwargs.get('enabled', True)
-        project_id = kwargs.get('project_id', None)
         description = kwargs.get('description', None)
-        domain_id = kwargs.get('domain_id', 'default')
         post_body = {
             'project_id': project_id,
             'description': description,
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 5c43692..fdc0a0a 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -95,14 +95,11 @@
         _json = common.xml_to_json(body)
         return _json
 
-    def create_user(self, user_name, **kwargs):
+    def create_user(self, user_name, password=None, project_id=None,
+                    email=None, domain_id='default', **kwargs):
         """Creates a user."""
-        password = kwargs.get('password', None)
-        email = kwargs.get('email', None)
         en = kwargs.get('enabled', 'true')
-        project_id = kwargs.get('project_id', None)
         description = kwargs.get('description', None)
-        domain_id = kwargs.get('domain_id', 'default')
         post_body = common.Element("user",
                                    xmlns=XMLNS,
                                    name=user_name,
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index bc5e04a..d0d32e5 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -240,6 +240,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image_meta'
+
     def get_image_membership(self, image_id):
         url = 'v1/images/%s/members' % image_id
         resp, body = self.get(url)
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index c420df9..4865073 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -117,6 +117,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'image'
+
     def store_image(self, image_id, data):
         url = 'v2/images/%s/file' % image_id
         headers = {'Content-Type': 'application/octet-stream'}
diff --git a/tempest/services/messaging/json/messaging_client.py b/tempest/services/messaging/json/messaging_client.py
index 3e82399..2794ea9 100644
--- a/tempest/services/messaging/json/messaging_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -48,22 +48,26 @@
     def create_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.put(uri, body=None)
+        self.expected_success(201, resp.status)
         return resp, body
 
     def get_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
+        self.expected_success(204, resp.status)
         return resp, body
 
     def head_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.head(uri)
+        self.expected_success(204, resp.status)
         return resp, body
 
     def delete_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
-        resp = self.delete(uri)
-        return resp
+        resp, body = self.delete(uri)
+        self.expected_success(204, resp.status)
+        return resp, body
 
     def get_queue_stats(self, queue_name):
         uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
@@ -75,12 +79,14 @@
     def get_queue_metadata(self, queue_name):
         uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body
 
     def set_queue_metadata(self, queue_name, rbody):
         uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
         resp, body = self.put(uri, body=json.dumps(rbody))
+        self.expected_success(204, resp.status)
         return resp, body
 
     def post_messages(self, queue_name, rbody):
@@ -90,6 +96,7 @@
                                headers=self.headers)
 
         body = json.loads(body)
+        self.validate_response(queues_schema.post_messages, resp, body)
         return resp, body
 
     def list_messages(self, queue_name):
@@ -126,7 +133,7 @@
 
     def delete_messages(self, message_uri):
         resp, body = self.delete(message_uri)
-        assert(resp['status'] == '204')
+        self.expected_success(204, resp.status)
         return resp, body
 
     def post_claims(self, queue_name, rbody, url_params=False):
@@ -152,10 +159,10 @@
 
     def update_claim(self, claim_uri, rbody):
         resp, body = self.patch(claim_uri, body=json.dumps(rbody))
-        assert(resp['status'] == '204')
+        self.expected_success(204, resp.status)
         return resp, body
 
     def release_claim(self, claim_uri):
         resp, body = self.delete(claim_uri)
-        assert(resp['status'] == '204')
+        self.expected_success(204, resp.status)
         return resp, body
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 16a4f5c..78ed56f 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -320,3 +320,30 @@
         self.rest_client.expected_success(201, resp.status)
         body = json.loads(body)
         return resp, body
+
+    def insert_firewall_rule_in_policy(self, firewall_policy_id,
+                                       firewall_rule_id, insert_after="",
+                                       insert_before=""):
+        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        body = {
+            "firewall_rule_id": firewall_rule_id,
+            "insert_after": insert_after,
+            "insert_before": insert_before
+        }
+        body = json.dumps(body)
+        resp, body = self.put(uri, body)
+        self.rest_client.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body
+
+    def remove_firewall_rule_from_policy(self, firewall_policy_id,
+                                         firewall_rule_id):
+        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        update_body = {"firewall_rule_id": firewall_rule_id}
+        update_body = json.dumps(update_body)
+        resp, body = self.put(uri, update_body)
+        self.rest_client.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body
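
An illustrative use of the two new FWaaS policy calls; the policy and rule
identifiers are placeholders:

    # Insert a rule before an existing one, then remove it again.
    resp, body = client.insert_firewall_rule_in_policy(
        fw_policy_id, fw_rule_id, insert_before=existing_rule_id)
    resp, body = client.remove_firewall_rule_from_policy(
        fw_policy_id, fw_rule_id)
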
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index 2b182d0..a84b4d5 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -52,7 +52,7 @@
         return
 
     @abc.abstractmethod
-    def show(self):
+    def refresh(self):
         return
 
     def __hash__(self):
@@ -62,7 +62,11 @@
         if not hasattr(self, 'status'):
             return
 
-        return self.client.wait_for_resource_status(self.show, status)
+        def helper_get():
+            self.refresh()
+            return self
+
+        return self.client.wait_for_resource_status(helper_get, status)
 
 
 class DeletableNetwork(DeletableResource):
@@ -116,6 +120,12 @@
 
 class DeletableFloatingIp(DeletableResource):
 
+    def refresh(self, *args, **kwargs):
+        _, result = self.client.show_floatingip(self.id,
+                                                *args,
+                                                **kwargs)
+        super(DeletableFloatingIp, self).update(**result['floatingip'])
+
     def update(self, *args, **kwargs):
         _, result = self.client.update_floatingip(self.id,
                                                   *args,
@@ -172,7 +182,6 @@
     def delete(self):
         self.client.delete_vip(self.id)
 
-    def show(self):
+    def refresh(self):
         _, result = self.client.show_vip(self.id)
-        super(DeletableVip, self).update(**result['vip'])
-        return self
+        super(DeletableVip, self).update(**result['vip'])
\ No newline at end of file
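
With show() renamed to refresh() and wait_for_status() re-reading the object through
the helper above, a new wrapper only has to fold the latest server-side body back
into itself. A minimal sketch in the style of this module; the widget resource and
its client calls are made-up names:

    class DeletableWidget(DeletableResource):

        def delete(self):
            self.client.delete_widget(self.id)

        def refresh(self):
            # Re-read the resource so wait_for_status() sees a fresh .status.
            _, result = self.client.show_widget(self.id)
            super(DeletableWidget, self).update(**result['widget'])
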
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 17b1f8e..4a8dddc 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -25,7 +25,8 @@
     # list of plurals used for xml serialization
     PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
                'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
-               'health_monitors', 'vips', 'members', 'allowed_address_pairs']
+               'health_monitors', 'vips', 'members', 'allowed_address_pairs',
+               'firewall_rules', 'security_groups']
 
     def get_rest_client(self, auth_provider):
         rc = rest_client.RestClient(auth_provider)
@@ -281,6 +282,27 @@
         body = _root_tag_fetcher_and_xml_to_json_parse(body)
         return resp, body
 
+    def insert_firewall_rule_in_policy(self, firewall_policy_id,
+                                       firewall_rule_id, insert_after="",
+                                       insert_before=""):
+        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        rule = common.Element("firewall_rule_id", firewall_rule_id)
+        resp, body = self.put(uri, str(common.Document(rule)))
+        self.rest_client.expected_success(200, resp.status)
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
+    def remove_firewall_rule_from_policy(self, firewall_policy_id,
+                                         firewall_rule_id):
+        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+                                                          firewall_policy_id)
+        rule = common.Element("firewall_rule_id", firewall_rule_id)
+        resp, body = self.put(uri, str(common.Document(rule)))
+        self.rest_client.expected_success(200, resp.status)
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
 
 def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
     body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index 44ef9fe..eedf880 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -23,13 +23,13 @@
 CONF = config.CONF
 
 
-class VolumeTypesClientJSON(rest_client.RestClient):
+class BaseVolumeTypesClientJSON(rest_client.RestClient):
     """
     Client class to send CRUD Volume Types API requests to a Cinder endpoint
     """
 
     def __init__(self, auth_provider):
-        super(VolumeTypesClientJSON, self).__init__(auth_provider)
+        super(BaseVolumeTypesClientJSON, self).__init__(auth_provider)
 
         self.service = CONF.volume.catalog_type
         self.build_interval = CONF.volume.build_interval
@@ -55,6 +55,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-type/encryption-type'
+
     def list_volume_types(self, params=None):
         """List all the volume_types created."""
         url = 'types'
@@ -188,3 +193,7 @@
         resp, body = self.delete(
             "/types/%s/encryption/provider" % str(vol_type_id))
         self.expected_success(202, resp.status)
+
+
+class VolumeTypesClientJSON(BaseVolumeTypesClientJSON):
+    """Volume V1 Volume Types client"""
diff --git a/tempest/services/volume/json/qos_client.py b/tempest/services/volume/json/qos_client.py
index 6e0bee9..b647bc7 100644
--- a/tempest/services/volume/json/qos_client.py
+++ b/tempest/services/volume/json/qos_client.py
@@ -38,6 +38,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'qos'
+
     def wait_for_qos_operations(self, qos_id, operation, args=None):
         """Waits for a qos operations to be completed.
 
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index 1f8065b..e9d5b83 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -138,6 +138,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-snapshot'
+
     def reset_snapshot_status(self, snapshot_id, status):
         """Reset the specified snapshot's status."""
         post_body = json.dumps({'os-reset_status': {"status": status}})
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index c3a9269..cf2837b 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -187,6 +187,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
+
     def extend_volume(self, volume_id, extend_size):
         """Extend a volume."""
         post_body = {
diff --git a/tempest/openstack/common/config/__init__.py b/tempest/services/volume/v2/json/admin/__init__.py
similarity index 100%
rename from tempest/openstack/common/config/__init__.py
rename to tempest/services/volume/v2/json/admin/__init__.py
diff --git a/tempest/services/volume/v2/json/admin/volume_types_client.py b/tempest/services/volume/v2/json/admin/volume_types_client.py
new file mode 100644
index 0000000..76fa45d
--- /dev/null
+++ b/tempest/services/volume/v2/json/admin/volume_types_client.py
@@ -0,0 +1,28 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from tempest.services.volume.json.admin import volume_types_client
+
+
+class VolumeTypesV2ClientJSON(volume_types_client.BaseVolumeTypesClientJSON):
+    """
+    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
+    """
+
+    def __init__(self, auth_provider):
+        super(VolumeTypesV2ClientJSON, self).__init__(auth_provider)
+
+        self.api_version = "v2"
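
The new V2 client overrides nothing but api_version and inherits every request method from BaseVolumeTypesClientJSON. Assuming the shared base REST client uses that attribute when composing request URLs (that code is outside this diff), the pattern can be pictured with this small, purely illustrative sketch:

    class FakeBaseClient(object):
        """Stand-in for a versioned REST client, not the real Tempest class."""
        api_version = "v1"

        def build_url(self, path):
            # The real base client is assumed to prefix paths similarly.
            return "/%s/%s" % (self.api_version, path.lstrip("/"))


    class FakeV2Client(FakeBaseClient):
        def __init__(self):
            self.api_version = "v2"


    print(FakeBaseClient().build_url("types"))  # /v1/types
    print(FakeV2Client().build_url("types"))    # /v2/types
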
diff --git a/tempest/services/volume/xml/admin/volume_types_client.py b/tempest/services/volume/xml/admin/volume_types_client.py
index 679d097..2464016 100644
--- a/tempest/services/volume/xml/admin/volume_types_client.py
+++ b/tempest/services/volume/xml/admin/volume_types_client.py
@@ -205,3 +205,8 @@
         except exceptions.NotFound:
             return True
         return False
+
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-type'
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index ce98eea..fb591b1 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -153,6 +153,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume-snapshot'
+
     def reset_snapshot_status(self, snapshot_id, status):
         """Reset the specified snapshot's status."""
         post_body = common.Element("os-reset_status", status=status)
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index a8c1ae5..0fe7e0d 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -226,6 +226,11 @@
             return True
         return False
 
+    @property
+    def resource_type(self):
+        """Returns the primary type of resource this client works with."""
+        return 'volume'
+
     def attach_volume(self, volume_id, instance_uuid, mountpoint):
         """Attaches a volume to a given instance on a given mountpoint."""
         post_body = common.Element("os-attach",
diff --git a/tempest/test.py b/tempest/test.py
index 2ed6665..1c6265d 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -29,8 +29,8 @@
 import testtools
 
 from tempest import clients
+from tempest.common import credentials
 import tempest.common.generator.valid_generator as valid
-from tempest.common import isolated_creds
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import importutils
@@ -270,7 +270,14 @@
             try:
                 cls.tearDownClass()
             except Exception as te:
-                LOG.exception("tearDownClass failed: %s" % te)
+                tetype, _, _ = sys.exc_info()
+                # TODO(gmann): Until resource_setup & resource_cleanup are
+                # split up in a more structured way, log AttributeError as
+                # info instead of as an exception.
+                if tetype is AttributeError:
+                    LOG.info("tearDownClass failed: %s" % te)
+                else:
+                    LOG.exception("tearDownClass failed: %s" % te)
             try:
                 raise etype(value), None, trace
             finally:
@@ -333,31 +340,20 @@
         """
         Returns an OpenStack client manager
         """
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
-            cls.__name__, network_resources=cls.network_resources)
-
         force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
-        if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
-            creds = cls.isolated_creds.get_primary_creds()
-            if getattr(cls, '_interface', None):
-                os = clients.Manager(credentials=creds,
-                                     interface=cls._interface,
-                                     service=cls._service)
-            elif interface:
-                os = clients.Manager(credentials=creds,
-                                     interface=interface,
-                                     service=cls._service)
-            else:
-                os = clients.Manager(credentials=creds,
-                                     service=cls._service)
-        else:
-            if getattr(cls, '_interface', None):
-                os = clients.Manager(interface=cls._interface,
-                                     service=cls._service)
-            elif interface:
-                os = clients.Manager(interface=interface, service=cls._service)
-            else:
-                os = clients.Manager(service=cls._service)
+
+        cls.isolated_creds = credentials.get_isolated_credentials(
+            name=cls.__name__, network_resources=cls.network_resources,
+            force_tenant_isolation=force_tenant_isolation,
+        )
+
+        creds = cls.isolated_creds.get_primary_creds()
+        params = dict(credentials=creds, service=cls._service)
+        if getattr(cls, '_interface', None):
+            interface = cls._interface
+        if interface:
+            params['interface'] = interface
+        os = clients.Manager(**params)
         return os
 
     @classmethod
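
get_client_manager now hands credential selection to credentials.get_isolated_credentials instead of branching on allow_tenant_isolation inline. The factory itself is not part of this hunk; the sketch below only illustrates the contract the call site relies on, and the fallback provider class is invented for the example:

    from tempest.common import isolated_creds
    from tempest import config

    CONF = config.CONF


    class ConfiguredCredentialsProvider(object):
        """Illustrative placeholder for a provider backed by static accounts."""
        def __init__(self, name):
            self.name = name

        def get_primary_creds(self):
            raise NotImplementedError  # sketch only


    def get_isolated_credentials(name, network_resources=None,
                                 force_tenant_isolation=False):
        """Hedged sketch of the factory this hunk starts calling."""
        if force_tenant_isolation or CONF.compute.allow_tenant_isolation:
            # Dynamically created accounts, as the removed inline code did.
            return isolated_creds.IsolatedCreds(
                name, network_resources=network_resources)
        # Otherwise fall back to statically configured credentials.
        return ConfiguredCredentialsProvider(name)
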
diff --git a/tempest/tests/cli/test_cli.py b/tempest/tests/cli/test_cli.py
index 1fd5ccb..8f18dfc 100644
--- a/tempest/tests/cli/test_cli.py
+++ b/tempest/tests/cli/test_cli.py
@@ -13,17 +13,25 @@
 #    under the License.
 
 import mock
+from tempest_lib.cli import base as cli_base
 import testtools
 
 from tempest import cli
+from tempest import config
 from tempest import exceptions
 from tempest.tests import base
+from tempest.tests import fake_config
 
 
 class TestMinClientVersion(base.TestCase):
     """Tests for the min_client_version decorator.
     """
 
+    def setUp(self):
+        super(TestMinClientVersion, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
     def _test_min_version(self, required, installed, expect_skip):
 
         @cli.min_client_version(client='nova', version=required)
@@ -33,7 +41,7 @@
                 # expected so we need to fail.
                 self.fail('Should not have gotten past the decorator.')
 
-        with mock.patch.object(cli, 'execute',
+        with mock.patch.object(cli_base, 'execute',
                                return_value=installed) as mock_cmd:
             if expect_skip:
                 self.assertRaises(testtools.TestCase.skipException, fake,
@@ -41,6 +49,7 @@
             else:
                 fake(self, expect_skip)
             mock_cmd.assert_called_once_with('nova', '', params='--version',
+                                             cli_dir='/usr/local/bin',
                                              merge_stderr=True)
 
     def test_min_client_version(self):
@@ -52,7 +61,7 @@
         for case in cases:
             self._test_min_version(*case)
 
-    @mock.patch.object(cli, 'execute', return_value=' ')
+    @mock.patch.object(cli_base, 'execute', return_value=' ')
     def test_check_client_version_empty_output(self, mock_execute):
         # Tests that an exception is raised if the command output is empty.
         self.assertRaises(exceptions.TempestException,
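
Both patches now target cli_base.execute from tempest_lib rather than the removed tempest.cli copy, because mock has to replace the name where the code under test actually looks it up, and the fake config fixture is what supplies the /usr/local/bin cli_dir the assertion expects. A self-contained illustration of that patch-and-assert pattern follows; the stand-in module and version string are made up for the example:

    import mock


    class FakeCliBase(object):
        """Stand-in for tempest_lib.cli.base in this illustration."""
        @staticmethod
        def execute(cmd, action, params='', cli_dir='', merge_stderr=False):
            return 'real nova output'


    def nova_version():
        # The code under test resolves 'execute' through the module it
        # imported, so that is the object the test must patch.
        return FakeCliBase.execute('nova', '', params='--version',
                                   cli_dir='/usr/local/bin', merge_stderr=True)


    with mock.patch.object(FakeCliBase, 'execute',
                           return_value='2.18.0') as mock_cmd:
        assert nova_version() == '2.18.0'
    mock_cmd.assert_called_once_with('nova', '', params='--version',
                                     cli_dir='/usr/local/bin',
                                     merge_stderr=True)
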
diff --git a/tempest/tests/cli/test_command_failed.py b/tempest/tests/cli/test_command_failed.py
deleted file mode 100644
index 36a4fc8..0000000
--- a/tempest/tests/cli/test_command_failed.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest import exceptions
-from tempest.tests import base
-
-
-class TestOutputParser(base.TestCase):
-
-    def test_command_failed_exception(self):
-        returncode = 1
-        cmd = "foo"
-        stdout = "output"
-        stderr = "error"
-        try:
-            raise exceptions.CommandFailed(returncode, cmd, stdout, stderr)
-        except exceptions.CommandFailed as e:
-            self.assertIn(str(returncode), str(e))
-            self.assertIn(cmd, str(e))
-            self.assertIn(stdout, str(e))
-            self.assertIn(stderr, str(e))
diff --git a/tempest/tests/cli/test_output_parser.py b/tempest/tests/cli/test_output_parser.py
deleted file mode 100644
index 7ad270c..0000000
--- a/tempest/tests/cli/test_output_parser.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright 2014 NEC Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from tempest.cli import output_parser
-from tempest import exceptions
-from tempest.tests import base
-
-
-class TestOutputParser(base.TestCase):
-    OUTPUT_LINES = """
-+----+------+---------+
-| ID | Name | Status  |
-+----+------+---------+
-| 11 | foo  | BUILD   |
-| 21 | bar  | ERROR   |
-| 31 | bee  | None    |
-+----+------+---------+
-"""
-    OUTPUT_LINES2 = """
-+----+-------+---------+
-| ID | Name2 | Status2 |
-+----+-------+---------+
-| 41 | aaa   | SSSSS   |
-| 51 | bbb   | TTTTT   |
-| 61 | ccc   | AAAAA   |
-+----+-------+---------+
-"""
-
-    EXPECTED_TABLE = {'headers': ['ID', 'Name', 'Status'],
-                      'values': [['11', 'foo', 'BUILD'],
-                                 ['21', 'bar', 'ERROR'],
-                                 ['31', 'bee', 'None']]}
-    EXPECTED_TABLE2 = {'headers': ['ID', 'Name2', 'Status2'],
-                       'values': [['41', 'aaa', 'SSSSS'],
-                                  ['51', 'bbb', 'TTTTT'],
-                                  ['61', 'ccc', 'AAAAA']]}
-
-    def test_table_with_normal_values(self):
-        actual = output_parser.table(self.OUTPUT_LINES)
-        self.assertIsInstance(actual, dict)
-        self.assertEqual(self.EXPECTED_TABLE, actual)
-
-    def test_table_with_list(self):
-        output_lines = self.OUTPUT_LINES.split('\n')
-        actual = output_parser.table(output_lines)
-        self.assertIsInstance(actual, dict)
-        self.assertEqual(self.EXPECTED_TABLE, actual)
-
-    def test_table_with_invalid_line(self):
-        output_lines = self.OUTPUT_LINES + "aaaa"
-        actual = output_parser.table(output_lines)
-        self.assertIsInstance(actual, dict)
-        self.assertEqual(self.EXPECTED_TABLE, actual)
-
-    def test_tables_with_normal_values(self):
-        output_lines = 'test' + self.OUTPUT_LINES +\
-                       'test2' + self.OUTPUT_LINES2
-        expected = [{'headers': self.EXPECTED_TABLE['headers'],
-                     'label': 'test',
-                     'values': self.EXPECTED_TABLE['values']},
-                    {'headers': self.EXPECTED_TABLE2['headers'],
-                     'label': 'test2',
-                     'values': self.EXPECTED_TABLE2['values']}]
-        actual = output_parser.tables(output_lines)
-        self.assertIsInstance(actual, list)
-        self.assertEqual(expected, actual)
-
-    def test_tables_with_invalid_values(self):
-        output_lines = 'test' + self.OUTPUT_LINES +\
-                       'test2' + self.OUTPUT_LINES2 + '\n'
-        expected = [{'headers': self.EXPECTED_TABLE['headers'],
-                     'label': 'test',
-                     'values': self.EXPECTED_TABLE['values']},
-                    {'headers': self.EXPECTED_TABLE2['headers'],
-                     'label': 'test2',
-                     'values': self.EXPECTED_TABLE2['values']}]
-        actual = output_parser.tables(output_lines)
-        self.assertIsInstance(actual, list)
-        self.assertEqual(expected, actual)
-
-    def test_tables_with_invalid_line(self):
-        output_lines = 'test' + self.OUTPUT_LINES +\
-                       'test2' + self.OUTPUT_LINES2 +\
-                       '+----+-------+---------+'
-        expected = [{'headers': self.EXPECTED_TABLE['headers'],
-                     'label': 'test',
-                     'values': self.EXPECTED_TABLE['values']},
-                    {'headers': self.EXPECTED_TABLE2['headers'],
-                     'label': 'test2',
-                     'values': self.EXPECTED_TABLE2['values']}]
-
-        actual = output_parser.tables(output_lines)
-        self.assertIsInstance(actual, list)
-        self.assertEqual(expected, actual)
-
-    LISTING_OUTPUT = """
-+----+
-| ID |
-+----+
-| 11 |
-| 21 |
-| 31 |
-+----+
-"""
-
-    def test_listing(self):
-        expected = [{'ID': '11'}, {'ID': '21'}, {'ID': '31'}]
-        actual = output_parser.listing(self.LISTING_OUTPUT)
-        self.assertIsInstance(actual, list)
-        self.assertEqual(expected, actual)
-
-    def test_details_multiple_with_invalid_line(self):
-        self.assertRaises(exceptions.InvalidStructure,
-                          output_parser.details_multiple,
-                          self.OUTPUT_LINES)
-
-    DETAILS_LINES1 = """First Table
-+----------+--------+
-| Property | Value  |
-+----------+--------+
-| foo      | BUILD  |
-| bar      | ERROR  |
-| bee      | None   |
-+----------+--------+
-"""
-    DETAILS_LINES2 = """Second Table
-+----------+--------+
-| Property | Value  |
-+----------+--------+
-| aaa      | VVVVV  |
-| bbb      | WWWWW  |
-| ccc      | XXXXX  |
-+----------+--------+
-"""
-
-    def test_details_with_normal_line_label_false(self):
-        expected = {'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
-        actual = output_parser.details(self.DETAILS_LINES1)
-        self.assertEqual(expected, actual)
-
-    def test_details_with_normal_line_label_true(self):
-        expected = {'__label': 'First Table',
-                    'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
-        actual = output_parser.details(self.DETAILS_LINES1, with_label=True)
-        self.assertEqual(expected, actual)
-
-    def test_details_multiple_with_normal_line_label_false(self):
-        expected = [{'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
-                    {'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
-        actual = output_parser.details_multiple(self.DETAILS_LINES1 +
-                                                self.DETAILS_LINES2)
-        self.assertIsInstance(actual, list)
-        self.assertEqual(expected, actual)
-
-    def test_details_multiple_with_normal_line_label_true(self):
-        expected = [{'__label': 'First Table',
-                     'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
-                    {'__label': 'Second Table',
-                     'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
-        actual = output_parser.details_multiple(self.DETAILS_LINES1 +
-                                                self.DETAILS_LINES2,
-                                                with_label=True)
-        self.assertIsInstance(actual, list)
-        self.assertEqual(expected, actual)
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a28684e..6679c79 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -86,6 +86,24 @@
         self.assertIn('v2.0', versions)
         self.assertIn('v3.0', versions)
 
+    def test_verify_api_versions(self):
+        api_services = ['cinder', 'glance', 'keystone', 'nova']
+        fake_os = mock.MagicMock()
+        for svc in api_services:
+            m = 'verify_%s_api_versions' % svc
+            with mock.patch.object(verify_tempest_config, m) as verify_mock:
+                verify_tempest_config.verify_api_versions(fake_os, svc, True)
+                verify_mock.assert_called_once_with(fake_os, True)
+
+    def test_verify_api_versions_not_implemented(self):
+        api_services = ['cinder', 'glance', 'keystone', 'nova']
+        fake_os = mock.MagicMock()
+        for svc in api_services:
+            m = 'verify_%s_api_versions' % svc
+            with mock.patch.object(verify_tempest_config, m) as verify_mock:
+                verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
+                self.assertFalse(verify_mock.called)
+
     def test_verify_keystone_api_versions_no_v3(self):
         self.useFixture(mockpatch.PatchObject(
             verify_tempest_config, '_get_unversioned_endpoint',
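
The two new tests pin down how verify_api_versions is expected to dispatch: for each known service it calls the matching verify_<service>_api_versions helper with the client manager and the update flag, and it silently ignores service names it has no helper for. A hedged sketch of a dispatcher that would satisfy both tests (the real implementation lives in the verify_tempest_config command, not in this hunk):

    def verify_api_versions(os, service, update):
        """Sketch only: route to a per-service verifier when one exists."""
        verify = {
            'cinder': verify_cinder_api_versions,
            'glance': verify_glance_api_versions,
            'keystone': verify_keystone_api_versions,
            'nova': verify_nova_api_versions,
        }
        if service not in verify:
            return
        verify[service](os, update)
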
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index 3dc2199..9c3533d 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -16,7 +16,8 @@
 import shlex
 import subprocess
 
-import tempest.cli as cli
+from tempest_lib import exceptions
+
 from tempest.openstack.common import log as logging
 from tempest.tests import base
 
@@ -43,9 +44,9 @@
             result, result_err = proc.communicate()
             if proc.returncode != 0:
                 LOG.debug('error of %s:\n%s' % (cmd_str, result_err))
-                raise cli.CommandFailed(proc.returncode,
-                                        cmd,
-                                        result)
+                raise exceptions.CommandFailed(proc.returncode,
+                                               cmd,
+                                               result)
         finally:
             LOG.debug('output of %s:\n%s' % (cmd_str, result))
         return proc.returncode
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index 0fd41f9..ae7860d 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -34,7 +34,6 @@
         # Setup Test files
         self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
         self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
-        self.subunit_trace = os.path.join(self.directory, 'subunit-trace.py')
         self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
         self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
         self.init_file = os.path.join(self.test_dir, '__init__.py')
@@ -45,7 +44,6 @@
         shutil.copy('setup.py', self.setup_py)
         shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
         shutil.copy('tempest/tests/files/__init__.py', self.init_file)
-        shutil.copy('tools/subunit-trace.py', self.subunit_trace)
         # copy over the pretty_tox scripts
         shutil.copy('tools/pretty_tox.sh',
                     os.path.join(self.directory, 'pretty_tox.sh'))
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 3496dce..62073bd 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -498,7 +498,10 @@
         def _volume_state():
             volume.update(validate=True)
             try:
-                if volume.status != "available":
+                # NOTE(gmann): Only detach when the volume is attached.
+                # Checking 'status != "available"' is not enough to prove
+                # the volume is attached, since it can also be in the
+                # "error" state.
+                if volume.status == "in-use":
                     volume.detach(force=True)
             except BaseException:
                 LOG.exception("Failed to detach volume %s" % volume)
diff --git a/tempest/thirdparty/boto/test_ec2_network.py b/tempest/thirdparty/boto/test_ec2_network.py
index a75fb7b..132a5a8 100644
--- a/tempest/thirdparty/boto/test_ec2_network.py
+++ b/tempest/thirdparty/boto/test_ec2_network.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest import test
 from tempest.thirdparty.boto import test as boto_test
 
 
@@ -22,21 +21,22 @@
     @classmethod
     def resource_setup(cls):
         super(EC2NetworkTest, cls).resource_setup()
-        cls.client = cls.os.ec2api_client
+        cls.ec2_client = cls.os.ec2api_client
 
     # Note(afazekas): these tests are for things doable without an instance
-    @test.skip_because(bug="1080406")
     def test_disassociate_not_associated_floating_ip(self):
         # EC2 disassociate not associated floating ip
         ec2_codes = self.ec2_error_code
-        address = self.client.allocate_address()
+        address = self.ec2_client.allocate_address()
         public_ip = address.public_ip
-        rcuk = self.addResourceCleanUp(self.client.release_address, public_ip)
-        addresses_get = self.client.get_all_addresses(addresses=(public_ip,))
+        rcuk = self.addResourceCleanUp(self.ec2_client.release_address,
+                                       public_ip)
+        addresses_get = self.ec2_client.get_all_addresses(
+            addresses=(public_ip,))
         self.assertEqual(len(addresses_get), 1)
         self.assertEqual(addresses_get[0].public_ip, public_ip)
         self.assertBotoError(ec2_codes.client.InvalidAssociationID.NotFound,
                              address.disassociate)
-        self.client.release_address(public_ip)
-        self.cancelResourceCleanUp(rcuk)
+        self.ec2_client.release_address(public_ip)
         self.assertAddressReleasedWait(address)
+        self.cancelResourceCleanUp(rcuk)
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
index 0f0d77e..7b08695 100755
--- a/tools/config/check_uptodate.sh
+++ b/tools/config/check_uptodate.sh
@@ -15,7 +15,7 @@
 TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
 trap "rm -rf $TEMPDIR" EXIT
 
-tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+oslo-config-generator --config-file tools/config/config-generator.tempest.conf --output-file ${TEMPDIR}/${CFGFILE_NAME}
 if [ $? != 0 ]
 then
     exit 1
@@ -24,6 +24,6 @@
 if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
 then
    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
-   echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+   echo "${0##*/}: Please run tox -egenconfig."
    exit 1
 fi
diff --git a/tools/config/config-generator.tempest.conf b/tools/config/config-generator.tempest.conf
new file mode 100644
index 0000000..e5a02f8
--- /dev/null
+++ b/tools/config/config-generator.tempest.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+output_file = etc/tempest.conf.sample
+namespace = tempest.config
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
deleted file mode 100755
index d22b2f0..0000000
--- a/tools/config/generate_sample.sh
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env bash
-
-# Generate sample configuration for your project.
-#
-# Aside from the command line flags, it also respects a config file which
-# should be named oslo.config.generator.rc and be placed in the same directory.
-#
-# You can then export the following variables:
-# TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options.
-# TEMPEST_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover.
-# TEMPEST_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing.
-
-print_hint() {
-    echo "Try \`${0##*/} --help' for more information." >&2
-}
-
-PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
-                 --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
-
-if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
-
-eval set -- "$PARSED_OPTIONS"
-
-while true; do
-    case "$1" in
-        -h|--help)
-            echo "${0##*/} [options]"
-            echo ""
-            echo "options:"
-            echo "-h, --help                show brief help"
-            echo "-b, --base-dir=DIR        project base directory"
-            echo "-p, --package-name=NAME   project package name"
-            echo "-o, --output-dir=DIR      file output directory"
-            echo "-m, --module=MOD          extra python module to interrogate for options"
-            echo "-l, --library=LIB         extra library that registers options for discovery"
-            exit 0
-            ;;
-        -b|--base-dir)
-            shift
-            BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
-            shift
-            ;;
-        -p|--package-name)
-            shift
-            PACKAGENAME=`echo $1`
-            shift
-            ;;
-        -o|--output-dir)
-            shift
-            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
-            shift
-            ;;
-        -m|--module)
-            shift
-            MODULES="$MODULES -m $1"
-            shift
-            ;;
-        -l|--library)
-            shift
-            LIBRARIES="$LIBRARIES -l $1"
-            shift
-            ;;
-        --)
-            break
-            ;;
-    esac
-done
-
-BASEDIR=${BASEDIR:-`pwd`}
-if ! [ -d $BASEDIR ]
-then
-    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
-elif [[ $BASEDIR != /* ]]
-then
-    BASEDIR=$(cd "$BASEDIR" && pwd)
-fi
-
-PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
-TARGETDIR=$BASEDIR/$PACKAGENAME
-if ! [ -d $TARGETDIR ]
-then
-    echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
-fi
-
-OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
-# NOTE(bnemec): Some projects put their sample config in etc/,
-#               some in etc/$PACKAGENAME/
-if [ -d $OUTPUTDIR/$PACKAGENAME ]
-then
-    OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
-elif ! [ -d $OUTPUTDIR ]
-then
-    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
-    exit 1
-fi
-
-BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
-find $TARGETDIR -type f -name "*.pyc" -delete
-FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
-        -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
-
-RC_FILE="`dirname $0`/oslo.config.generator.rc"
-if test -r "$RC_FILE"
-then
-    source "$RC_FILE"
-fi
-
-for filename in ${TEMPEST_CONFIG_GENERATOR_EXCLUDED_FILES}; do
-    FILES="${FILES[@]/$filename/}"
-done
-
-for mod in ${TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES}; do
-    MODULES="$MODULES -m $mod"
-done
-
-for lib in ${TEMPEST_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
-    LIBRARIES="$LIBRARIES -l $lib"
-done
-
-export EVENTLET_NO_GREENDNS=yes
-
-OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
-[ "$OS_VARS" ] && eval "unset \$OS_VARS"
-DEFAULT_MODULEPATH=tempest.openstack.common.config.generator
-MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
-OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
-python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
-if [ $? != 0 ]
-then
-    echo "Can not generate $OUTPUTFILE"
-    exit 1
-fi
-
-# Hook to allow projects to append custom config file snippets
-CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
-for CONCAT_FILE in $CONCAT_FILES; do
-    cat $CONCAT_FILE >> $OUTPUTFILE
-done
diff --git a/tools/config/oslo.config.generator.rc b/tools/config/oslo.config.generator.rc
deleted file mode 100644
index 303e156..0000000
--- a/tools/config/oslo.config.generator.rc
+++ /dev/null
@@ -1 +0,0 @@
-MODULEPATH=tempest.common.generate_sample_tempest
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
index 0a04ce6..ff554c5 100755
--- a/tools/pretty_tox.sh
+++ b/tools/pretty_tox.sh
@@ -3,4 +3,4 @@
 set -o pipefail
 
 TESTRARGS=$1
-python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py --no-failure-debug -f
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace --no-failure-debug -f
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
index db70890..e0fca0f 100755
--- a/tools/pretty_tox_serial.sh
+++ b/tools/pretty_tox_serial.sh
@@ -7,7 +7,7 @@
 if [ ! -d .testrepository ]; then
     testr init
 fi
-testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py -f -n
+testr run --subunit $TESTRARGS | subunit-trace -f -n
 retval=$?
 testr slowest
 
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
deleted file mode 100755
index 57e58f2..0000000
--- a/tools/subunit-trace.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# Copyright 2014 Samsung Electronics
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Trace a subunit stream in reasonable detail and high accuracy."""
-
-import argparse
-import functools
-import re
-import sys
-
-import subunit
-import testtools
-
-DAY_SECONDS = 60 * 60 * 24
-FAILS = []
-RESULTS = {}
-
-
-def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
-    """Clean up the test name for display.
-
-    By default we strip out the tags in the test because they don't help us
-    in identifying the test that is run to it's result.
-
-    Make it possible to strip out the testscenarios information (not to
-    be confused with tempest scenarios) however that's often needed to
-    indentify generated negative tests.
-    """
-    if strip_tags:
-        tags_start = name.find('[')
-        tags_end = name.find(']')
-        if tags_start > 0 and tags_end > tags_start:
-            newname = name[:tags_start]
-            newname += name[tags_end + 1:]
-            name = newname
-
-    if strip_scenarios:
-        tags_start = name.find('(')
-        tags_end = name.find(')')
-        if tags_start > 0 and tags_end > tags_start:
-            newname = name[:tags_start]
-            newname += name[tags_end + 1:]
-            name = newname
-
-    return name
-
-
-def get_duration(timestamps):
-    start, end = timestamps
-    if not start or not end:
-        duration = ''
-    else:
-        delta = end - start
-        duration = '%d.%06ds' % (
-            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
-    return duration
-
-
-def find_worker(test):
-    for tag in test['tags']:
-        if tag.startswith('worker-'):
-            return int(tag[7:])
-    return 'NaN'
-
-
-# Print out stdout/stderr if it exists, always
-def print_attachments(stream, test, all_channels=False):
-    """Print out subunit attachments.
-
-    Print out subunit attachments that contain content. This
-    runs in 2 modes, one for successes where we print out just stdout
-    and stderr, and an override that dumps all the attachments.
-    """
-    channels = ('stdout', 'stderr')
-    for name, detail in test['details'].items():
-        # NOTE(sdague): the subunit names are a little crazy, and actually
-        # are in the form pythonlogging:'' (with the colon and quotes)
-        name = name.split(':')[0]
-        if detail.content_type.type == 'test':
-            detail.content_type.type = 'text'
-        if (all_channels or name in channels) and detail.as_text():
-            title = "Captured %s:" % name
-            stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
-            # indent attachment lines 4 spaces to make them visually
-            # offset
-            for line in detail.as_text().split('\n'):
-                stream.write("    %s\n" % line)
-
-
-def show_outcome(stream, test, print_failures=False):
-    global RESULTS
-    status = test['status']
-    # TODO(sdague): ask lifeless why on this?
-    if status == 'exists':
-        return
-
-    worker = find_worker(test)
-    name = cleanup_test_name(test['id'])
-    duration = get_duration(test['timestamps'])
-
-    if worker not in RESULTS:
-        RESULTS[worker] = []
-    RESULTS[worker].append(test)
-
-    # don't count the end of the return code as a fail
-    if name == 'process-returncode':
-        return
-
-    if status == 'success':
-        stream.write('{%s} %s [%s] ... ok\n' % (
-            worker, name, duration))
-        print_attachments(stream, test)
-    elif status == 'fail':
-        FAILS.append(test)
-        stream.write('{%s} %s [%s] ... FAILED\n' % (
-            worker, name, duration))
-        if not print_failures:
-            print_attachments(stream, test, all_channels=True)
-    elif status == 'skip':
-        stream.write('{%s} %s ... SKIPPED: %s\n' % (
-            worker, name, test['details']['reason'].as_text()))
-    else:
-        stream.write('{%s} %s [%s] ... %s\n' % (
-            worker, name, duration, test['status']))
-        if not print_failures:
-            print_attachments(stream, test, all_channels=True)
-
-    stream.flush()
-
-
-def print_fails(stream):
-    """Print summary failure report.
-
-    Currently unused, however there remains debate on inline vs. at end
-    reporting, so leave the utility function for later use.
-    """
-    if not FAILS:
-        return
-    stream.write("\n==============================\n")
-    stream.write("Failed %s tests - output below:" % len(FAILS))
-    stream.write("\n==============================\n")
-    for f in FAILS:
-        stream.write("\n%s\n" % f['id'])
-        stream.write("%s\n" % ('-' * len(f['id'])))
-        print_attachments(stream, f, all_channels=True)
-    stream.write('\n')
-
-
-def count_tests(key, value):
-    count = 0
-    for k, v in RESULTS.items():
-        for item in v:
-            if key in item:
-                if re.search(value, item[key]):
-                    count += 1
-    return count
-
-
-def run_time():
-    runtime = 0.0
-    for k, v in RESULTS.items():
-        for test in v:
-            runtime += float(get_duration(test['timestamps']).strip('s'))
-    return runtime
-
-
-def worker_stats(worker):
-    tests = RESULTS[worker]
-    num_tests = len(tests)
-    delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
-    return num_tests, delta
-
-
-def print_summary(stream):
-    stream.write("\n======\nTotals\n======\n")
-    stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
-                                           run_time()))
-    stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
-    stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
-    stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
-
-    # we could have no results, especially as we filter out the process-codes
-    if RESULTS:
-        stream.write("\n==============\nWorker Balance\n==============\n")
-
-        for w in range(max(RESULTS.keys()) + 1):
-            if w not in RESULTS:
-                stream.write(
-                    " - WARNING: missing Worker %s! "
-                    "Race in testr accounting.\n" % w)
-            else:
-                num, time = worker_stats(w)
-                stream.write(" - Worker %s (%s tests) => %ss\n" %
-                             (w, num, time))
-
-
-def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--no-failure-debug', '-n', action='store_true',
-                        dest='print_failures', help='Disable printing failure '
-                        'debug information in realtime')
-    parser.add_argument('--fails', '-f', action='store_true',
-                        dest='post_fails', help='Print failure debug '
-                        'information after the stream is proccesed')
-    return parser.parse_args()
-
-
-def main():
-    args = parse_args()
-    stream = subunit.ByteStreamToStreamResult(
-        sys.stdin, non_subunit_name='stdout')
-    outcomes = testtools.StreamToDict(
-        functools.partial(show_outcome, sys.stdout,
-                          print_failures=args.print_failures))
-    summary = testtools.StreamSummary()
-    result = testtools.CopyStreamResult([outcomes, summary])
-    result.startTestRun()
-    try:
-        stream.run(result)
-    finally:
-        result.stopTestRun()
-    if count_tests('status', '.*') == 0:
-        print("The test run didn't actually run any tests")
-        return 1
-    if args.post_fails:
-        print_fails(sys.stdout)
-    print_summary(sys.stdout)
-    return (0 if summary.wasSuccessful() else 1)
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/tox.ini b/tox.ini
index cab59a8..9f52f0d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,79 +3,83 @@
 minversion = 1.6
 skipsdist = True
 
-[testenv]
+[tempestenv]
+sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
          OS_TEST_PATH=./tempest/test_discover
+deps = -r{toxinidir}/requirements.txt
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+         OS_TEST_PATH=./tempest/tests
 usedevelop = True
 install_command = pip install -U {opts} {packages}
-whitelist_externals = bash
+whitelist_externals = *
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands = bash tools/pretty_tox.sh '{posargs}'
 
-
-[testenv:py26]
-setenv = OS_TEST_PATH=./tempest/tests
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
-
-[testenv:py33]
-setenv = OS_TEST_PATH=./tempest/tests
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
-
-[testenv:py34]
-setenv = OS_TEST_PATH=./tempest/tests
-         PYTHONHASHSEED=0
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
-
-[testenv:py27]
-setenv = OS_TEST_PATH=./tempest/tests
-commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+[testenv:genconfig]
+commands = oslo-config-generator --config-file tools/config/config-generator.tempest.conf
 
 [testenv:cover]
 setenv = OS_TEST_PATH=./tempest/tests
 commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
 
 [testenv:all]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+# 'all' includes slow tests
+setenv = {[tempestenv]setenv}
+         OS_TEST_TIMEOUT=1200
+deps = {[tempestenv]deps}
 commands =
   bash tools/pretty_tox.sh '{posargs}'
 
 [testenv:full]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
   bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
 
 [testenv:full-serial]
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
   bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
 
-[testenv:testr-full]
-sitepackages = True
-commands =
-  bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
-
 [testenv:heat-slow]
-sitepackages = True
-setenv = OS_TEST_TIMEOUT=1200
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+         OS_TEST_TIMEOUT=1200
+deps = {[tempestenv]deps}
 # The regex below is used to select heat api/scenario tests tagged as slow.
 commands =
   bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
 
 [testenv:large-ops]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
   python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}'
 
 [testenv:smoke]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
    bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}'
 
 [testenv:smoke-serial]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 # This is still serial because neutron doesn't work with parallel. See:
 # https://bugs.launchpad.net/tempest/+bug/1216076 so the neutron smoke
 # job would fail if we moved it to parallel.
@@ -83,29 +87,23 @@
    bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}'
 
 [testenv:stress]
-sitepackages = True
+sitepackages = {[tempestenv]sitepackages}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
 commands =
-    run-tempest-stress -a -d 3600 -S
+    run-tempest-stress '{posargs}'
 
 [testenv:venv]
 commands = {posargs}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
 
 [testenv:docs]
 commands = python setup.py build_sphinx {posargs}
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
 
 [testenv:pep8]
-setenv = PYTHONHASHSEED=0
 commands =
    flake8 {posargs}
    {toxinidir}/tools/config/check_uptodate.sh
 
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-
 [hacking]
 local-check-factory = tempest.hacking.checks.factory
 import_exceptions = tempest.services