Merge "Add unit test for  availability_zone_client"
diff --git a/.gitignore b/.gitignore
index 1777cb9..f584532 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 ChangeLog
 *.pyc
 etc/tempest.conf
+etc/tempest.conf.sample
 etc/logging.conf
 include/swift_objects/swift_small
 include/swift_objects/swift_medium
@@ -18,3 +19,4 @@
 .coverage*
 !.coveragerc
 cover/
+doc/source/_static/tempest.conf
diff --git a/HACKING.rst b/HACKING.rst
index c776c49..6ddb8ac 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -275,7 +275,7 @@
 Test Documentation
 ------------------
 For tests being added we need to require inline documentation in the form of
-docstings to explain what is being tested. In API tests for a new API a class
+docstrings to explain what is being tested. In API tests for a new API a class
 level docstring should be added to an API reference doc. If one doesn't exist
 a TODO comment should be put indicating that the reference needs to be added.
 For individual API test cases a method level docstring should be used to
@@ -314,6 +314,39 @@
          * Check written content in the instance booted from snapshot
         """
 
+Test Identification with Idempotent ID
+--------------------------------------
+
+Every function that provides a test must have an ``idempotent_id`` decorator
+whose argument is a unique ``uuid-4`` instance. This ID is used to complement
+the fully qualified test name and track test functionality through
+refactoring. The
+format of the metadata looks like::
+
+    @test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
+    def test_list_servers_with_detail(self):
+        # The created server should be in the detailed list of all servers
+        ...
+
+Tempest includes a ``check_uuid.py`` tool that will test for the existence
+and uniqueness of ``idempotent_id`` metadata for every test. By default the
+tool runs against the Tempest package by calling::
+
+    python check_uuid.py
+
+It can be invoked against any test suite by passing a package name::
+
+    python check_uuid.py --package <package_name>
+
+Tests without an ``idempotent_id`` can be automatically fixed by running
+the command with the ``--fix`` flag, which will modify the source package
+by inserting randomly generated uuids for every test that does not have
+one::
+
+    python check_uuid.py --fix
+
+The ``check_uuid.py`` tool is used as part of the tempest gate job
+to ensure that all tests have an ``idempotent_id`` decorator.
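+
+If you are adding the decorator by hand, any standard ``uuid-4`` generator
+works; for example, a plain Python one-liner (not a Tempest-specific tool)::
+
+    python -c "import uuid; print(uuid.uuid4())"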
+
 Branchless Tempest Considerations
 ---------------------------------
 
diff --git a/README.rst b/README.rst
index af24569..7a7dfa6 100644
--- a/README.rst
+++ b/README.rst
@@ -32,7 +32,7 @@
   parts as load generation for an OpenStack cloud.
 - Tempest should attempt to clean up after itself, whenever possible
   we should tear down resources when done.
-- Tempest should be self testing.
+- Tempest should be self-testing.
 
 Quickstart
 ----------
@@ -41,11 +41,13 @@
 will tell Tempest where to find the various OpenStack services and
 other testing behavior switches.
 
-The easiest way to create a configuration file is to copy the sample
-one in the ``etc/`` directory ::
+The easiest way to create a configuration file is to generate a sample
+in the ``etc/`` directory ::
 
     $> cd $TEMPEST_ROOT_DIR
-    $> cp etc/tempest.conf.sample etc/tempest.conf
+    $> oslo-config-generator --config-file \
+        tools/config/config-generator.tempest.conf \
+        --output-file etc/tempest.conf
 
 After that, open up the ``etc/tempest.conf`` file and edit the
 configuration variables to match valid data in your environment.
@@ -92,7 +94,7 @@
 
 Detailed configuration of Tempest is beyond the scope of this
 document see :ref:`tempest-configuration` for more details on configuring
-Tempest. The etc/tempest.conf.sample attempts to be a self documenting version
+Tempest. The etc/tempest.conf.sample attempts to be a self-documenting version
 of the configuration.
 
 You can generate a new sample tempest.conf file, run the following
@@ -107,7 +109,7 @@
 ----------
 
 Tempest also has a set of unit tests which test the Tempest code itself. These
-tests can be run by specifing the test discovery path::
+tests can be run by specifying the test discovery path::
 
     $> OS_TEST_PATH=./tempest/tests testr run --parallel
 
@@ -140,7 +142,7 @@
 support running with Python 3.4. A gating unit test job was added to also run
 Tempest's unit tests under Python 3.4. This means that the Tempest code at
 least imports under Python 3.4 and things that have unit test coverage will
-work on Python 3.4. However, because large parts of Tempest are self verifying
+work on Python 3.4. However, because large parts of Tempest are self-verifying
 there might be uncaught issues running on Python 3.4. So until there is a gating
 job which does a full Tempest run using Python 3.4 there isn't any guarantee
 that running Tempest under Python 3.4 is bug free.
diff --git a/tempest/openstack/__init__.py b/doc/source/_static/.keep
similarity index 100%
rename from tempest/openstack/__init__.py
rename to doc/source/_static/.keep
diff --git a/doc/source/conf.py b/doc/source/conf.py
index daa293c..3ec25ea 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -13,6 +13,19 @@
 
 import sys
 import os
+import subprocess
+
+# Build a tempest sample config file:
+def build_sample_config(app):
+    root_dir = os.path.dirname(
+        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+    subprocess.call(["oslo-config-generator", "--config-file",
+                     "tools/config/config-generator.tempest.conf",
+                     "--output-file", "doc/source/_static/tempest.conf"],
+                    cwd=root_dir)
+
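+# Sphinx calls setup() when it loads this file; hook the sample config
+# generation into the start of every documentation build.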
+def setup(app):
+    app.connect('builder-inited', build_sample_config)
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 0805544..3c109b5 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -24,7 +24,7 @@
 Tempest currently has 2 different ways in configuration to provide credentials
 to use when running tempest. One is a traditional set of configuration options
 in the tempest.conf file. These options are in the identity section and let you
-specify a regular user, a global admin user, and a alternate user set of
+specify a regular user, a global admin user, and an alternate user set of
 credentials. (which consist of a username, password, and project/tenant name)
 These options should be clearly labelled in the sample config file in the
 identity section.
@@ -142,7 +142,7 @@
  #. alt_password
  #. alt_tenant_name
 
-And in the auth secion:
+And in the auth section:
 
  #. allow_tenant_isolation = False
  #. comment out 'test_accounts_file' or keep it as empty
@@ -187,7 +187,7 @@
  #. image_ref_alt
 
 Both options are expecting an image id (not name) from nova. The *image_ref*
-option is what what will be used for booting the majority of servers in tempest.
+option is what will be used for booting the majority of servers in tempest.
 *image_ref_alt* is used for tests that require 2 images such as rebuild. If 2
 images are not available you can set both options to the same image_ref and
 those tests will be skipped.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f925018..e9f2161 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -10,6 +10,7 @@
    overview
    HACKING
    REVIEWING
+   plugin
 
 ------------
 Field Guides
@@ -37,6 +38,7 @@
    :maxdepth: 2
 
    configuration
+   sampleconf
 
 ---------------------
 Command Documentation
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
new file mode 100644
index 0000000..f92f63e
--- /dev/null
+++ b/doc/source/plugin.rst
@@ -0,0 +1,135 @@
+=============================
+Tempest Test Plugin Interface
+=============================
+
+Tempest has an external test plugin interface which enables anyone to integrate
+an external test suite as part of a tempest run. This will let any project
+leverage being run with the rest of the tempest suite while not requiring that
+the tests live in the tempest tree.
+
+Creating a plugin
+=================
+
+Creating a plugin is fairly straightforward and doesn't require much additional
+effort on top of creating a test suite using tempest-lib. One thing to note with
+doing this is that the interfaces exposed by tempest are not considered stable
+(with the exception of configuration variables, where every effort goes into
+ensuring backwards compatibility). You should not need to import anything from
+tempest itself except where explicitly noted. If there is an interface from
+tempest that you need to rely on in your plugin it likely needs to be migrated
+to tempest-lib. In that situation, file a bug, push a migration patch, etc. to
+expedite providing the interface in a reliable manner.
+
+Plugin Class
+------------
+
+To provide tempest with all the information it needs to run your plugin, you
+need to create a plugin class which tempest will load and call to get
+information when it needs it. To simplify creating this, tempest provides an
+abstract class that should be used as the parent for your plugin. To use it
+you would do something like the following::
+
+  from tempest.test_discover import plugin
+
+  class MyPlugin(plugin.TempestPlugin):
+
+Then you need to ensure you locally define all of the methods in the abstract
+class; you can refer to the API doc below for a reference of what that entails.
+
+Also, note that this abstract class will eventually likely live in tempest-lib;
+when that migration occurs a deprecation shim will be added to tempest so as
+not to break any existing plugins. When that happens, migrating to tempest-lib
+as the source for the abstract class will be prudent.
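+
+As a rough sketch only (the required method set is defined by the abstract
+class rendered below, and the ``my_plugin`` paths here are invented for the
+example), a skeletal plugin class might look like::
+
+  import os
+
+  from tempest.test_discover import plugin
+
+  class MyPlugin(plugin.TempestPlugin):
+      def load_tests(self):
+          # Return the directory to discover tests in and the base path.
+          base_path = os.path.split(os.path.dirname(
+              os.path.abspath(__file__)))[0]
+          test_dir = "my_plugin/tests"
+          full_test_dir = os.path.join(base_path, test_dir)
+          return full_test_dir, base_path
+
+      def register_opts(self, conf):
+          # This sketch registers no plugin-specific options.
+          pass
+
+      def get_opt_lists(self):
+          return []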
+
+Abstract Plugin Class
+^^^^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: tempest.test_discover.plugins.TempestPlugin
+   :members:
+
+Entry Point
+-----------
+
+Once you've created your plugin class you need to add an entry point to your
+project to enable tempest to find the plugin. The entry point must be added
+to the "tempest.test_plugins" namespace.
+
+If you are using pbr this is fairly straightforward; in the setup.cfg just add
+something like the following::
+
+  [entry_points]
+  tempest.test_plugins =
+      plugin_name = module.path:PluginClass
+
+Plugin Structure
+----------------
+
+While there are no hard and fast rules for the structure of a plugin (there are
+basically no constraints on what it looks like as long as the 2 steps above are
+done), there are some recommended patterns to follow to make it easy for people
+to contribute to and work with your plugin. For example, if you
+create a directory structure with something like::
+
+    plugin_dir/
+      config.py
+      plugin.py
+      tests/
+        api/
+        scenario/
+      services/
+        client.py
+
+That will mirror what people expect from tempest. The files and directories
+are used as follows:
+
+* **config.py**: contains any plugin-specific configuration variables
+* **plugin.py**: contains the plugin class used for the entry point
+* **tests**: the directory where test discovery will be run; all tests should
+             be under this dir
+* **services**: where the plugin-specific service clients live
+
+Additionally, when you're creating the plugin you likely want to follow all
+of the tempest developer and reviewer documentation to ensure that the tests
+being added in the plugin act and behave like the rest of tempest.
+
+Dealing with configuration options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Historically Tempest didn't provide external guarantees on its configuration
+options. However, with the introduction of the plugin interface this is no
+longer the case. An external plugin can rely on using any configuration option
+coming from Tempest; there will be at least a full deprecation cycle for any
+option before it's removed. However, the options provided by Tempest alone may
+not be sufficient for the plugin. If you need to add any plugin-specific
+configuration options you should use the ``register_opts`` and
+``get_opt_lists`` methods to pass them to Tempest when the plugin is loaded.
+When adding configuration options the ``register_opts`` method gets passed the
+CONF object from tempest. This enables the plugin to add options to both
+existing sections and also create new configuration sections for new options.
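+
+As an illustration (a sketch only; the group and option names are invented for
+the example), the two methods might cooperate like this::
+
+  from oslo_config import cfg
+
+  from tempest.test_discover import plugin
+
+  my_plugin_group = cfg.OptGroup(name='my_plugin', title='My plugin options')
+  MyPluginOpts = [
+      cfg.StrOpt('endpoint_type', default='publicURL',
+                 help="The endpoint type to use for my service."),
+  ]
+
+  class MyPlugin(plugin.TempestPlugin):
+      # ... load_tests() omitted for brevity ...
+
+      def register_opts(self, conf):
+          # Add a new section and its options to the CONF object tempest
+          # passes in when the plugin is loaded.
+          conf.register_group(my_plugin_group)
+          conf.register_opts(MyPluginOpts, group='my_plugin')
+
+      def get_opt_lists(self):
+          return [(my_plugin_group.name, MyPluginOpts)]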
+
+Using Plugins
+=============
+
+Tempest will automatically discover any installed plugins when it is run, so
+simply installing the python packages which contain your plugin is enough to
+use them with tempest; nothing else is required.
+
+However, you should take care when installing plugins. By their very nature
+there are no guarantees about the quality of a plugin when running tempest
+with plugins enabled. Additionally, while there is no limitation on running
+with multiple plugins, it's worth noting that poorly written plugins might not
+properly isolate their tests, which could cause unexpected cross-interactions
+between plugins.
+
+Notes for using plugins with virtualenvs
+----------------------------------------
+
+When using tempest inside a virtualenv (such as when running under tox) you
+have to ensure that the package that contains your plugin is either installed
+in the venv too or that the venv has system site-packages enabled. The
+virtualenv will isolate the tempest install from the rest of your system, so
+just installing the plugin package on your system and then running tempest
+inside a venv will not work.
+
+Tempest also exposes a tox job, ``all-plugin``, which will set up a tox
+virtualenv with system site-packages enabled. This lets you leverage tox
+without requiring you to manually install plugins in the tox venv before
+running tests.
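+
+For example, to run only your plugin's tests through that job, you might
+invoke (the test selection regex here is hypothetical)::
+
+  tox -eall-plugin -- my_plugin.tests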
diff --git a/doc/source/sampleconf.rst b/doc/source/sampleconf.rst
new file mode 100644
index 0000000..2a72971
--- /dev/null
+++ b/doc/source/sampleconf.rst
@@ -0,0 +1,14 @@
+.. _tempest-sampleconf:
+
+Sample Configuration File
+=========================
+
+The following is a sample Tempest configuration for adaptation and use. It is
+auto-generated from Tempest when this documentation is built, so
+if you are having issues with an option, please compare your version of
+Tempest with the version of this documentation.
+
+The sample configuration can also be viewed in `file form <_static/tempest.conf>`_.
+
+.. include:: _static/tempest.conf
+   :code:
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
deleted file mode 100644
index 1095e77..0000000
--- a/etc/tempest.conf.sample
+++ /dev/null
@@ -1,1211 +0,0 @@
-[DEFAULT]
-
-#
-# From oslo.log
-#
-
-# Print debugging output (set logging level to DEBUG instead of
-# default WARNING level). (boolean value)
-#debug = false
-
-# Print more verbose output (set logging level to INFO instead of
-# default WARNING level). (boolean value)
-#verbose = false
-
-# The name of a logging configuration file. This file is appended to
-# any existing logging configuration files. For details about logging
-# configuration files, see the Python logging module documentation.
-# (string value)
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append = <None>
-
-# DEPRECATED. A logging.Formatter log message format string which may
-# use any of the available logging.LogRecord attributes. This option
-# is deprecated.  Please use logging_context_format_string and
-# logging_default_format_string instead. (string value)
-#log_format = <None>
-
-# Format string for %%(asctime)s in log records. Default: %(default)s
-# . (string value)
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is set,
-# logging will go to stdout. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file = <None>
-
-# (Optional) The base directory used for relative --log-file paths.
-# (string value)
-# Deprecated group/name - [DEFAULT]/logdir
-#log_dir = <None>
-
-# Use syslog for logging. Existing syslog format is DEPRECATED and
-# will be changed later to honor RFC5424. (boolean value)
-#use_syslog = false
-
-# (Optional) Enables or disables syslog rfc5424 format for logging. If
-# enabled, prefixes the MSG part of the syslog message with APP-NAME
-# (RFC5424). The format without the APP-NAME is deprecated in K, and
-# will be removed in M, along with this option. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#use_syslog_rfc_format = true
-
-# Syslog facility to receive log lines. (string value)
-#syslog_log_facility = LOG_USER
-
-# Log output to standard error. (boolean value)
-#use_stderr = true
-
-# Format string to use for log messages with context. (string value)
-#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages without context. (string
-# value)
-#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Data to append to log format when level is DEBUG. (string value)
-#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format. (string
-# value)
-#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
-
-# List of logger=LEVEL pairs. (list value)
-#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
-
-# Enables or disables publication of error events. (boolean value)
-#publish_errors = false
-
-# The format for an instance that is passed with the log message.
-# (string value)
-#instance_format = "[instance: %(uuid)s] "
-
-# The format for an instance UUID that is passed with the log message.
-# (string value)
-#instance_uuid_format = "[instance: %(uuid)s] "
-
-# Enables or disables fatal status of deprecations. (boolean value)
-#fatal_deprecations = false
-
-#
-# From tempest.config
-#
-
-# Prefix to be added when generating the name for test resources. It
-# can be used to discover all resources associated with a specific
-# test run when running tempest on a real-life cloud (string value)
-#resources_prefix = tempest
-
-
-[auth]
-
-#
-# From tempest.config
-#
-
-# Path to the yaml file that contains the list of credentials to use
-# for running tests. If used when running in parallel you have to make
-# sure sufficient credentials are provided in the accounts file. For
-# example if no tests with roles are being run it requires at least `2
-# * CONC` distinct accounts configured in  the `test_accounts_file`,
-# with CONC == the number of concurrent test processes. (string value)
-#test_accounts_file = <None>
-
-# Allows test cases to create/destroy tenants and users. This option
-# requires that OpenStack Identity API admin credentials are known. If
-# false, isolated test cases and parallel execution, can still be
-# achieved configuring a list of test accounts (boolean value)
-# Deprecated group/name - [compute]/allow_tenant_isolation
-# Deprecated group/name - [orchestration]/allow_tenant_isolation
-#allow_tenant_isolation = true
-
-# Roles to assign to all users created by tempest (list value)
-#tempest_roles =
-
-# Only applicable when identity.auth_version is v3.Domain within which
-# isolated credentials are provisioned.The default "None" means that
-# the domain from theadmin user is used instead. (string value)
-#tenant_isolation_domain_name = <None>
-
-# If allow_tenant_isolation is set to True and Neutron is enabled
-# Tempest will try to create a useable network, subnet, and router
-# when needed for each tenant it  creates. However in some neutron
-# configurations, like with VLAN provider networks, this doesn't work.
-# So if set to False the isolated networks will not be created
-# (boolean value)
-#create_isolated_networks = true
-
-
-[baremetal]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the baremetal provisioning service (string value)
-#catalog_type = baremetal
-
-# Whether the Ironic nova-compute driver is enabled (boolean value)
-#driver_enabled = false
-
-# Driver name which Ironic uses (string value)
-#driver = fake
-
-# The endpoint type to use for the baremetal provisioning service
-# (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# Timeout for Ironic node to completely provision (integer value)
-#active_timeout = 300
-
-# Timeout for association of Nova instance and Ironic node (integer
-# value)
-#association_timeout = 30
-
-# Timeout for Ironic power transitions. (integer value)
-#power_timeout = 60
-
-# Timeout for unprovisioning an Ironic node. Takes longer since Kilo
-# as Ironic performs an extra step in Node cleaning. (integer value)
-#unprovision_timeout = 300
-
-
-[boto]
-
-#
-# From tempest.config
-#
-
-# EC2 URL (string value)
-#ec2_url = http://localhost:8773/services/Cloud
-
-# S3 URL (string value)
-#s3_url = http://localhost:8080
-
-# AWS Secret Key (string value)
-#aws_secret = <None>
-
-# AWS Access Key (string value)
-#aws_access = <None>
-
-# AWS Zone for EC2 tests (string value)
-#aws_zone = nova
-
-# S3 Materials Path (string value)
-#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
-
-# ARI Ramdisk Image manifest (string value)
-#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml
-
-# AMI Machine Image manifest (string value)
-#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml
-
-# AKI Kernel Image manifest (string value)
-#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml
-
-# Instance type (string value)
-#instance_type = m1.tiny
-
-# boto Http socket timeout (integer value)
-#http_socket_timeout = 3
-
-# boto num_retries on error (integer value)
-#num_retries = 1
-
-# Status Change Timeout (integer value)
-#build_timeout = 60
-
-# Status Change Test Interval (integer value)
-#build_interval = 1
-
-
-[compute]
-
-#
-# From tempest.config
-#
-
-# Valid primary image reference to be used in tests. This is a
-# required option (string value)
-#image_ref = <None>
-
-# Valid secondary image reference to be used in tests. This is a
-# required option, but if only one image is available duplicate the
-# value of image_ref above (string value)
-#image_ref_alt = <None>
-
-# Valid primary flavor to use in tests. (string value)
-#flavor_ref = 1
-
-# Valid secondary flavor to be used in tests. (string value)
-#flavor_ref_alt = 2
-
-# User name used to authenticate to an instance. (string value)
-#image_ssh_user = root
-
-# Password used to authenticate to an instance. (string value)
-#image_ssh_password = password
-
-# User name used to authenticate to an instance using the alternate
-# image. (string value)
-#image_alt_ssh_user = root
-
-# Time in seconds between build status checks. (integer value)
-#build_interval = 1
-
-# Timeout in seconds to wait for an instance to build. Other services
-# that do not define build_timeout will inherit this value. (integer
-# value)
-#build_timeout = 300
-
-# Shell fragments to use before executing a command when sshing to a
-# guest. (string value)
-#ssh_shell_prologue = set -eu -o pipefail; PATH=$$PATH:/sbin;
-
-# Auth method used for authenticate to the instance. Valid choices
-# are: keypair, configured, adminpass and disabled. Keypair: start the
-# servers with a ssh keypair. Configured: use the configured user and
-# password. Adminpass: use the injected adminPass. Disabled: avoid
-# using ssh when it is an option. (string value)
-#ssh_auth_method = keypair
-
-# How to connect to the instance? fixed: using the first ip belongs
-# the fixed network floating: creating and using a floating ip.
-# (string value)
-#ssh_connect_method = floating
-
-# User name used to authenticate to an instance. (string value)
-#ssh_user = root
-
-# Timeout in seconds to wait for ping to succeed. (integer value)
-#ping_timeout = 120
-
-# The packet size for ping packets originating from remote linux hosts
-# (integer value)
-#ping_size = 56
-
-# The number of ping packets originating from remote linux hosts
-# (integer value)
-#ping_count = 1
-
-# Additional wait time for clean state, when there is no OS-EXT-STS
-# extension available (integer value)
-#ready_wait = 0
-
-# Name of the fixed network that is visible to all test tenants. If
-# multiple networks are available for a tenant this is the network
-# which will be used for creating servers if tempest does not create a
-# network or a network is not specified elsewhere. It may be used for
-# ssh validation only if floating IPs are disabled. (string value)
-#fixed_network_name = <None>
-
-# Network used for SSH connections. Ignored if
-# use_floatingip_for_ssh=true or run_validation=false. (string value)
-#network_for_ssh = public
-
-# Does SSH use Floating IPs? (boolean value)
-#use_floatingip_for_ssh = true
-
-# Catalog type of the Compute service. (string value)
-#catalog_type = compute
-
-# The compute region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found in the
-# service catalog, the first found one is used. (string value)
-#region =
-
-# The endpoint type to use for the compute service. (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# Expected device name when a volume is attached to an instance
-# (string value)
-#volume_device_name = vdb
-
-# Time in seconds before a shelved instance is eligible for removing
-# from a host.  -1 never offload, 0 offload when shelved. This time
-# should be the same as the time of nova.conf, and some tests will run
-# for as long as the time. (integer value)
-#shelved_offload_time = 0
-
-# Unallocated floating IP range, which will be used to test the
-# floating IP bulk feature for CRUD operation. This block must not
-# overlap an existing floating IP pool. (string value)
-#floating_ip_range = 10.0.0.0/29
-
-
-[compute-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# If false, skip disk config tests (boolean value)
-#disk_config = true
-
-# A list of enabled compute extensions with a special entry all which
-# indicates every extension is enabled. Each extension should be
-# specified with alias name. Empty list indicates all extensions are
-# disabled (list value)
-#api_extensions = all
-
-# Does the test environment support changing the admin password?
-# (boolean value)
-#change_password = false
-
-# Does the test environment support obtaining instance serial console
-# output? (boolean value)
-#console_output = true
-
-# Does the test environment support resizing? (boolean value)
-#resize = false
-
-# Does the test environment support pausing? (boolean value)
-#pause = true
-
-# Does the test environment support shelving/unshelving? (boolean
-# value)
-#shelve = true
-
-# Does the test environment support suspend/resume? (boolean value)
-#suspend = true
-
-# Does the test environment support live migration available? (boolean
-# value)
-#live_migration = true
-
-# Does the test environment use block devices for live migration
-# (boolean value)
-#block_migration_for_live_migration = false
-
-# Does the test environment block migration support cinder iSCSI
-# volumes. Note, libvirt doesn't support this, see
-# https://bugs.launchpad.net/nova/+bug/1398999 (boolean value)
-#block_migrate_cinder_iscsi = false
-
-# Does the test system allow live-migration of paused instances? Note,
-# this is more than just the ANDing of paused and live_migrate, but
-# all 3 should be set to True to run those tests (boolean value)
-#live_migrate_paused_instances = false
-
-# Enable VNC console. This configuration value should be same as
-# [nova.vnc]->vnc_enabled in nova.conf (boolean value)
-#vnc_console = false
-
-# Enable Spice console. This configuration value should be same as
-# [nova.spice]->enabled in nova.conf (boolean value)
-#spice_console = false
-
-# Enable RDP console. This configuration value should be same as
-# [nova.rdp]->enabled in nova.conf (boolean value)
-#rdp_console = false
-
-# Does the test environment support instance rescue mode? (boolean
-# value)
-#rescue = true
-
-# Enables returning of the instance password by the relevant server
-# API calls such as create, rebuild or rescue. (boolean value)
-#enable_instance_password = true
-
-# Does the test environment support dynamic network interface
-# attachment? (boolean value)
-#interface_attach = true
-
-# Does the test environment support creating snapshot images of
-# running instances? (boolean value)
-#snapshot = true
-
-# Does the test environment have the ec2 api running? (boolean value)
-#ec2_api = true
-
-# Does Nova preserve preexisting ports from Neutron when deleting an
-# instance? This should be set to True if testing Kilo+ Nova. (boolean
-# value)
-#preserve_ports = false
-
-# Does the test environment support attaching an encrypted volume to a
-# running server instance? This may depend on the combination of
-# compute_driver in nova and the volume_driver(s) in cinder. (boolean
-# value)
-#attach_encrypted_volume = true
-
-
-[dashboard]
-
-#
-# From tempest.config
-#
-
-# Where the dashboard can be found (string value)
-#dashboard_url = http://localhost/
-
-# Login page for the dashboard (string value)
-#login_url = http://localhost/auth/login/
-
-
-[data_processing]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the data processing service. (string value)
-#catalog_type = data_processing
-
-# The endpoint type to use for the data processing service. (string
-# value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-
-[data_processing-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# List of enabled data processing plugins (list value)
-#plugins = vanilla,hdp
-
-
-[database]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Database service. (string value)
-#catalog_type = database
-
-# Valid primary flavor to use in database tests. (string value)
-#db_flavor_ref = 1
-
-# Current database version to use in database tests. (string value)
-#db_current_version = v1.0
-
-
-[debug]
-
-#
-# From tempest.config
-#
-
-# A regex to determine which requests should be traced.  This is a
-# regex to match the caller for rest client requests to be able to
-# selectively trace calls out of specific classes and methods. It
-# largely exists for test development, and is not expected to be used
-# in a real deploy of tempest. This will be matched against the
-# discovered ClassName:method in the test environment.  Expected
-# values for this field are:   * ClassName:test_method_name - traces
-# one test_method  * ClassName:setUp(Class) - traces specific setup
-# functions  * ClassName:tearDown(Class) - traces specific teardown
-# functions  * ClassName:_run_cleanups - traces the cleanup functions
-# If nothing is specified, this feature is not enabled. To trace
-# everything specify .* as the regex.  (string value)
-#trace_requests =
-
-
-[identity]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Identity service. (string value)
-#catalog_type = identity
-
-# Set to True if using self-signed SSL certificates. (boolean value)
-#disable_ssl_certificate_validation = false
-
-# Specify a CA bundle file to use in verifying a TLS (https) server
-# certificate. (string value)
-#ca_certificates_file = <None>
-
-# Full URI of the OpenStack Identity API (Keystone), v2 (string value)
-#uri = <None>
-
-# Full URI of the OpenStack Identity API (Keystone), v3 (string value)
-#uri_v3 = <None>
-
-# Identity API version to be used for authentication for API tests.
-# (string value)
-#auth_version = v2
-
-# The identity region name to use. Also used as the other services'
-# region name unless they are set explicitly. If no such region is
-# found in the service catalog, the first found one is used. (string
-# value)
-#region = RegionOne
-
-# The endpoint type to use for the identity service. (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# Username to use for Nova API requests. (string value)
-#username = <None>
-
-# Tenant name to use for Nova API requests. (string value)
-#tenant_name = <None>
-
-# Role required to administrate keystone. (string value)
-#admin_role = admin
-
-# API key to use when authenticating. (string value)
-#password = <None>
-
-# Domain name for authentication (Keystone V3).The same domain applies
-# to user and project (string value)
-#domain_name = <None>
-
-# Username of alternate user to use for Nova API requests. (string
-# value)
-#alt_username = <None>
-
-# Alternate user's Tenant name to use for Nova API requests. (string
-# value)
-#alt_tenant_name = <None>
-
-# API key to use when authenticating as alternate user. (string value)
-#alt_password = <None>
-
-# Alternate domain name for authentication (Keystone V3).The same
-# domain applies to user and project (string value)
-#alt_domain_name = <None>
-
-# Administrative Username to use for Keystone API requests. (string
-# value)
-#admin_username = <None>
-
-# Administrative Tenant name to use for Keystone API requests. (string
-# value)
-#admin_tenant_name = <None>
-
-# API key to use when authenticating as admin. (string value)
-#admin_password = <None>
-
-# Admin domain name for authentication (Keystone V3).The same domain
-# applies to user and project (string value)
-#admin_domain_name = <None>
-
-# ID of the default domain (string value)
-#default_domain_id = default
-
-
-[identity-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# Does the identity service have delegation and impersonation enabled
-# (boolean value)
-#trust = true
-
-# Is the v2 identity API enabled (boolean value)
-#api_v2 = true
-
-# Is the v3 identity API enabled (boolean value)
-#api_v3 = true
-
-
-[image]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Image service. (string value)
-#catalog_type = image
-
-# The image region name to use. If empty, the value of identity.region
-# is used instead. If no such region is found in the service catalog,
-# the first found one is used. (string value)
-#region =
-
-# The endpoint type to use for the image service. (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# http accessible image (string value)
-#http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
-
-# Timeout in seconds to wait for an image to become available.
-# (integer value)
-#build_timeout = 300
-
-# Time in seconds between image operation status checks. (integer
-# value)
-#build_interval = 1
-
-
-[image-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# Is the v2 image API enabled (boolean value)
-#api_v2 = true
-
-# Is the v1 image API enabled (boolean value)
-#api_v1 = true
-
-# Is the deactivate-image feature enabled. The feature has been
-# integrated since Kilo. (boolean value)
-#deactivate_image = false
-
-
-[input-scenario]
-
-#
-# From tempest.config
-#
-
-# Matching images become parameters for scenario tests (string value)
-#image_regex = ^cirros-0.3.1-x86_64-uec$
-
-# Matching flavors become parameters for scenario tests (string value)
-#flavor_regex = ^m1.nano$
-
-# SSH verification in tests is skippedfor matching images (string
-# value)
-#non_ssh_image_regex = ^.*[Ww]in.*$
-
-# List of user mapped to regex to matching image names. (string value)
-#ssh_user_regex = [["^.*[Cc]irros.*$", "cirros"]]
-
-
-[messaging]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Messaging service. (string value)
-#catalog_type = messaging
-
-# The maximum number of queue records per page when listing queues
-# (integer value)
-#max_queues_per_page = 20
-
-# The maximum metadata size for a queue (integer value)
-#max_queue_metadata = 65536
-
-# The maximum number of queue message per page when listing (or)
-# posting messages (integer value)
-#max_messages_per_page = 20
-
-# The maximum size of a message body (integer value)
-#max_message_size = 262144
-
-# The maximum number of messages per claim (integer value)
-#max_messages_per_claim = 20
-
-# The maximum ttl for a message (integer value)
-#max_message_ttl = 1209600
-
-# The maximum ttl for a claim (integer value)
-#max_claim_ttl = 43200
-
-# The maximum grace period for a claim (integer value)
-#max_claim_grace = 43200
-
-
-[negative]
-
-#
-# From tempest.config
-#
-
-# Test generator class for all negative tests (string value)
-#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator
-
-
-[network]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Neutron service. (string value)
-#catalog_type = network
-
-# The network region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found in the
-# service catalog, the first found one is used. (string value)
-#region =
-
-# The endpoint type to use for the network service. (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# The cidr block to allocate tenant ipv4 subnets from (string value)
-#tenant_network_cidr = 10.100.0.0/16
-
-# The mask bits for tenant ipv4 subnets (integer value)
-#tenant_network_mask_bits = 28
-
-# The cidr block to allocate tenant ipv6 subnets from (string value)
-#tenant_network_v6_cidr = 2003::/48
-
-# The mask bits for tenant ipv6 subnets (integer value)
-#tenant_network_v6_mask_bits = 64
-
-# Whether tenant networks can be reached directly from the test
-# client. This must be set to True when the 'fixed' ssh_connect_method
-# is selected. (boolean value)
-#tenant_networks_reachable = false
-
-# Id of the public network that provides external connectivity (string
-# value)
-#public_network_id =
-
-# Default floating network name. Used to allocate floating IPs when
-# neutron is enabled. (string value)
-#floating_network_name = <None>
-
-# Id of the public router that provides external connectivity. This
-# should only be used when Neutron's 'allow_overlapping_ips' is set to
-# 'False' in neutron.conf. usually not needed past 'Grizzly' release
-# (string value)
-#public_router_id =
-
-# Timeout in seconds to wait for network operation to complete.
-# (integer value)
-#build_timeout = 300
-
-# Time in seconds between network operation status checks. (integer
-# value)
-#build_interval = 1
-
-# List of dns servers which should be used for subnet creation (list
-# value)
-#dns_servers = 8.8.8.8,8.8.4.4
-
-# vnic_type to use when Launching instances with pre-configured ports.
-# Supported ports are: ['normal','direct','macvtap'] (string value)
-# Allowed values: <None>, normal, direct, macvtap
-#port_vnic_type = <None>
-
-
-[network-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# Allow the execution of IPv6 tests (boolean value)
-#ipv6 = true
-
-# A list of enabled network extensions with a special entry all which
-# indicates every extension is enabled. Empty list indicates all
-# extensions are disabled. To get the list of extensions run: 'neutron
-# ext-list' (list value)
-#api_extensions = all
-
-# Allow the execution of IPv6 subnet tests that use the extended IPv6
-# attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
-#ipv6_subnet_attributes = false
-
-# Does the test environment support changing port admin state (boolean
-# value)
-#port_admin_state_change = true
-
-
-[object-storage]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Object-Storage service. (string value)
-#catalog_type = object-store
-
-# The object-storage region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found in the
-# service catalog, the first found one is used. (string value)
-#region =
-
-# The endpoint type to use for the object-store service. (string
-# value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# Number of seconds to time on waiting for a container to container
-# synchronization complete. (integer value)
-#container_sync_timeout = 600
-
-# Number of seconds to wait while looping to check the status of a
-# container to container synchronization (integer value)
-#container_sync_interval = 5
-
-# Role to add to users created for swift tests to enable creating
-# containers (string value)
-#operator_role = Member
-
-# User role that has reseller admin (string value)
-#reseller_admin_role = ResellerAdmin
-
-# Name of sync realm. A sync realm is a set of clusters that have
-# agreed to allow container syncing with each other. Set the same
-# realm name as Swift's container-sync-realms.conf (string value)
-#realm_name = realm1
-
-# One name of cluster which is set in the realm whose name is set in
-# 'realm_name' item in this file. Set the same cluster name as Swift's
-# container-sync-realms.conf (string value)
-#cluster_name = name1
-
-
-[object-storage-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# A list of the enabled optional discoverable apis. A single entry,
-# all, indicates that all of these features are expected to be enabled
-# (list value)
-#discoverable_apis = all
-
-# Execute (old style) container-sync tests (boolean value)
-#container_sync = true
-
-# Execute object-versioning tests (boolean value)
-#object_versioning = true
-
-# Execute discoverability tests (boolean value)
-#discoverability = true
-
-
-[orchestration]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Orchestration service. (string value)
-#catalog_type = orchestration
-
-# The orchestration region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found in the
-# service catalog, the first found one is used. (string value)
-#region =
-
-# The endpoint type to use for the orchestration service. (string
-# value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# Role required for users to be able to manage stacks (string value)
-#stack_owner_role = heat_stack_owner
-
-# Time in seconds between build status checks. (integer value)
-#build_interval = 1
-
-# Timeout in seconds to wait for a stack to build. (integer value)
-#build_timeout = 1200
-
-# Instance type for tests. Needs to be big enough for a full OS plus
-# the test workload (string value)
-#instance_type = m1.micro
-
-# Name of existing keypair to launch servers with. (string value)
-#keypair_name = <None>
-
-# Value must match heat configuration of the same name. (integer
-# value)
-#max_template_size = 524288
-
-# Value must match heat configuration of the same name. (integer
-# value)
-#max_resources_per_stack = 1000
-
-
-[oslo_concurrency]
-
-#
-# From oslo.concurrency
-#
-
-# Enables or disables inter-process locks. (boolean value)
-# Deprecated group/name - [DEFAULT]/disable_process_locking
-#disable_process_locking = false
-
-# Directory to use for lock files.  For security, the specified
-# directory should only be writable by the user running the processes
-# that need locking. Defaults to environment variable OSLO_LOCK_PATH.
-# If external locks are used, a lock path must be set. (string value)
-# Deprecated group/name - [DEFAULT]/lock_path
-#lock_path = <None>
-
-
-[scenario]
-
-#
-# From tempest.config
-#
-
-# Directory containing image files (string value)
-#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
-
-# Image file name (string value)
-# Deprecated group/name - [DEFAULT]/qcow2_img_file
-#img_file = cirros-0.3.1-x86_64-disk.img
-
-# Image disk format (string value)
-#img_disk_format = qcow2
-
-# Image container format (string value)
-#img_container_format = bare
-
-# Glance image properties. Use for custom images which require them
-# (dict value)
-#img_properties = <None>
-
-# AMI image file name (string value)
-#ami_img_file = cirros-0.3.1-x86_64-blank.img
-
-# ARI image file name (string value)
-#ari_img_file = cirros-0.3.1-x86_64-initrd
-
-# AKI image file name (string value)
-#aki_img_file = cirros-0.3.1-x86_64-vmlinuz
-
-# ssh username for the image file (string value)
-#ssh_user = cirros
-
-# specifies how many resources to request at once. Used for large
-# operations testing. (integer value)
-#large_ops_number = 0
-
-# DHCP client used by images to renew DCHP lease. If left empty,
-# update operation will be skipped. Supported clients: "udhcpc",
-# "dhclient" (string value)
-# Allowed values: udhcpc, dhclient
-#dhcp_client = udhcpc
-
-
-[service_available]
-
-#
-# From tempest.config
-#
-
-# Whether or not cinder is expected to be available (boolean value)
-#cinder = true
-
-# Whether or not neutron is expected to be available (boolean value)
-#neutron = false
-
-# Whether or not glance is expected to be available (boolean value)
-#glance = true
-
-# Whether or not swift is expected to be available (boolean value)
-#swift = true
-
-# Whether or not nova is expected to be available (boolean value)
-#nova = true
-
-# Whether or not Heat is expected to be available (boolean value)
-#heat = false
-
-# Whether or not Ceilometer is expected to be available (boolean
-# value)
-#ceilometer = true
-
-# Whether or not Horizon is expected to be available (boolean value)
-#horizon = true
-
-# Whether or not Sahara is expected to be available (boolean value)
-#sahara = false
-
-# Whether or not Ironic is expected to be available (boolean value)
-#ironic = false
-
-# Whether or not Trove is expected to be available (boolean value)
-#trove = false
-
-# Whether or not Zaqar is expected to be available (boolean value)
-#zaqar = false
-
-
-[stress]
-
-#
-# From tempest.config
-#
-
-# Directory containing log files on the compute nodes (string value)
-#nova_logdir = <None>
-
-# Maximum number of instances to create during test. (integer value)
-#max_instances = 16
-
-# Controller host. (string value)
-#controller = <None>
-
-# Controller host. (string value)
-#target_controller = <None>
-
-# ssh user. (string value)
-#target_ssh_user = <None>
-
-# Path to private key. (string value)
-#target_private_key_path = <None>
-
-# regexp for list of log files. (string value)
-#target_logfiles = <None>
-
-# time (in seconds) between log file error checks. (integer value)
-#log_check_interval = 60
-
-# The number of threads created while stress test. (integer value)
-#default_thread_number_per_action = 4
-
-# Prevent the cleaning (tearDownClass()) between each stress test run
-# if an exception occurs during this run. (boolean value)
-#leave_dirty_stack = false
-
-# Allows a full cleaning process after a stress test. Caution : this
-# cleanup will remove every objects of every tenant. (boolean value)
-#full_clean_stack = false
-
-
-[telemetry]
-
-#
-# From tempest.config
-#
-
-# Catalog type of the Telemetry service. (string value)
-#catalog_type = metering
-
-# The endpoint type to use for the telemetry service. (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# This variable is used as flag to enable notification tests (boolean
-# value)
-#too_slow_to_test = true
-
-
-[validation]
-
-#
-# From tempest.config
-#
-
-# Enable ssh on created servers and creation of additional validation
-# resources to enable remote access (boolean value)
-# Deprecated group/name - [compute]/run_ssh
-#run_validation = false
-
-# Default IP type used for validation: -fixed: uses the first IP
-# belonging to the fixed network -floating: creates and uses a
-# floating IP (string value)
-# Allowed values: fixed, floating
-#connect_method = floating
-
-# Default authentication method to the instance. Only ssh via keypair
-# is supported for now. Additional methods will be handled in a
-# separate spec. (string value)
-# Allowed values: keypair
-#auth_method = keypair
-
-# Default IP version for ssh connections. (integer value)
-# Deprecated group/name - [compute]/ip_version_for_ssh
-#ip_version_for_ssh = 4
-
-# Timeout in seconds to wait for ping to succeed. (integer value)
-#ping_timeout = 120
-
-# Timeout in seconds to wait for the TCP connection to be successful.
-# (integer value)
-# Deprecated group/name - [compute]/ssh_channel_timeout
-#connect_timeout = 60
-
-# Timeout in seconds to wait for the ssh banner. (integer value)
-# Deprecated group/name - [compute]/ssh_timeout
-#ssh_timeout = 300
-
-
-[volume]
-
-#
-# From tempest.config
-#
-
-# Time in seconds between volume availability checks. (integer value)
-#build_interval = 1
-
-# Timeout in seconds to wait for a volume to become available.
-# (integer value)
-#build_timeout = 300
-
-# Catalog type of the Volume Service (string value)
-#catalog_type = volume
-
-# The volume region name to use. If empty, the value of
-# identity.region is used instead. If no such region is found in the
-# service catalog, the first found one is used. (string value)
-#region =
-
-# The endpoint type to use for the volume service. (string value)
-# Allowed values: public, admin, internal, publicURL, adminURL, internalURL
-#endpoint_type = publicURL
-
-# Name of the backend1 (must be declared in cinder.conf) (string
-# value)
-#backend1_name = BACKEND_1
-
-# Name of the backend2 (must be declared in cinder.conf) (string
-# value)
-#backend2_name = BACKEND_2
-
-# Backend protocol to target when creating volume types (string value)
-#storage_protocol = iSCSI
-
-# Backend vendor to target when creating volume types (string value)
-#vendor_name = Open Source
-
-# Disk format to use when copying a volume to image (string value)
-#disk_format = raw
-
-# Default size in GB for volumes created by volumes tests (integer
-# value)
-#volume_size = 1
-
-
-[volume-feature-enabled]
-
-#
-# From tempest.config
-#
-
-# Runs Cinder multi-backend test (requires 2 backends) (boolean value)
-#multi_backend = false
-
-# Runs Cinder volumes backup test (boolean value)
-#backup = true
-
-# Runs Cinder volume snapshot test (boolean value)
-#snapshot = true
-
-# A list of enabled volume extensions with a special entry all which
-# indicates every extension is enabled. Empty list indicates all
-# extensions are disabled (list value)
-#api_extensions = all
-
-# Is the v1 volume API enabled (boolean value)
-#api_v1 = true
-
-# Is the v2 volume API enabled (boolean value)
-#api_v2 = true
-
-# Update bootable status of a volume Not implemented on icehouse
-# (boolean value)
-#bootable = false
diff --git a/openstack-common.conf b/openstack-common.conf
index 16ba6a7..acb1437 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -2,7 +2,6 @@
 
 # The list of modules to copy from openstack-common
 module=install_venv_common
-module=versionutils
 module=with_venv
 module=install_venv
 
diff --git a/requirements.txt b/requirements.txt
index d0419f7..cc2a187 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-pbr<2.0,>=1.3
-cliff>=1.13.0 # Apache-2.0
+pbr<2.0,>=1.4
+cliff>=1.14.0 # Apache-2.0
 anyjson>=0.3.3
 httplib2>=0.7.5
 jsonschema!=2.5.0,<3.0.0,>=2.0.0
@@ -12,12 +12,12 @@
 netaddr>=0.7.12
 testrepository>=0.0.18
 pyOpenSSL>=0.14
-oslo.concurrency>=2.1.0 # Apache-2.0
-oslo.config>=1.11.0 # Apache-2.0
+oslo.concurrency>=2.3.0 # Apache-2.0
+oslo.config>=2.1.0 # Apache-2.0
 oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.6.0 # Apache-2.0
+oslo.log>=1.8.0 # Apache-2.0
 oslo.serialization>=1.4.0 # Apache-2.0
-oslo.utils>=1.9.0 # Apache-2.0
+oslo.utils>=2.0.0 # Apache-2.0
 six>=1.9.0
 iso8601>=0.1.9
 fixtures>=1.3.1
diff --git a/setup.cfg b/setup.cfg
index 5c78632..ab40f12 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,12 @@
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.4
 
+[files]
+packages =
+    tempest
+data_files =
+    etc/tempest = etc/*
+
 [entry_points]
 console_scripts =
     verify-tempest-config = tempest.cmd.verify_tempest_config:main
@@ -26,7 +32,8 @@
     tempest-cleanup = tempest.cmd.cleanup:main
     tempest-account-generator = tempest.cmd.account_generator:main
     tempest = tempest.cmd.main:main
-
+tempest.cm =
+    init = tempest.cmd.init:TempestInit
 oslo.config.opts =
     tempest.config = tempest.config:list_opts
 
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index 4830dcd..b6dee18 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -20,7 +20,7 @@
 
 
 class TestNodes(base.BaseBaremetalTest):
-    '''Tests for baremetal nodes.'''
+    """Tests for baremetal nodes."""
 
     def setUp(self):
         super(TestNodes, self).setUp()
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index d9a1ee5..38f5fb7 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -38,7 +38,7 @@
             hypervisor='common', os='linux', architecture='x86_64',
             version='7.0', url='xxx://xxxx/xxx/xxx',
             md5hash='add6bb58e139be103324d04d82d8f545')
-        body = self.client.create_agent(**params)
+        body = self.client.create_agent(**params)['agent']
         self.agent_id = body['agent_id']
 
     def tearDown(self):
@@ -67,7 +67,7 @@
             hypervisor='kvm', os='win', architecture='x86',
             version='7.0', url='xxx://xxxx/xxx/xxx',
             md5hash='add6bb58e139be103324d04d82d8f545')
-        body = self.client.create_agent(**params)
+        body = self.client.create_agent(**params)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
         for expected_item, value in params.items():
             self.assertEqual(value, body[expected_item])
@@ -78,7 +78,7 @@
         params = self._param_helper(
             version='8.0', url='xxx://xxxx/xxx/xxx2',
             md5hash='add6bb58e139be103324d04d82d8f547')
-        body = self.client.update_agent(self.agent_id, **params)
+        body = self.client.update_agent(self.agent_id, **params)['agent']
         for expected_item, value in params.items():
             self.assertEqual(value, body[expected_item])
 
@@ -88,13 +88,13 @@
         self.client.delete_agent(self.agent_id)
 
         # Verify the list doesn't contain the deleted agent.
-        agents = self.client.list_agents()
+        agents = self.client.list_agents()['agents']
         self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
 
     @test.idempotent_id('6a326c69-654b-438a-80a3-34bcc454e138')
     def test_list_agents(self):
         # List all agents.
-        agents = self.client.list_agents()
+        agents = self.client.list_agents()['agents']
         self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
         self.assertIn(self.agent_id, map(lambda x: x['agent_id'], agents))
 
@@ -105,11 +105,12 @@
             hypervisor='xen', os='linux', architecture='x86',
             version='7.0', url='xxx://xxxx/xxx/xxx1',
             md5hash='add6bb58e139be103324d04d82d8f546')
-        agent_xen = self.client.create_agent(**params)
+        agent_xen = self.client.create_agent(**params)['agent']
         self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
 
         agent_id_xen = agent_xen['agent_id']
-        agents = self.client.list_agents(hypervisor=agent_xen['hypervisor'])
+        agents = (self.client.list_agents(hypervisor=agent_xen['hypervisor'])
+                  ['agents'])
         self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
         self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
         self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 91e55d6..3553ce7 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -40,7 +40,7 @@
         cls.aggregate_name_prefix = 'test_aggregate'
         cls.az_name_prefix = 'test_az'
 
-        hosts_all = cls.os_adm.hosts_client.list_hosts()
+        hosts_all = cls.os_adm.hosts_client.list_hosts()['hosts']
         hosts = map(lambda x: x['host_name'],
                     filter(lambda y: y['service'] == 'compute', hosts_all))
         cls.host = hosts[0]
@@ -57,7 +57,8 @@
     def test_aggregate_create_delete(self):
         # Create and delete an aggregate.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self._try_delete_aggregate, aggregate['id'])
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertIsNone(aggregate['availability_zone'])
@@ -71,7 +72,7 @@
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         az_name = data_utils.rand_name(self.az_name_prefix)
         aggregate = self.client.create_aggregate(
-            name=aggregate_name, availability_zone=az_name)
+            name=aggregate_name, availability_zone=az_name)['aggregate']
         self.addCleanup(self._try_delete_aggregate, aggregate['id'])
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertEqual(az_name, aggregate['availability_zone'])
@@ -83,10 +84,11 @@
     def test_aggregate_create_verify_entry_in_list(self):
         # Create an aggregate and ensure it is listed.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
-        aggregates = self.client.list_aggregates()
+        aggregates = self.client.list_aggregates()['aggregates']
         self.assertIn((aggregate['id'], aggregate['availability_zone']),
                       map(lambda x: (x['id'], x['availability_zone']),
                           aggregates))
@@ -95,10 +97,11 @@
     def test_aggregate_create_update_metadata_get_details(self):
         # Create an aggregate and ensure its details are returned.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
-        body = self.client.show_aggregate(aggregate['id'])
+        body = self.client.show_aggregate(aggregate['id'])['aggregate']
         self.assertEqual(aggregate['name'], body['name'])
         self.assertEqual(aggregate['availability_zone'],
                          body['availability_zone'])
@@ -106,11 +109,11 @@
 
         # set the metadata of the aggregate
         meta = {"key": "value"}
-        body = self.client.set_metadata(aggregate['id'], meta)
-        self.assertEqual(meta, body["metadata"])
+        body = self.client.set_metadata(aggregate['id'], metadata=meta)
+        self.assertEqual(meta, body['aggregate']["metadata"])
 
         # verify the metadata has been set
-        body = self.client.show_aggregate(aggregate['id'])
+        body = self.client.show_aggregate(aggregate['id'])['aggregate']
         self.assertEqual(meta, body["metadata"])
 
     @test.idempotent_id('4d2b2004-40fa-40a1-aab2-66f4dab81beb')
@@ -119,7 +122,7 @@
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         az_name = data_utils.rand_name(self.az_name_prefix)
         aggregate = self.client.create_aggregate(
-            name=aggregate_name, availability_zone=az_name)
+            name=aggregate_name, availability_zone=az_name)['aggregate']
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertEqual(aggregate_name, aggregate['name'])
@@ -130,13 +133,14 @@
         new_aggregate_name = aggregate_name + '_new'
         new_az_name = az_name + '_new'
 
-        resp_aggregate = self.client.update_aggregate(aggregate_id,
-                                                      new_aggregate_name,
-                                                      new_az_name)
+        resp_aggregate = self.client.update_aggregate(
+            aggregate_id,
+            name=new_aggregate_name,
+            availability_zone=new_az_name)['aggregate']
         self.assertEqual(new_aggregate_name, resp_aggregate['name'])
         self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
 
-        aggregates = self.client.list_aggregates()
+        aggregates = self.client.list_aggregates()['aggregates']
         self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
                       map(lambda x:
                           (x['id'], x['name'], x['availability_zone']),
@@ -147,16 +151,19 @@
         # Add a host to the given aggregate and remove it.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
-        body = self.client.add_host(aggregate['id'], self.host)
+        body = (self.client.add_host(aggregate['id'], host=self.host)
+                ['aggregate'])
         self.assertEqual(aggregate_name, body['name'])
         self.assertEqual(aggregate['availability_zone'],
                          body['availability_zone'])
         self.assertIn(self.host, body['hosts'])
 
-        body = self.client.remove_host(aggregate['id'], self.host)
+        body = (self.client.remove_host(aggregate['id'], host=self.host)
+                ['aggregate'])
         self.assertEqual(aggregate_name, body['name'])
         self.assertEqual(aggregate['availability_zone'],
                          body['availability_zone'])
@@ -167,12 +174,14 @@
         # Add a host to the given aggregate and list the aggregates.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
-        aggregates = self.client.list_aggregates()
+        aggregates = self.client.list_aggregates()['aggregates']
         aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
         self.assertEqual(1, len(aggs))
         agg = aggs[0]
@@ -185,12 +194,14 @@
         # Add a host to the given aggregate and get its details.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
-        body = self.client.show_aggregate(aggregate['id'])
+        body = self.client.show_aggregate(aggregate['id'])['aggregate']
         self.assertEqual(aggregate_name, body['name'])
         self.assertIsNone(body['availability_zone'])
         self.assertIn(self.host, body['hosts'])
@@ -202,10 +213,11 @@
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         az_name = data_utils.rand_name(self.az_name_prefix)
         aggregate = self.client.create_aggregate(
-            name=aggregate_name, availability_zone=az_name)
+            name=aggregate_name, availability_zone=az_name)['aggregate']
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
         server_name = data_utils.rand_name('test_server')
         admin_servers_client = self.os_adm.servers_client
         server = self.create_test_server(name=server_name,
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index 74a8547..bc1a854 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -39,7 +39,7 @@
         cls.aggregate_name_prefix = 'test_aggregate'
         cls.az_name_prefix = 'test_az'
 
-        hosts_all = cls.os_adm.hosts_client.list_hosts()
+        hosts_all = cls.os_adm.hosts_client.list_hosts()['hosts']
         hosts = map(lambda x: x['host_name'],
                     filter(lambda y: y['service'] == 'compute', hosts_all))
         cls.host = hosts[0]
@@ -76,7 +76,8 @@
         # creating an aggregate with an existing aggregate name is forbidden
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self.client.create_aggregate(name=aggregate_name)
-        self.addCleanup(self.client.delete_aggregate, aggregate['id'])
+        self.addCleanup(self.client.delete_aggregate,
+                        aggregate['aggregate']['id'])
 
         self.assertRaises(lib_exc.Conflict,
                           self.client.create_aggregate,
@@ -87,7 +88,8 @@
     def test_aggregate_delete_as_user(self):
         # Regular user is not allowed to delete an aggregate.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.Forbidden,
@@ -106,7 +108,8 @@
     def test_aggregate_get_details_as_user(self):
         # Regular user is not allowed to get aggregate details.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.Forbidden,
@@ -131,7 +134,7 @@
     @test.idempotent_id('0ef07828-12b4-45ba-87cc-41425faf5711')
     def test_aggregate_add_non_exist_host(self):
         # Adding a nonexistent host to an aggregate should raise an exception.
-        hosts_all = self.os_adm.hosts_client.list_hosts()
+        hosts_all = self.os_adm.hosts_client.list_hosts()['hosts']
         hosts = map(lambda x: x['host_name'], hosts_all)
         while True:
             non_exist_host = data_utils.rand_name('nonexist_host')
@@ -139,37 +142,41 @@
                 break
 
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.NotFound, self.client.add_host,
-                          aggregate['id'], non_exist_host)
+                          aggregate['id'], host=non_exist_host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('7324c334-bd13-4c93-8521-5877322c3d51')
     def test_aggregate_add_host_as_user(self):
         # Regular user is not allowed to add a host to an aggregate.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.Forbidden,
                           self.user_client.add_host,
-                          aggregate['id'], self.host)
+                          aggregate['id'], host=self.host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
     def test_aggregate_add_existent_host(self):
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
         self.assertRaises(lib_exc.Conflict, self.client.add_host,
-                          aggregate['id'], self.host)
+                          aggregate['id'], host=self.host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('7a53af20-137a-4e44-a4ae-e19260e626d9')
@@ -177,22 +184,25 @@
         # Regular user is not allowed to remove a host from an aggregate.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
         self.assertRaises(lib_exc.Forbidden,
                           self.user_client.remove_host,
-                          aggregate['id'], self.host)
+                          aggregate['id'], host=self.host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
     def test_aggregate_remove_nonexistent_host(self):
         non_exist_host = data_utils.rand_name('nonexist_host')
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
-        aggregate = self.client.create_aggregate(name=aggregate_name)
+        aggregate = (self.client.create_aggregate(name=aggregate_name)
+                     ['aggregate'])
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.NotFound, self.client.remove_host,
-                          aggregate['id'], non_exist_host)
+                          aggregate['id'], host=non_exist_host)
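
In the two aggregates files above the change is twofold: responses are unwrapped through the ``aggregate``/``aggregates`` keys, and ``add_host``/``remove_host`` (like ``set_metadata`` and ``update_aggregate``) now take keyword arguments instead of positional ones. A rough, self-contained sketch of the resulting call pattern; the stub client is illustrative only::

    class FakeAggregatesClient(object):
        def create_aggregate(self, **kwargs):
            return {'aggregate': dict(kwargs, id=1, hosts=[])}

        def add_host(self, aggregate_id, **kwargs):
            # The host is now passed as host=<name> and serialized by the client.
            return {'aggregate': {'id': aggregate_id,
                                  'name': 'test_aggregate',
                                  'availability_zone': None,
                                  'hosts': [kwargs['host']]}}

    client = FakeAggregatesClient()
    aggregate = client.create_aggregate(name='test_aggregate')['aggregate']
    body = client.add_host(aggregate['id'], host='compute-1')['aggregate']
    assert 'compute-1' in body['hosts']
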
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 22eae03..1b36ff2 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -32,10 +32,10 @@
     def test_get_availability_zone_list(self):
         # List of availability zones
         availability_zone = self.client.list_availability_zones()
-        self.assertTrue(len(availability_zone) > 0)
+        self.assertTrue(len(availability_zone['availabilityZoneInfo']) > 0)
 
     @test.idempotent_id('ef726c58-530f-44c2-968c-c7bed22d5b8c')
     def test_get_availability_zone_list_detail(self):
         # List of availability zones and available services
         availability_zone = self.client.list_availability_zones(detail=True)
-        self.assertTrue(len(availability_zone) > 0)
+        self.assertTrue(len(availability_zone['availabilityZoneInfo']) > 0)
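
The assertions above now drill into the ``availabilityZoneInfo`` key of the response. The expected shape is sketched below with illustrative values; only the top-level key is taken from the hunk, the inner fields are assumptions mirroring the usual Nova availability-zone fields::

    response = {
        'availabilityZoneInfo': [
            {'zoneName': 'nova', 'zoneState': {'available': True}},
        ],
    }
    assert len(response['availabilityZoneInfo']) > 0
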
diff --git a/tempest/api/compute/admin/test_baremetal_nodes.py b/tempest/api/compute/admin/test_baremetal_nodes.py
index 4d95f0a..2599d86 100644
--- a/tempest/api/compute/admin/test_baremetal_nodes.py
+++ b/tempest/api/compute/admin/test_baremetal_nodes.py
@@ -46,11 +46,11 @@
         # List all baremetal nodes and ensure our created test nodes are
         # listed
         bm_node_ids = set([n['id'] for n in
-                           self.client.list_baremetal_nodes()])
+                           self.client.list_baremetal_nodes()['nodes']])
         test_node_ids = set([n['uuid'] for n in test_nodes])
         self.assertTrue(test_node_ids.issubset(bm_node_ids))
 
         # Test getting each individually
         for node in test_nodes:
             baremetal_node = self.client.show_baremetal_node(node['uuid'])
-            self.assertEqual(node['uuid'], baremetal_node['id'])
+            self.assertEqual(node['uuid'], baremetal_node['node']['id'])
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index dc9a7ef..669585c 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -51,16 +51,14 @@
     @test.services('network')
     def test_list_fixed_ip_details(self):
         fixed_ip = self.client.show_fixed_ip(self.ip)
-        self.assertEqual(fixed_ip['address'], self.ip)
+        self.assertEqual(fixed_ip['fixed_ip']['address'], self.ip)
 
     @test.idempotent_id('5485077b-7e46-4cec-b402-91dc3173433b')
     @test.services('network')
     def test_set_reserve(self):
-        body = {"reserve": "None"}
-        self.client.reserve_fixed_ip(self.ip, body)
+        self.client.reserve_fixed_ip(self.ip, reserve="None")
 
     @test.idempotent_id('7476e322-b9ff-4710-bf82-49d51bac6e2e')
     @test.services('network')
     def test_set_unreserve(self):
-        body = {"unreserve": "None"}
-        self.client.reserve_fixed_ip(self.ip, body)
+        self.client.reserve_fixed_ip(self.ip, unreserve="None")
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 6698638..e67936c 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -60,19 +60,17 @@
     @test.idempotent_id('ce60042c-fa60-4836-8d43-1c8e3359dc47')
     @test.services('network')
     def test_set_reserve_with_non_admin_user(self):
-        body = {"reserve": "None"}
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reserve_fixed_ip,
-                          self.ip, body)
+                          self.ip, reserve="None")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('f1f7a35b-0390-48c5-9803-5f27461439db')
     @test.services('network')
     def test_set_unreserve_with_non_admin_user(self):
-        body = {"unreserve": "None"}
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reserve_fixed_ip,
-                          self.ip, body)
+                          self.ip, unreserve="None")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('f51cf464-7fc5-4352-bc3e-e75cfa2cb717')
@@ -80,19 +78,17 @@
     def test_set_reserve_with_invalid_ip(self):
         # NOTE(maurosr): since this exercises the same code snippet, we do it
         # only for the reserve action
-        body = {"reserve": "None"}
         # NOTE(eliqiao): in Juno the exception is NotFound, but in master the
         # error code was changed to BadRequest; both exceptions should be
         # accepted by tempest
         self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
                           self.client.reserve_fixed_ip,
-                          "my.invalid.ip", body)
+                          "my.invalid.ip", reserve="None")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('fd26ef50-f135-4232-9d32-281aab3f9176')
     @test.services('network')
     def test_fixed_ip_with_invalid_action(self):
-        body = {"invalid_action": "None"}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reserve_fixed_ip,
-                          self.ip, body)
+                          self.ip, invalid_action="None")
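
For the fixed-IP tests, the action is no longer passed as a prebuilt body dict; it is given as a keyword argument and serialized by the client. A tiny illustrative stub of that calling convention, not the real Tempest client::

    def reserve_fixed_ip(ip, **kwargs):
        # e.g. kwargs == {'reserve': 'None'} or {'unreserve': 'None'};
        # the real client turns this into the request body.
        return {'ip': ip, 'body': dict(kwargs)}

    reserve_fixed_ip('10.0.0.5', reserve="None")
    reserve_fixed_ip('10.0.0.5', unreserve="None")
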
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 364d080..42a2984 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -63,13 +63,13 @@
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
 
         # Create the flavor
-        flavor = self.client.create_flavor(flavor_name,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           flavor_id,
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=flavor_id,
                                            ephemeral=self.ephemeral,
                                            swap=self.swap,
-                                           rxtx=self.rxtx)
+                                           rxtx_factor=self.rxtx)['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         self.assertEqual(flavor['name'], flavor_name)
         self.assertEqual(flavor['vcpus'], self.vcpus)
@@ -82,7 +82,7 @@
         self.assertEqual(flavor['os-flavor-access:is_public'], True)
 
         # Verify flavor is retrieved
-        flavor = self.client.show_flavor(flavor['id'])
+        flavor = self.client.show_flavor(flavor['id'])['flavor']
         self.assertEqual(flavor['name'], flavor_name)
 
         return flavor['id']
@@ -115,17 +115,17 @@
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         # Create the flavor
-        flavor = self.client.create_flavor(flavor_name,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           new_flavor_id,
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=new_flavor_id,
                                            ephemeral=self.ephemeral,
                                            swap=self.swap,
-                                           rxtx=self.rxtx)
+                                           rxtx_factor=self.rxtx)['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         flag = False
         # Verify flavor is retrieved
-        flavors = self.client.list_flavors(detail=True)
+        flavors = self.client.list_flavors(detail=True)['flavors']
         for flavor in flavors:
             if flavor['name'] == flavor_name:
                 flag = True
@@ -147,10 +147,10 @@
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         # Create the flavor
-        flavor = self.client.create_flavor(flavor_name,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           new_flavor_id)
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=new_flavor_id)['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         self.assertEqual(flavor['name'], flavor_name)
         self.assertEqual(flavor['ram'], self.ram)
@@ -160,12 +160,12 @@
         verify_flavor_response_extension(flavor)
 
         # Verify flavor is retrieved
-        flavor = self.client.show_flavor(new_flavor_id)
+        flavor = self.client.show_flavor(new_flavor_id)['flavor']
         self.assertEqual(flavor['name'], flavor_name)
         verify_flavor_response_extension(flavor)
 
         # Check if flavor is present in list
-        flavors = self.user_client.list_flavors(detail=True)
+        flavors = self.user_client.list_flavors(detail=True)['flavors']
         for flavor in flavors:
             if flavor['name'] == flavor_name:
                 verify_flavor_response_extension(flavor)
@@ -182,15 +182,15 @@
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         # Create the flavor
-        flavor = self.client.create_flavor(flavor_name,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           new_flavor_id,
-                                           is_public="False")
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=new_flavor_id,
+                                           is_public="False")['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         # Verify flavor is retrieved
         flag = False
-        flavors = self.client.list_flavors(detail=True)
+        flavors = self.client.list_flavors(detail=True)['flavors']
         for flavor in flavors:
             if flavor['name'] == flavor_name:
                 flag = True
@@ -198,7 +198,7 @@
 
         # Verify the flavor is not retrieved by another user
         flag = False
-        flavors = self.user_client.list_flavors(detail=True)
+        flavors = self.user_client.list_flavors(detail=True)['flavors']
         for flavor in flavors:
             if flavor['name'] == flavor_name:
                 flag = True
@@ -211,11 +211,11 @@
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         # Create the flavor
-        flavor = self.client.create_flavor(flavor_name,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           new_flavor_id,
-                                           is_public="False")
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=new_flavor_id,
+                                           is_public="False")['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
 
         # Verify the flavor cannot be used by another user
@@ -231,16 +231,16 @@
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         # Create the flavor
-        flavor = self.client.create_flavor(flavor_name,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           new_flavor_id,
-                                           is_public="True")
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=new_flavor_id,
+                                           is_public="True")['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         flag = False
         self.new_client = self.flavors_client
         # Verify flavor is retrieved with new user
-        flavors = self.new_client.list_flavors(detail=True)
+        flavors = self.new_client.list_flavors(detail=True)['flavors']
         for flavor in flavors:
             if flavor['name'] == flavor_name:
                 flag = True
@@ -254,19 +254,19 @@
         flavor_name_public = data_utils.rand_name(self.flavor_name_prefix)
 
         # Create a non-public flavor
-        flavor = self.client.create_flavor(flavor_name_not_public,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           flavor_id_not_public,
-                                           is_public="False")
+        flavor = self.client.create_flavor(name=flavor_name_not_public,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=flavor_id_not_public,
+                                           is_public="False")['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
 
         # Create a public flavor
-        flavor = self.client.create_flavor(flavor_name_public,
-                                           self.ram, self.vcpus,
-                                           self.disk,
-                                           flavor_id_public,
-                                           is_public="True")
+        flavor = self.client.create_flavor(name=flavor_name_public,
+                                           ram=self.ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=flavor_id_public,
+                                           is_public="True")['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
 
         def _flavor_lookup(flavors, flavor_name):
@@ -278,7 +278,8 @@
         def _test_string_variations(variations, flavor_name):
             for string in variations:
                 params = {'is_public': string}
-                flavors = self.client.list_flavors(detail=True, **params)
+                flavors = (self.client.list_flavors(detail=True, **params)
+                           ['flavors'])
                 flavor = _flavor_lookup(flavors, flavor_name)
                 self.assertIsNotNone(flavor)
 
@@ -294,10 +295,10 @@
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         ram = "1024"
-        flavor = self.client.create_flavor(flavor_name,
-                                           ram, self.vcpus,
-                                           self.disk,
-                                           new_flavor_id)
+        flavor = self.client.create_flavor(name=flavor_name,
+                                           ram=ram, vcpus=self.vcpus,
+                                           disk=self.disk,
+                                           id=new_flavor_id)['flavor']
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         self.assertEqual(flavor['name'], flavor_name)
         self.assertEqual(flavor['vcpus'], self.vcpus)
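
Throughout the flavors tests, ``create_flavor`` switches from positional to keyword arguments (``name``, ``ram``, ``vcpus``, ``disk``, ``id``, ``rxtx_factor``, ``is_public``) and the result is unwrapped through the ``flavor``/``flavors`` keys. A compact sketch of the new call shape; the fake client below only mirrors the response structure::

    class FakeFlavorsClient(object):
        def create_flavor(self, **kwargs):
            return {'flavor': dict(kwargs)}

        def list_flavors(self, detail=False, **params):
            return {'flavors': [{'name': 'm1.test', 'id': 1000}]}

    client = FakeFlavorsClient()
    flavor = client.create_flavor(name='m1.test', ram=512, vcpus=1, disk=5,
                                  id=1000, rxtx_factor=1)['flavor']
    flavors = client.list_flavors(detail=True)['flavors']
    assert flavor['id'] in [f['id'] for f in flavors]
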
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 2baa53e..0a11d52 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -43,8 +43,6 @@
 
         # Non admin tenant ID
         cls.tenant_id = cls.flavors_client.tenant_id
-        # Compute admin tenant ID
-        cls.adm_tenant_id = cls.client.tenant_id
         cls.flavor_name_prefix = 'test_flavor_access_'
         cls.ram = 512
         cls.vcpus = 1
@@ -56,13 +54,14 @@
         # private flavor will return an empty access list
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='False')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='False')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
-        flavor_access = self.client.list_flavor_access(new_flavor_id)
+        flavor_access = (self.client.list_flavor_access(new_flavor_id)
+                         ['flavor_access'])
         self.assertEqual(len(flavor_access), 0, str(flavor_access))
 
     @test.idempotent_id('59e622f6-bdf6-45e3-8ba8-fedad905a6b4')
@@ -70,30 +69,32 @@
         # Test to add and remove flavor access to a given tenant.
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='False')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='False')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
         # Add flavor access to a tenant.
         resp_body = {
             "tenant_id": str(self.tenant_id),
             "flavor_id": str(new_flavor['id']),
         }
-        add_body = \
-            self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
+        add_body = (self.client.add_flavor_access(new_flavor['id'],
+                                                  self.tenant_id)
+                    ['flavor_access'])
         self.assertIn(resp_body, add_body)
 
         # The flavor is present in list.
-        flavors = self.flavors_client.list_flavors(detail=True)
+        flavors = self.flavors_client.list_flavors(detail=True)['flavors']
         self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
 
         # Remove flavor access from a tenant.
-        remove_body = \
-            self.client.remove_flavor_access(new_flavor['id'], self.tenant_id)
+        remove_body = (self.client.remove_flavor_access(new_flavor['id'],
+                                                        self.tenant_id)
+                       ['flavor_access'])
         self.assertNotIn(resp_body, remove_body)
 
         # The flavor is not present in list.
-        flavors = self.flavors_client.list_flavors(detail=True)
+        flavors = self.flavors_client.list_flavors(detail=True)['flavors']
         self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index e5ae23b..89ae1b5 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -57,11 +57,11 @@
         # Listing flavor access of a public flavor should raise an exception
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='True')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='True')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
         self.assertRaises(lib_exc.NotFound,
                           self.client.list_flavor_access,
@@ -73,11 +73,11 @@
         # Test to add flavor access as a user without admin privileges.
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='False')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='False')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.add_flavor_access,
@@ -90,11 +90,11 @@
         # Test to remove flavor access as a user without admin privileges.
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='False')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='False')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
         # Add flavor access to a tenant.
         self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
@@ -111,11 +111,11 @@
         # Create a new flavor.
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='False')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='False')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
 
         # Add flavor access to a tenant.
@@ -136,11 +136,11 @@
         # Create a new flavor.
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
         new_flavor_id = data_utils.rand_int_id(start=1000)
-        new_flavor = self.client.create_flavor(flavor_name,
-                                               self.ram, self.vcpus,
-                                               self.disk,
-                                               new_flavor_id,
-                                               is_public='False')
+        new_flavor = self.client.create_flavor(name=flavor_name,
+                                               ram=self.ram, vcpus=self.vcpus,
+                                               disk=self.disk,
+                                               id=new_flavor_id,
+                                               is_public='False')['flavor']
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
 
         # An exception should be raised when flavor access is not found
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 6866c1a..25dce6a 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -50,12 +50,13 @@
         swap = 1024
         rxtx = 1
         # Create a flavor so as to set/get/unset extra specs
-        cls.flavor = cls.client.create_flavor(flavor_name,
-                                              ram, vcpus,
-                                              disk,
-                                              cls.new_flavor_id,
+        cls.flavor = cls.client.create_flavor(name=flavor_name,
+                                              ram=ram, vcpus=vcpus,
+                                              disk=disk,
+                                              id=cls.new_flavor_id,
                                               ephemeral=ephemeral,
-                                              swap=swap, rxtx=rxtx)
+                                              swap=swap,
+                                              rxtx_factor=rxtx)['flavor']
 
     @classmethod
     def resource_cleanup(cls):
@@ -70,11 +71,12 @@
         # Assigning extra specs values that are to be set
         specs = {"key1": "value1", "key2": "value2"}
         # SET extra specs to the flavor created in setUp
-        set_body = \
-            self.client.set_flavor_extra_spec(self.flavor['id'], specs)
+        set_body = self.client.set_flavor_extra_spec(self.flavor['id'],
+                                                     **specs)['extra_specs']
         self.assertEqual(set_body, specs)
         # GET extra specs and verify
-        get_body = self.client.list_flavor_extra_specs(self.flavor['id'])
+        get_body = (self.client.list_flavor_extra_specs(self.flavor['id'])
+                    ['extra_specs'])
         self.assertEqual(get_body, specs)
 
         # UPDATE the value of the extra specs key1
@@ -86,7 +88,8 @@
 
         # GET extra specs and verify the value of the key2
         # is the same as before
-        get_body = self.client.list_flavor_extra_specs(self.flavor['id'])
+        get_body = (self.client.list_flavor_extra_specs(self.flavor['id'])
+                    ['extra_specs'])
         self.assertEqual(get_body, {"key1": "value", "key2": "value2"})
 
         # UNSET extra specs that were set in this test
@@ -96,16 +99,18 @@
     @test.idempotent_id('a99dad88-ae1c-4fba-aeb4-32f898218bd0')
     def test_flavor_non_admin_get_all_keys(self):
         specs = {"key1": "value1", "key2": "value2"}
-        self.client.set_flavor_extra_spec(self.flavor['id'], specs)
-        body = self.flavors_client.list_flavor_extra_specs(self.flavor['id'])
+        self.client.set_flavor_extra_spec(self.flavor['id'], **specs)
+        body = (self.flavors_client.list_flavor_extra_specs(self.flavor['id'])
+                ['extra_specs'])
 
         for key in specs:
             self.assertEqual(body[key], specs[key])
 
     @test.idempotent_id('12805a7f-39a3-4042-b989-701d5cad9c90')
     def test_flavor_non_admin_get_specific_key(self):
-        specs = {"key1": "value1", "key2": "value2"}
-        body = self.client.set_flavor_extra_spec(self.flavor['id'], specs)
+        body = self.client.set_flavor_extra_spec(self.flavor['id'],
+                                                 key1="value1",
+                                                 key2="value2")['extra_specs']
         self.assertEqual(body['key1'], 'value1')
         self.assertIn('key2', body)
         body = self.flavors_client.show_flavor_extra_spec(
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 8c5a103..aa95454 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -53,12 +53,13 @@
         swap = 1024
         rxtx = 1
         # Create a flavor
-        cls.flavor = cls.client.create_flavor(flavor_name,
-                                              ram, vcpus,
-                                              disk,
-                                              cls.new_flavor_id,
+        cls.flavor = cls.client.create_flavor(name=flavor_name,
+                                              ram=ram, vcpus=vcpus,
+                                              disk=disk,
+                                              id=cls.new_flavor_id,
                                               ephemeral=ephemeral,
-                                              swap=swap, rxtx=rxtx)
+                                              swap=swap,
+                                              rxtx_factor=rxtx)['flavor']
 
     @classmethod
     def resource_cleanup(cls):
@@ -70,19 +71,17 @@
     @test.idempotent_id('a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d')
     def test_flavor_non_admin_set_keys(self):
         # Test to SET flavor extra spec as a user without admin privileges.
-        specs = {"key1": "value1", "key2": "value2"}
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.set_flavor_extra_spec,
                           self.flavor['id'],
-                          specs)
+                          key1="value1", key2="value2")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('1ebf4ef8-759e-48fe-a801-d451d80476fb')
     def test_flavor_non_admin_update_specific_key(self):
         # A non-admin user is not allowed to update a flavor extra spec
-        specs = {"key1": "value1", "key2": "value2"}
         body = self.client.set_flavor_extra_spec(
-            self.flavor['id'], specs)
+            self.flavor['id'], key1="value1", key2="value2")['extra_specs']
         self.assertEqual(body['key1'], 'value1')
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.
@@ -94,8 +93,8 @@
     @test.attr(type=['negative'])
     @test.idempotent_id('28f12249-27c7-44c1-8810-1f382f316b11')
     def test_flavor_non_admin_unset_keys(self):
-        specs = {"key1": "value1", "key2": "value2"}
-        self.client.set_flavor_extra_spec(self.flavor['id'], specs)
+        self.client.set_flavor_extra_spec(self.flavor['id'],
+                                          key1="value1", key2="value2")
 
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.unset_flavor_extra_spec,
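
The extra-specs calls now take the specs as keyword arguments (``**specs``) and the response is unwrapped through ``extra_specs``. An illustrative stand-alone stub of the pattern::

    def set_flavor_extra_spec(flavor_id, **specs):
        # Mimics the response shape used above; not the real client.
        return {'extra_specs': dict(specs)}

    specs = {'key1': 'value1', 'key2': 'value2'}
    body = set_flavor_extra_spec(42, **specs)['extra_specs']
    assert body == specs
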
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index 4ac1915..c8ca938 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -45,7 +45,7 @@
     @classmethod
     def verify_unallocated_floating_ip_range(cls, ip_range):
         # Verify the configured floating IP range is not already allocated.
-        body = cls.client.list_floating_ips_bulk()
+        body = cls.client.list_floating_ips_bulk()['floating_ip_info']
         allocated_ips_list = map(lambda x: x['address'], body)
         for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts():
             if str(ip_addr) in allocated_ips_list:
@@ -70,12 +70,13 @@
         # anywhere. Using the below-mentioned interface, which is never
         # expected to be used. Cleanup is done for the created IP range.
         interface = 'eth0'
-        body = self.client.create_floating_ips_bulk(self.ip_range,
-                                                    pool,
-                                                    interface)
+        body = (self.client.create_floating_ips_bulk(self.ip_range,
+                                                     pool,
+                                                     interface)
+                ['floating_ips_bulk_create'])
         self.addCleanup(self._delete_floating_ips_bulk, self.ip_range)
         self.assertEqual(self.ip_range, body['ip_range'])
-        ips_list = self.client.list_floating_ips_bulk()
+        ips_list = self.client.list_floating_ips_bulk()['floating_ip_info']
         self.assertNotEqual(0, len(ips_list))
         for ip in netaddr.IPNetwork(self.ip_range).iter_hosts():
             self.assertIn(str(ip), map(lambda x: x['address'], ips_list))
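
The bulk floating-IP calls are unwrapped through the ``floating_ips_bulk_create`` and ``floating_ip_info`` keys. The response shapes assumed by the assertions above, with illustrative values::

    create_resp = {'floating_ips_bulk_create': {'ip_range': '10.10.10.0/24',
                                                'pool': 'nova',
                                                'interface': 'eth0'}}
    list_resp = {'floating_ip_info': [{'address': '10.10.10.1',
                                       'pool': 'nova'}]}

    assert create_resp['floating_ips_bulk_create']['ip_range'] == '10.10.10.0/24'
    addresses = [x['address'] for x in list_resp['floating_ip_info']]
    assert '10.10.10.1' in addresses
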
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index 0dadea5..6d8788f 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -30,15 +30,15 @@
 
     @test.idempotent_id('9bfaf98d-e2cb-44b0-a07e-2558b2821e4f')
     def test_list_hosts(self):
-        hosts = self.client.list_hosts()
+        hosts = self.client.list_hosts()['hosts']
         self.assertTrue(len(hosts) >= 2, str(hosts))
 
     @test.idempotent_id('5dc06f5b-d887-47a2-bb2a-67762ef3c6de')
     def test_list_hosts_with_zone(self):
         self.useFixture(fixtures.LockFixture('availability_zone'))
-        hosts = self.client.list_hosts()
+        hosts = self.client.list_hosts()['hosts']
         host = hosts[0]
-        hosts = self.client.list_hosts(zone=host['zone'])
+        hosts = self.client.list_hosts(zone=host['zone'])['hosts']
         self.assertTrue(len(hosts) >= 1)
         self.assertIn(host, hosts)
 
@@ -46,26 +46,26 @@
     def test_list_hosts_with_a_blank_zone(self):
         # If the request is sent with a blank zone, it will be successful
         # and return the full list of hosts
-        hosts = self.client.list_hosts(zone='')
+        hosts = self.client.list_hosts(zone='')['hosts']
         self.assertNotEqual(0, len(hosts))
 
     @test.idempotent_id('c6ddbadb-c94e-4500-b12f-8ffc43843ff8')
     def test_list_hosts_with_nonexistent_zone(self):
         # If the request is sent with a nonexistent zone, it will be
         # successful and no hosts will be returned
-        hosts = self.client.list_hosts(zone='xxx')
+        hosts = self.client.list_hosts(zone='xxx')['hosts']
         self.assertEqual(0, len(hosts))
 
     @test.idempotent_id('38adbb12-aee2-4498-8aec-329c72423aa4')
     def test_show_host_detail(self):
-        hosts = self.client.list_hosts()
+        hosts = self.client.list_hosts()['hosts']
 
         hosts = [host for host in hosts if host['service'] == 'compute']
         self.assertTrue(len(hosts) >= 1)
 
         for host in hosts:
             hostname = host['host_name']
-            resources = self.client.show_host(hostname)
+            resources = self.client.show_host(hostname)['host']
             self.assertTrue(len(resources) >= 1)
             host_resource = resources[0]['resource']
             self.assertIsNotNone(host_resource)
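
``list_hosts`` and ``show_host`` are unwrapped through the ``hosts`` and ``host`` keys, and ``show_host`` returns a list of per-resource entries. Illustrative shapes, with only the unwrapped keys taken from the hunks above::

    hosts_resp = {'hosts': [{'host_name': 'compute-1', 'service': 'compute',
                             'zone': 'nova'}]}
    host_resp = {'host': [{'resource': {'host': 'compute-1',
                                        'project': '(total)',
                                        'cpu': 4, 'memory_mb': 8192}}]}

    hosts = hosts_resp['hosts']
    resources = host_resp['host']
    assert resources[0]['resource'] is not None
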
diff --git a/tempest/api/compute/admin/test_hosts_negative.py b/tempest/api/compute/admin/test_hosts_negative.py
index b2d2a04..4c8d8a2 100644
--- a/tempest/api/compute/admin/test_hosts_negative.py
+++ b/tempest/api/compute/admin/test_hosts_negative.py
@@ -32,7 +32,7 @@
         cls.non_admin_client = cls.os.hosts_client
 
     def _get_host_name(self):
-        hosts = self.client.list_hosts()
+        hosts = self.client.list_hosts()['hosts']
         self.assertTrue(len(hosts) >= 1)
         hostname = hosts[0]['host_name']
         return hostname
@@ -71,19 +71,6 @@
                           maintenance_mode='enable')
 
     @test.attr(type=['negative'])
-    @test.idempotent_id('76e396fe-5418-4dd3-a186-5b301edc0721')
-    def test_update_host_with_extra_param(self):
-        # only 'status' and 'maintenance_mode' are the valid params.
-        hostname = self._get_host_name()
-
-        self.assertRaises(lib_exc.BadRequest,
-                          self.client.update_host,
-                          hostname,
-                          status='enable',
-                          maintenance_mode='enable',
-                          param='XXX')
-
-    @test.attr(type=['negative'])
     @test.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
     def test_update_host_with_invalid_status(self):
         # 'status' can only be 'enable' or 'disable'
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 47f66af..186867e 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -30,7 +30,7 @@
 
     def _list_hypervisors(self):
         # List of hypervisors
-        hypers = self.client.list_hypervisors()
+        hypers = self.client.list_hypervisors()['hypervisors']
         return hypers
 
     def assertHypervisors(self, hypers):
@@ -45,7 +45,7 @@
     @test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
     def test_get_hypervisor_list_details(self):
         # Display the details of all hypervisors
-        hypers = self.client.list_hypervisors(detail=True)
+        hypers = self.client.list_hypervisors(detail=True)['hypervisors']
         self.assertHypervisors(hypers)
 
     @test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
@@ -54,7 +54,7 @@
         hypers = self._list_hypervisors()
         self.assertHypervisors(hypers)
 
-        details = self.client.show_hypervisor(hypers[0]['id'])
+        details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
         self.assertTrue(len(details) > 0)
         self.assertEqual(details['hypervisor_hostname'],
                          hypers[0]['hypervisor_hostname'])
@@ -66,13 +66,15 @@
         self.assertHypervisors(hypers)
 
         hostname = hypers[0]['hypervisor_hostname']
-        hypervisors = self.client.list_servers_on_hypervisor(hostname)
+        hypervisors = (self.client.list_servers_on_hypervisor(hostname)
+                       ['hypervisors'])
         self.assertTrue(len(hypervisors) > 0)
 
     @test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
     def test_get_hypervisor_stats(self):
         # Verify the stats of all hypervisors
-        stats = self.client.show_hypervisor_statistics()
+        stats = (self.client.show_hypervisor_statistics()
+                 ['hypervisor_statistics'])
         self.assertTrue(len(stats) > 0)
 
     @test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
@@ -88,7 +90,8 @@
         ironic_only = True
         hypers_without_ironic = []
         for hyper in hypers:
-            details = self.client.show_hypervisor(hypers[0]['id'])
+            details = (self.client.show_hypervisor(hypers[0]['id'])
+                       ['hypervisor'])
             if details['hypervisor_type'] != 'ironic':
                 hypers_without_ironic.append(hyper)
                 ironic_only = False
@@ -102,7 +105,8 @@
             # because hypervisors might be disabled, this loop keeps looking
             # for a good hit.
             try:
-                uptime = self.client.show_hypervisor_uptime(hyper['id'])
+                uptime = (self.client.show_hypervisor_uptime(hyper['id'])
+                          ['hypervisor'])
                 if len(uptime) > 0:
                     has_valid_uptime = True
                     break
@@ -117,5 +121,5 @@
         hypers = self._list_hypervisors()
         self.assertHypervisors(hypers)
         hypers = self.client.search_hypervisor(
-            hypers[0]['hypervisor_hostname'])
+            hypers[0]['hypervisor_hostname'])['hypervisors']
         self.assertHypervisors(hypers)
diff --git a/tempest/api/compute/admin/test_hypervisor_negative.py b/tempest/api/compute/admin/test_hypervisor_negative.py
index 701b4bb..ca4a691 100644
--- a/tempest/api/compute/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/admin/test_hypervisor_negative.py
@@ -36,7 +36,7 @@
 
     def _list_hypervisors(self):
         # List of hypervisors
-        hypers = self.client.list_hypervisors()
+        hypers = self.client.list_hypervisors()['hypervisors']
         return hypers
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log.py b/tempest/api/compute/admin/test_instance_usage_audit_log.py
index 189c316..6976783 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log.py
@@ -31,7 +31,8 @@
     @test.idempotent_id('25319919-33d9-424f-9f99-2c203ee48b9d')
     def test_list_instance_usage_audit_logs(self):
         # list instance usage audit logs
-        body = self.adm_client.list_instance_usage_audit_logs()
+        body = (self.adm_client.list_instance_usage_audit_logs()
+                ["instance_usage_audit_logs"])
         expected_items = ['total_errors', 'total_instances', 'log',
                           'num_hosts_running', 'num_hosts_done',
                           'num_hosts', 'hosts_not_run', 'overall_status',
@@ -44,8 +45,9 @@
     def test_get_instance_usage_audit_log(self):
         # Get instance usage audit log before specified time
         now = datetime.datetime.now()
-        body = self.adm_client.show_instance_usage_audit_log(
+        body = (self.adm_client.show_instance_usage_audit_log(
             urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S")))
+            ["instance_usage_audit_log"])
 
         expected_items = ['total_errors', 'total_instances', 'log',
                           'num_hosts_running', 'num_hosts_done', 'num_hosts',
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 6ffa4e9..d6bc6f5 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -32,6 +32,7 @@
         super(LiveBlockMigrationTestJSON, cls).setup_clients()
         cls.admin_hosts_client = cls.os_adm.hosts_client
         cls.admin_servers_client = cls.os_adm.servers_client
+        cls.admin_migration_client = cls.os_adm.migrations_client
 
     @classmethod
     def resource_setup(cls):
@@ -40,7 +41,7 @@
         cls.created_server_ids = []
 
     def _get_compute_hostnames(self):
-        body = self.admin_hosts_client.list_hosts()
+        body = self.admin_hosts_client.list_hosts()['hosts']
         return [
             host_record['host_name']
             for host_record in body
@@ -55,9 +56,10 @@
         return self._get_server_details(server_id)[self._host_key]
 
     def _migrate_server_to(self, server_id, dest_host):
+        bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         body = self.admin_servers_client.live_migrate_server(
-            server_id, dest_host,
-            CONF.compute_feature_enabled.block_migration_for_live_migration)
+            server_id, host=dest_host, block_migration=bmflm,
+            disk_over_commit=False)
         return body
 
     def _get_host_other_than(self, host):
@@ -109,7 +111,16 @@
 
         self._migrate_server_to(server_id, target_host)
         waiters.wait_for_server_status(self.servers_client, server_id, state)
-        self.assertEqual(target_host, self._get_host_for_server(server_id))
+        migration_list = self.admin_migration_client.list_migrations()
+
+        msg = ("Live Migration failed. Migrations list for Instance "
+               "%s: [" % server_id)
+        for live_migration in migration_list:
+            if (live_migration['instance_uuid'] == server_id):
+                msg += "\n%s" % live_migration
+        msg += "]"
+        self.assertEqual(target_host, self._get_host_for_server(server_id),
+                         msg)
 
     @test.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
     @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
@@ -153,7 +164,7 @@
         self.addCleanup(self._volume_clean_up, server_id, volume['id'])
 
         # Attach the volume to the server
-        self.servers_client.attach_volume(server_id, volume['id'],
+        self.servers_client.attach_volume(server_id, volumeId=volume['id'],
                                           device='/dev/xvdb')
         self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
 
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 5af7406..7d69e13 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -42,7 +42,7 @@
         server = self.create_test_server(wait_until="ACTIVE")
         server_id = server['id']
 
-        self.servers_client.resize(server_id, self.flavor_ref_alt)
+        self.servers_client.resize_server(server_id, self.flavor_ref_alt)
         waiters.wait_for_server_status(self.servers_client,
                                        server_id, 'VERIFY_RESIZE')
         self.servers_client.confirm_resize(server_id)
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index 981a5c9..deb81a9 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -36,7 +36,7 @@
 
     @test.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
     def test_get_network(self):
-        networks = self.client.list_networks()
+        networks = self.client.list_networks()['networks']
         if CONF.compute.fixed_network_name:
             configured_network = [x for x in networks if x['label'] ==
                                   CONF.compute.fixed_network_name]
@@ -47,12 +47,13 @@
         else:
             configured_network = networks
         configured_network = configured_network[0]
-        network = self.client.show_network(configured_network['id'])
+        network = (self.client.show_network(configured_network['id'])
+                   ['network'])
         self.assertEqual(configured_network['label'], network['label'])
 
     @test.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
     def test_list_all_networks(self):
-        networks = self.client.list_networks()
+        networks = self.client.list_networks()['networks']
         # Check the configured network is in the list
         if CONF.compute.fixed_network_name:
             configured_network = CONF.compute.fixed_network_name
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 47bdfa6..3416eae 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -118,6 +118,8 @@
                                            password=password,
                                            tenant_id=tenant_id,
                                            email=email)
+        if 'user' in user:
+            user = user['user']
         user_id = user['id']
         self.addCleanup(identity_client.delete_user, user_id)
 
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 798bd30..b4fc749 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -32,6 +32,7 @@
         cls.client = cls.os.quotas_client
         cls.adm_client = cls.os_adm.quotas_client
         cls.sg_client = cls.security_groups_client
+        cls.sgr_client = cls.security_group_rules_client
 
     @classmethod
     def resource_setup(cls):
@@ -128,7 +129,7 @@
         # will be raised when out of quota
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.sg_client.create_security_group,
-                          "sg-overlimit", "sg-desc")
+                          name="sg-overlimit", description="sg-desc")
 
     @decorators.skip_because(bug="1186354",
                              condition=CONF.service_available.neutron)
@@ -155,8 +156,8 @@
 
         s_name = data_utils.rand_name('securitygroup')
         s_description = data_utils.rand_name('description')
-        securitygroup =\
-            self.sg_client.create_security_group(s_name, s_description)
+        securitygroup = self.sg_client.create_security_group(
+            name=s_name, description=s_description)['security_group']
         self.addCleanup(self.sg_client.delete_security_group,
                         securitygroup['id'])
 
@@ -167,5 +168,6 @@
         # A 403 Forbidden or 413 Overlimit (old behaviour) exception
         # will be raised when out of quota
         self.assertRaises((lib_exc.OverLimit, lib_exc.Forbidden),
-                          self.sg_client.create_security_group_rule,
-                          secgroup_id, ip_protocol, 1025, 1025)
+                          self.sgr_client.create_security_group_rule,
+                          parent_group_id=secgroup_id, ip_protocol=ip_protocol,
+                          from_port=1025, to_port=1025)
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
index 13d6cc0..74f3caa 100644
--- a/tempest/api/compute/admin/test_security_group_default_rules.py
+++ b/tempest/api/compute/admin/test_security_group_default_rules.py
@@ -45,10 +45,10 @@
                                              cidr='10.10.0.0/24'):
         # Create Security Group default rule
         rule = self.adm_client.create_security_default_group_rule(
-            ip_protocol,
-            from_port,
-            to_port,
-            cidr=cidr)
+            ip_protocol=ip_protocol,
+            from_port=from_port,
+            to_port=to_port,
+            cidr=cidr)['security_group_default_rule']
         self.assertEqual(ip_protocol, rule['ip_protocol'])
         self.assertEqual(from_port, rule['from_port'])
         self.assertEqual(to_port, rule['to_port'])
@@ -73,9 +73,9 @@
         from_port = 80
         to_port = 80
         rule = self.adm_client.create_security_default_group_rule(
-            ip_protocol,
-            from_port,
-            to_port)
+            ip_protocol=ip_protocol,
+            from_port=from_port,
+            to_port=to_port)['security_group_default_rule']
         self.addCleanup(self.adm_client.delete_security_group_default_rule,
                         rule['id'])
         self.assertNotEqual(0, rule['id'])
@@ -88,10 +88,10 @@
         to_port = 10
         cidr = ''
         rule = self.adm_client.create_security_default_group_rule(
-            ip_protocol,
-            from_port,
-            to_port,
-            cidr=cidr)
+            ip_protocol=ip_protocol,
+            from_port=from_port,
+            to_port=to_port,
+            cidr=cidr)['security_group_default_rule']
         self.addCleanup(self.adm_client.delete_security_group_default_rule,
                         rule['id'])
         self.assertNotEqual(0, rule['id'])
@@ -109,7 +109,8 @@
                                                          cidr)
         self.addCleanup(self.adm_client.delete_security_group_default_rule,
                         rule['id'])
-        rules = self.adm_client.list_security_group_default_rules()
+        rules = (self.adm_client.list_security_group_default_rules()
+                 ['security_group_default_rules'])
         self.assertNotEqual(0, len(rules))
         self.assertIn(rule, rules)
 
@@ -126,5 +127,5 @@
         self.addCleanup(self.adm_client.delete_security_group_default_rule,
                         rule['id'])
         fetched_rule = self.adm_client.show_security_group_default_rule(
-            rule['id'])
+            rule['id'])['security_group_default_rule']
         self.assertEqual(rule, fetched_rule)
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index ff87a4f..b0a3086 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -50,8 +50,8 @@
         for i in range(2):
             name = data_utils.rand_name('securitygroup')
             description = data_utils.rand_name('description')
-            securitygroup = (self.client
-                             .create_security_group(name, description))
+            securitygroup = self.client.create_security_group(
+                name=name, description=description)['security_group']
             self.addCleanup(self._delete_security_group,
                             securitygroup['id'], admin=False)
             security_group_list.append(securitygroup)
@@ -61,15 +61,15 @@
         for i in range(2):
             name = data_utils.rand_name('securitygroup')
             description = data_utils.rand_name('description')
-            adm_securitygroup = (self.adm_client
-                                 .create_security_group(name,
-                                                        description))
+            adm_securitygroup = self.adm_client.create_security_group(
+                name=name, description=description)['security_group']
             self.addCleanup(self._delete_security_group,
                             adm_securitygroup['id'])
             security_group_list.append(adm_securitygroup)
 
         # Fetch all security groups based on 'all_tenants' search filter
-        fetched_list = self.adm_client.list_security_groups(all_tenants='true')
+        fetched_list = self.adm_client.list_security_groups(
+            all_tenants='true')['security_groups']
         sec_group_id_list = map(lambda sg: sg['id'], fetched_list)
         # Now check if all created Security Groups are present in fetched list
         for sec_group in security_group_list:
@@ -77,7 +77,8 @@
 
         # Fetch all security groups for non-admin user with 'all_tenants'
         # search filter
-        fetched_list = self.client.list_security_groups(all_tenants='true')
+        fetched_list = (self.client.list_security_groups(all_tenants='true')
+                        ['security_groups'])
         # Now check if all created Security Groups are present in fetched list
         for sec_group in fetched_list:
             self.assertEqual(sec_group['tenant_id'], client_tenant_id,
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 1982eda..0528fa5 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -163,11 +163,11 @@
 
         # resetting vm state requires admin privilege
         self.client.reset_state(self.s1_id, state='error')
-        rebuilt_server = self.non_admin_client.rebuild(
+        rebuilt_server = self.non_admin_client.rebuild_server(
             self.s1_id, self.image_ref_alt)
         self.addCleanup(waiters.wait_for_server_status, self.non_admin_client,
                         self.s1_id, 'ACTIVE')
-        self.addCleanup(self.non_admin_client.rebuild, self.s1_id,
+        self.addCleanup(self.non_admin_client.rebuild_server, self.s1_id,
                         self.image_ref)
 
         # Verify the properties in the initial response are correct
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index b93aaca..d65f335 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -73,12 +73,13 @@
         ram = int(quota_set['ram']) + 1
         vcpus = 8
         disk = 10
-        flavor_ref = self.flavors_client.create_flavor(flavor_name,
-                                                       ram, vcpus, disk,
-                                                       flavor_id)
+        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
+                                                       ram=ram, vcpus=vcpus,
+                                                       disk=disk,
+                                                       id=flavor_id)['flavor']
         self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
-                          self.client.resize,
+                          self.client.resize_server,
                           self.servers[0]['id'],
                           flavor_ref['id'])
 
@@ -95,12 +96,13 @@
         quota_set = self.quotas_client.show_default_quota_set(self.tenant_id)
         vcpus = int(quota_set['cores']) + 1
         disk = 10
-        flavor_ref = self.flavors_client.create_flavor(flavor_name,
-                                                       ram, vcpus, disk,
-                                                       flavor_id)
+        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
+                                                       ram=ram, vcpus=vcpus,
+                                                       disk=disk,
+                                                       id=flavor_id)['flavor']
         self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
-                          self.client.resize,
+                          self.client.resize_server,
                           self.servers[0]['id'],
                           flavor_ref['id'])
 
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index db22925..4d7dea5 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -31,25 +31,25 @@
 
     @test.idempotent_id('5be41ef4-53d1-41cc-8839-5c2a48a1b283')
     def test_list_services(self):
-        services = self.client.list_services()
+        services = self.client.list_services()['services']
         self.assertNotEqual(0, len(services))
 
     @test.idempotent_id('f345b1ec-bc6e-4c38-a527-3ca2bc00bef5')
     def test_get_service_by_service_binary_name(self):
         binary_name = 'nova-compute'
-        services = self.client.list_services(binary=binary_name)
+        services = self.client.list_services(binary=binary_name)['services']
         self.assertNotEqual(0, len(services))
         for service in services:
             self.assertEqual(binary_name, service['binary'])
 
     @test.idempotent_id('affb42d5-5b4b-43c8-8b0b-6dca054abcca')
     def test_get_service_by_host_name(self):
-        services = self.client.list_services()
+        services = self.client.list_services()['services']
         host_name = services[0]['host']
         services_on_host = [service for service in services if
                             service['host'] == host_name]
 
-        services = self.client.list_services(host=host_name)
+        services = self.client.list_services(host=host_name)['services']
 
         # we could have a periodic job check-in between the 2 service
         # lookups, so only compare binary lists.
@@ -62,12 +62,12 @@
 
     @test.idempotent_id('39397f6f-37b8-4234-8671-281e44c74025')
     def test_get_service_by_service_and_host_name(self):
-        services = self.client.list_services()
+        services = self.client.list_services()['services']
         host_name = services[0]['host']
         binary_name = services[0]['binary']
 
         services = self.client.list_services(host=host_name,
-                                             binary=binary_name)
+                                             binary=binary_name)['services']
         self.assertEqual(1, len(services))
         self.assertEqual(host_name, services[0]['host'])
         self.assertEqual(binary_name, services[0]['binary'])
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index b9335c9..0c81ccb 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -40,22 +40,25 @@
     @test.idempotent_id('d0884a69-f693-4e79-a9af-232d15643bf7')
     def test_get_service_by_invalid_params(self):
         # return all services if the request is sent with an invalid parameter
-        services = self.client.list_services()
-        services_xxx = self.client.list_services(xxx='nova-compute')
+        services = self.client.list_services()['services']
+        services_xxx = (self.client.list_services(xxx='nova-compute')
+                        ['services'])
         self.assertEqual(len(services), len(services_xxx))
 
     @test.attr(type=['negative'])
     @test.idempotent_id('1e966d4a-226e-47c7-b601-0b18a27add54')
     def test_get_service_by_invalid_service_and_valid_host(self):
-        services = self.client.list_services()
+        services = self.client.list_services()['services']
         host_name = services[0]['host']
-        services = self.client.list_services(host=host_name, binary='xxx')
+        services = self.client.list_services(host=host_name,
+                                             binary='xxx')['services']
         self.assertEqual(0, len(services))
 
     @test.attr(type=['negative'])
     @test.idempotent_id('64e7e7fb-69e8-4cb6-a71d-8d5eb0c98655')
     def test_get_service_with_valid_service_and_invalid_host(self):
-        services = self.client.list_services()
+        services = self.client.list_services()['services']
         binary_name = services[0]['binary']
-        services = self.client.list_services(host='xxx', binary=binary_name)
+        services = self.client.list_services(host='xxx',
+                                             binary=binary_name)['services']
         self.assertEqual(0, len(services))
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index 1c9e5d7..7333acb 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -50,14 +50,14 @@
     def test_list_usage_all_tenants(self):
         # Get usage for all tenants
         tenant_usage = self.adm_client.list_tenant_usages(
-            start=self.start, end=self.end, detailed=int(bool(True)))
+            start=self.start, end=self.end, detailed="1")['tenant_usages'][0]
         self.assertEqual(len(tenant_usage), 8)
 
     @test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
     def test_get_usage_tenant(self):
         # Get usage for a specific tenant
         tenant_usage = self.adm_client.show_tenant_usage(
-            self.tenant_id, start=self.start, end=self.end)
+            self.tenant_id, start=self.start, end=self.end)['tenant_usage']
 
         self.assertEqual(len(tenant_usage), 8)
 
@@ -65,6 +65,6 @@
     def test_get_usage_tenant_with_non_admin_user(self):
         # Get usage for a specific tenant with non admin user
         tenant_usage = self.client.show_tenant_usage(
-            self.tenant_id, start=self.start, end=self.end)
+            self.tenant_id, start=self.start, end=self.end)['tenant_usage']
 
         self.assertEqual(len(tenant_usage), 8)
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index 934fc31..e9b4ad4 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -66,6 +66,6 @@
         # Get usage for all tenants with non admin user
         params = {'start': self.start,
                   'end': self.end,
-                  'detailed': int(bool(True))}
+                  'detailed': "1"}
         self.assertRaises(lib_exc.Forbidden,
                           self.client.list_tenant_usages, **params)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 759bb8c..a25a2af 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -43,6 +43,8 @@
     @classmethod
     def skip_checks(cls):
         super(BaseComputeTest, cls).skip_checks()
+        if not CONF.service_available.nova:
+            raise cls.skipException("Nova is not available")
         if cls._api_version != 2:
             msg = ("Unexpected API version is specified (%s)" %
                    cls._api_version)
@@ -64,6 +66,7 @@
         cls.floating_ip_pools_client = cls.os.floating_ip_pools_client
         cls.floating_ips_client = cls.os.floating_ips_client
         cls.keypairs_client = cls.os.keypairs_client
+        cls.security_group_rules_client = cls.os.security_group_rules_client
         cls.security_groups_client = cls.os.security_groups_client
         cls.quotas_client = cls.os.quotas_client
         # NOTE(mriedem): os-quota-class-sets is v2 API only
@@ -128,7 +131,8 @@
 
         for server in cls.servers:
             try:
-                cls.servers_client.wait_for_server_termination(server['id'])
+                waiters.wait_for_server_termination(cls.servers_client,
+                                                    server['id'])
             except Exception:
                 LOG.exception('Waiting for deletion of server %s failed'
                               % server['id'])
@@ -149,7 +153,8 @@
             except Exception as exc:
                 LOG.exception(exc)
                 cls.servers_client.delete_server(cls.server_id)
-                cls.servers_client.wait_for_server_termination(cls.server_id)
+                waiters.wait_for_server_termination(cls.servers_client,
+                                                    cls.server_id)
                 cls.server_id = None
                 raise
 
@@ -220,9 +225,8 @@
             name = data_utils.rand_name(cls.__name__ + "-securitygroup")
         if description is None:
             description = data_utils.rand_name('description')
-        body = \
-            cls.security_groups_client.create_security_group(name,
-                                                             description)
+        body = cls.security_groups_client.create_security_group(
+            name=name, description=description)['security_group']
         cls.security_groups.append(body)
 
         return body
@@ -233,7 +237,8 @@
             name = data_utils.rand_name(cls.__name__ + "-Server-Group")
         if policy is None:
             policy = ['affinity']
-        body = cls.server_groups_client.create_server_group(name, policy)
+        body = (cls.server_groups_client.create_server_group(name, policy)
+                ['server_group'])
         cls.server_groups.append(body['id'])
         return body
 
@@ -278,14 +283,14 @@
         if 'name' in kwargs:
             name = kwargs.pop('name')
 
-        image = cls.images_client.create_image(server_id, name)
+        image = cls.images_client.create_image(server_id, name=name)
         image_id = data_utils.parse_image_id(image.response['location'])
         cls.images.append(image_id)
 
         if 'wait_until' in kwargs:
             waiters.wait_for_image_status(cls.images_client,
                                           image_id, kwargs['wait_until'])
-            image = cls.images_client.show_image(image_id)
+            image = cls.images_client.show_image(image_id)['image']
 
             if kwargs['wait_until'] == 'ACTIVE':
                 if kwargs.get('wait_for_server', True):
@@ -299,7 +304,8 @@
         if server_id:
             try:
                 cls.servers_client.delete_server(server_id)
-                cls.servers_client.wait_for_server_termination(server_id)
+                waiters.wait_for_server_termination(cls.servers_client,
+                                                    server_id)
             except Exception:
                 LOG.exception('Failed to delete server %s' % server_id)
 
@@ -315,7 +321,8 @@
         """Deletes an existing server and waits for it to be gone."""
         try:
             cls.servers_client.delete_server(server_id)
-            cls.servers_client.wait_for_server_termination(server_id)
+            waiters.wait_for_server_termination(cls.servers_client,
+                                                server_id)
         except Exception:
             LOG.exception('Failed to delete server %s' % server_id)
 
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index 5f68786..78a0a93 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -24,13 +24,14 @@
     @test.idempotent_id('c070a441-b08e-447e-a733-905909535b1b')
     def test_create_root_certificate(self):
         # create certificates
-        body = self.certificates_client.create_certificate()
+        body = self.certificates_client.create_certificate()['certificate']
         self.assertIn('data', body)
         self.assertIn('private_key', body)
 
     @test.idempotent_id('3ac273d0-92d2-4632-bdfc-afbc21d4606c')
     def test_get_root_certificate(self):
         # get the root certificate
-        body = self.certificates_client.show_certificate('root')
+        body = (self.certificates_client.show_certificate('root')
+                ['certificate'])
         self.assertIn('data', body)
         self.assertIn('private_key', body)
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 728fefb..e114c80 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -32,8 +32,8 @@
     @test.idempotent_id('e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59')
     def test_list_flavors(self):
         # List of all flavors should contain the expected flavor
-        flavors = self.client.list_flavors()
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavors = self.client.list_flavors()['flavors']
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
                              'name': flavor['name']}
         self.assertIn(flavor_min_detail, flavors)
@@ -41,89 +41,89 @@
     @test.idempotent_id('6e85fde4-b3cd-4137-ab72-ed5f418e8c24')
     def test_list_flavors_with_detail(self):
         # Detailed list of all flavors should contain the expected flavor
-        flavors = self.client.list_flavors(detail=True)
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavors = self.client.list_flavors(detail=True)['flavors']
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         self.assertIn(flavor, flavors)
 
     @test.attr(type='smoke')
     @test.idempotent_id('1f12046b-753d-40d2-abb6-d8eb8b30cb2f')
     def test_get_flavor(self):
         # The expected flavor details should be returned
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         self.assertEqual(self.flavor_ref, flavor['id'])
 
     @test.idempotent_id('8d7691b3-6ed4-411a-abc9-2839a765adab')
     def test_list_flavors_limit_results(self):
         # Only the expected number of flavors should be returned
         params = {'limit': 1}
-        flavors = self.client.list_flavors(**params)
+        flavors = self.client.list_flavors(**params)['flavors']
         self.assertEqual(1, len(flavors))
 
     @test.idempotent_id('b26f6327-2886-467a-82be-cef7a27709cb')
     def test_list_flavors_detailed_limit_results(self):
         # Only the expected number of flavors (detailed) should be returned
         params = {'limit': 1}
-        flavors = self.client.list_flavors(detail=True, **params)
+        flavors = self.client.list_flavors(detail=True, **params)['flavors']
         self.assertEqual(1, len(flavors))
 
     @test.idempotent_id('e800f879-9828-4bd0-8eae-4f17189951fb')
     def test_list_flavors_using_marker(self):
         # The list of flavors should start from the provided marker
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
         params = {'marker': flavor_id}
-        flavors = self.client.list_flavors(**params)
+        flavors = self.client.list_flavors(**params)['flavors']
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
                          'The list of flavors did not start after the marker.')
 
     @test.idempotent_id('6db2f0c0-ddee-4162-9c84-0703d3dd1107')
     def test_list_flavors_detailed_using_marker(self):
         # The list of flavors should start from the provided marker
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
         params = {'marker': flavor_id}
-        flavors = self.client.list_flavors(detail=True, **params)
+        flavors = self.client.list_flavors(detail=True, **params)['flavors']
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
                          'The list of flavors did not start after the marker.')
 
     @test.idempotent_id('3df2743e-3034-4e57-a4cb-b6527f6eac79')
     def test_list_flavors_detailed_filter_by_min_disk(self):
         # The detailed list of flavors should be filtered by disk space
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
         params = {self._min_disk: flavor['disk'] + 1}
-        flavors = self.client.list_flavors(detail=True, **params)
+        flavors = self.client.list_flavors(detail=True, **params)['flavors']
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
     @test.idempotent_id('09fe7509-b4ee-4b34-bf8b-39532dc47292')
     def test_list_flavors_detailed_filter_by_min_ram(self):
         # The detailed list of flavors should be filtered by RAM
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
         params = {self._min_ram: flavor['ram'] + 1}
-        flavors = self.client.list_flavors(detail=True, **params)
+        flavors = self.client.list_flavors(detail=True, **params)['flavors']
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
     @test.idempotent_id('10645a4d-96f5-443f-831b-730711e11dd4')
     def test_list_flavors_filter_by_min_disk(self):
         # The list of flavors should be filtered by disk space
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
         params = {self._min_disk: flavor['disk'] + 1}
-        flavors = self.client.list_flavors(**params)
+        flavors = self.client.list_flavors(**params)['flavors']
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
     @test.idempotent_id('935cf550-e7c8-4da6-8002-00f92d5edfaa')
     def test_list_flavors_filter_by_min_ram(self):
         # The list of flavors should be filtered by RAM
-        flavor = self.client.show_flavor(self.flavor_ref)
+        flavor = self.client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
         params = {self._min_ram: flavor['ram'] + 1}
-        flavors = self.client.list_flavors(**params)
+        flavors = self.client.list_flavors(**params)['flavors']
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 8bb4fac..5b90641 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -39,7 +39,7 @@
         server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
         # Floating IP creation
-        body = cls.client.create_floating_ip()
+        body = cls.client.create_floating_ip()['floating_ip']
         cls.floating_ip_id = body['id']
         cls.floating_ip = body['ip']
 
@@ -63,14 +63,14 @@
     def test_allocate_floating_ip(self):
         # Positive test:Allocation of a new floating IP to a project
         # should be successful
-        body = self.client.create_floating_ip()
+        body = self.client.create_floating_ip()['floating_ip']
         floating_ip_id_allocated = body['id']
         self.addCleanup(self.client.delete_floating_ip,
                         floating_ip_id_allocated)
-        floating_ip_details = \
-            self.client.show_floating_ip(floating_ip_id_allocated)
+        floating_ip_details = self.client.show_floating_ip(
+            floating_ip_id_allocated)['floating_ip']
         # Check that the allocated IP's details are in the list of floating IPs
-        body = self.client.list_floating_ips()
+        body = self.client.list_floating_ips()['floating_ips']
         self.assertIn(floating_ip_details, body)
 
     @test.idempotent_id('de45e989-b5ca-4a9b-916b-04a52e7bbb8b')
@@ -79,7 +79,7 @@
         # Positive test:Deletion of valid floating IP from project
         # should be successful
         # Creating the floating IP that is to be deleted in this method
-        floating_ip_body = self.client.create_floating_ip()
+        floating_ip_body = self.client.create_floating_ip()['floating_ip']
         self.addCleanup(self._try_delete_floating_ip, floating_ip_body['id'])
         # Deleting the floating IP from the project
         self.client.delete_floating_ip(floating_ip_body['id'])
@@ -98,7 +98,8 @@
             self.server_id)
 
         # Check instance_id in the floating_ip body
-        body = self.client.show_floating_ip(self.floating_ip_id)
+        body = (self.client.show_floating_ip(self.floating_ip_id)
+                ['floating_ip'])
         self.assertEqual(self.server_id, body['instance_id'])
 
         # Disassociation of floating IP that was associated in this method
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
index c07af72..64aac80 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
@@ -42,7 +42,7 @@
         cls.server_id = server['id']
         # Generating a nonexistent floatingIP id
         cls.floating_ip_ids = []
-        body = cls.client.list_floating_ips()
+        body = cls.client.list_floating_ips()['floating_ips']
         for i in range(len(body)):
             cls.floating_ip_ids.append(body[i]['id'])
         while True:
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index d26a5e5..d003967 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -31,7 +31,7 @@
         cls.floating_ip = []
         cls.floating_ip_id = []
         for i in range(3):
-            body = cls.client.create_floating_ip()
+            body = cls.client.create_floating_ip()['floating_ip']
             cls.floating_ip.append(body)
             cls.floating_ip_id.append(body['id'])
 
@@ -45,7 +45,7 @@
     @test.services('network')
     def test_list_floating_ips(self):
         # Positive test:Should return the list of floating IPs
-        body = self.client.list_floating_ips()
+        body = self.client.list_floating_ips()['floating_ips']
         floating_ips = body
         self.assertNotEqual(0, len(floating_ips),
                             "Expected floating IPs. Got zero.")
@@ -57,14 +57,14 @@
     def test_get_floating_ip_details(self):
         # Positive test:Should be able to GET the details of floatingIP
         # Creating a floating IP for which details are to be checked
-        body = self.client.create_floating_ip()
+        body = self.client.create_floating_ip()['floating_ip']
         floating_ip_id = body['id']
         self.addCleanup(self.client.delete_floating_ip,
                         floating_ip_id)
         floating_ip_instance_id = body['instance_id']
         floating_ip_ip = body['ip']
         floating_ip_fixed_ip = body['fixed_ip']
-        body = self.client.show_floating_ip(floating_ip_id)
+        body = self.client.show_floating_ip(floating_ip_id)['floating_ip']
         # Comparing the details of floating IP
         self.assertEqual(floating_ip_instance_id,
                          body['instance_id'])
@@ -78,5 +78,5 @@
     def test_list_floating_ip_pools(self):
         # Positive test:Should return the list of floating IP Pools
         floating_ip_pools = self.pools_client.list_floating_ip_pools()
-        self.assertNotEqual(0, len(floating_ip_pools),
+        self.assertNotEqual(0, len(floating_ip_pools['floating_ip_pools']),
                             "Expected floating IP Pools. Got zero.")
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index ab82d91..975b850 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -48,7 +48,7 @@
         body = cls.glance_client.create_image(name=name,
                                               container_format='bare',
                                               disk_format='raw',
-                                              is_public=False)
+                                              is_public=False)['image']
         cls.image_id = body['id']
         cls.images.append(cls.image_id)
         image_file = six.StringIO(('*' * 1024))
@@ -64,7 +64,8 @@
     def test_list_image_metadata(self):
         # All metadata key/value pairs for an image should be returned
         resp_metadata = self.client.list_image_metadata(self.image_id)
-        expected = {'os_version': 'value1', 'os_distro': 'value2'}
+        expected = {'metadata': {
+            'os_version': 'value1', 'os_distro': 'value2'}}
         self.assertEqual(expected, resp_metadata)
 
     @test.idempotent_id('ece7befc-d3ce-42a4-b4be-c3067a418c29')
@@ -74,7 +75,8 @@
         self.client.set_image_metadata(self.image_id,
                                        req_metadata)
 
-        resp_metadata = self.client.list_image_metadata(self.image_id)
+        resp_metadata = (self.client.list_image_metadata(self.image_id)
+                         ['metadata'])
         self.assertEqual(req_metadata, resp_metadata)
 
     @test.idempotent_id('7b491c11-a9d5-40fe-a696-7f7e03d3fea2')
@@ -85,16 +87,17 @@
                                           req_metadata)
 
         resp_metadata = self.client.list_image_metadata(self.image_id)
-        expected = {'os_version': 'alt1',
-                    'os_distro': 'value2',
-                    'architecture': 'value3'}
+        expected = {'metadata': {
+            'os_version': 'alt1',
+            'os_distro': 'value2',
+            'architecture': 'value3'}}
         self.assertEqual(expected, resp_metadata)
 
     @test.idempotent_id('4f5db52f-6685-4c75-b848-f4bb363f9aa6')
     def test_get_image_metadata_item(self):
         # The value for a specific metadata key should be returned
         meta = self.client.show_image_metadata_item(self.image_id,
-                                                    'os_distro')
+                                                    'os_distro')['meta']
         self.assertEqual('value2', meta['os_distro'])
 
     @test.idempotent_id('f2de776a-4778-4d90-a5da-aae63aee64ae')
@@ -105,7 +108,7 @@
         self.client.set_image_metadata_item(self.image_id,
                                             'os_version', meta)
         resp_metadata = self.client.list_image_metadata(self.image_id)
-        expected = {'os_version': 'alt', 'os_distro': 'value2'}
+        expected = {'metadata': {'os_version': 'alt', 'os_distro': 'value2'}}
         self.assertEqual(expected, resp_metadata)
 
     @test.idempotent_id('a013796c-ba37-4bb5-8602-d944511def14')
@@ -114,5 +117,5 @@
         self.client.delete_image_metadata_item(self.image_id,
                                                'os_version')
         resp_metadata = self.client.list_image_metadata(self.image_id)
-        expected = {'os_distro': 'value2'}
+        expected = {'metadata': {'os_distro': 'value2'}}
         self.assertEqual(expected, resp_metadata)
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index 9721fa5..7f23730 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -50,7 +50,7 @@
 
         # Delete server before trying to create an image
         self.servers_client.delete_server(server['id'])
-        self.servers_client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
         # Create a new image after server is deleted
         name = data_utils.rand_name('image')
         meta = {'image_type': 'test'}
@@ -93,7 +93,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         test_uuid = ('a' * 35)
         self.assertRaises(lib_exc.NotFound, self.client.create_image,
-                          test_uuid, snapshot_name)
+                          test_uuid, name=snapshot_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('36741560-510e-4cc2-8641-55fe4dfb2437')
@@ -102,7 +102,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         test_uuid = ('a' * 37)
         self.assertRaises(lib_exc.NotFound, self.client.create_image,
-                          test_uuid, snapshot_name)
+                          test_uuid, name=snapshot_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('381acb65-785a-4942-94ce-d8f8c84f1f0f')
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 40a781c..37c2bb6 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -71,7 +71,7 @@
         cls.server_id = server['id']
 
     def _get_default_flavor_disk_size(self, flavor_id):
-        flavor = self.flavors_client.show_flavor(flavor_id)
+        flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
         return flavor['disk']
 
     @test.idempotent_id('3731d080-d4c5-4872-b41a-64d0d0021314')
@@ -80,16 +80,17 @@
         # Create a new image
         name = data_utils.rand_name('image')
         meta = {'image_type': 'test'}
-        body = self.client.create_image(self.server_id, name, meta)
+        body = self.client.create_image(self.server_id, name=name,
+                                        metadata=meta)
         image_id = data_utils.parse_image_id(body.response['location'])
         waiters.wait_for_image_status(self.client, image_id, 'ACTIVE')
 
         # Verify the image was created correctly
-        image = self.client.show_image(image_id)
+        image = self.client.show_image(image_id)['image']
         self.assertEqual(name, image['name'])
         self.assertEqual('test', image['metadata']['image_type'])
 
-        original_image = self.client.show_image(self.image_ref)
+        original_image = self.client.show_image(self.image_ref)['image']
 
         # Verify minRAM is the same as the original image
         self.assertEqual(image['minRam'], original_image['minRam'])
@@ -112,6 +113,6 @@
         # #1370954 in glance which will 500 if mysql is used as the
         # backend and it attempts to store a 4 byte utf-8 character
         utf8_name = data_utils.rand_name('\xe2\x82\xa1')
-        body = self.client.create_image(self.server_id, utf8_name)
+        body = self.client.create_image(self.server_id, name=utf8_name)
         image_id = data_utils.parse_image_id(body.response['location'])
         self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 1a74e52..9ea62fb 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -93,7 +93,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         meta = {'': ''}
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          self.server_id, snapshot_name, meta)
+                          self.server_id, name=snapshot_name, metadata=meta)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('3d24d11f-5366-4536-bd28-cff32b748eca')
@@ -102,7 +102,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         meta = {'a' * 260: 'b' * 260}
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          self.server_id, snapshot_name, meta)
+                          self.server_id, name=snapshot_name, metadata=meta)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('0460efcf-ee88-4f94-acef-1bf658695456')
@@ -111,8 +111,7 @@
 
         # Create first snapshot
         snapshot_name = data_utils.rand_name('test-snap')
-        body = self.client.create_image(self.server_id,
-                                        snapshot_name)
+        body = self.client.create_image(self.server_id, name=snapshot_name)
         image_id = data_utils.parse_image_id(body.response['location'])
         self.image_ids.append(image_id)
         self.addCleanup(self._reset_server)
@@ -120,7 +119,7 @@
         # Create second snapshot
         alt_snapshot_name = data_utils.rand_name('test-snap')
         self.assertRaises(lib_exc.Conflict, self.client.create_image,
-                          self.server_id, alt_snapshot_name)
+                          self.server_id, name=alt_snapshot_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
@@ -129,7 +128,7 @@
 
         snapshot_name = data_utils.rand_name('a' * 260)
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          self.server_id, snapshot_name)
+                          self.server_id, name=snapshot_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('0894954d-2db2-4195-a45b-ffec0bc0187e')
@@ -137,7 +136,7 @@
         # Return an error while trying to delete an image that is being created
 
         snapshot_name = data_utils.rand_name('test-snap')
-        body = self.client.create_image(self.server_id, snapshot_name)
+        body = self.client.create_image(self.server_id, name=snapshot_name)
         image_id = data_utils.parse_image_id(body.response['location'])
         self.image_ids.append(image_id)
         self.addCleanup(self._reset_server)
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 2c0ce59..9f3ba71 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -54,7 +54,7 @@
             body = cls.glance_client.create_image(name=name,
                                                   container_format='bare',
                                                   disk_format='raw',
-                                                  is_public=False)
+                                                  is_public=False)['image']
             image_id = body['id']
             cls.images.append(image_id)
             # Wait 1 second between creation and upload to ensure a delta
@@ -63,7 +63,7 @@
             image_file = six.StringIO(('*' * 1024))
             cls.glance_client.update_image(image_id, data=image_file)
             waiters.wait_for_image_status(cls.client, image_id, 'ACTIVE')
-            body = cls.client.show_image(image_id)
+            body = cls.client.show_image(image_id)['image']
             return body
 
         # Create non-snapshot images via glance
@@ -106,7 +106,7 @@
         # The list of images should contain only images with the
         # provided status
         params = {'status': 'ACTIVE'}
-        images = self.client.list_images(**params)
+        images = self.client.list_images(**params)['images']
 
         self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
         self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
@@ -117,7 +117,7 @@
         # List of all images should contain the expected images filtered
         # by name
         params = {'name': self.image1['name']}
-        images = self.client.list_images(**params)
+        images = self.client.list_images(**params)['images']
 
         self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
         self.assertFalse(any([i for i in images if i['id'] == self.image2_id]))
@@ -129,7 +129,7 @@
     def test_list_images_filter_by_server_id(self):
         # The images should contain images filtered by server id
         params = {'server': self.server1['id']}
-        images = self.client.list_images(**params)
+        images = self.client.list_images(**params)['images']
 
         self.assertTrue(any([i for i in images
                              if i['id'] == self.snapshot1_id]),
@@ -150,7 +150,7 @@
         # Try all server link types
         for link in server_links:
             params = {'server': link['href']}
-            images = self.client.list_images(**params)
+            images = self.client.list_images(**params)['images']
 
             self.assertFalse(any([i for i in images
                                   if i['id'] == self.snapshot1_id]))
@@ -165,7 +165,7 @@
     def test_list_images_filter_by_type(self):
         # The list of images should be filtered by image type
         params = {'type': 'snapshot'}
-        images = self.client.list_images(**params)
+        images = self.client.list_images(**params)['images']
 
         self.assertTrue(any([i for i in images
                              if i['id'] == self.snapshot1_id]))
@@ -180,7 +180,7 @@
     def test_list_images_limit_results(self):
         # Verify only the expected number of results are returned
         params = {'limit': '1'}
-        images = self.client.list_images(**params)
+        images = self.client.list_images(**params)['images']
         self.assertEqual(1, len([x for x in images if 'id' in x]))
 
     @test.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
@@ -190,7 +190,7 @@
         # Becoming ACTIVE will modify the updated time
         # Filter by the image's created time
         params = {'changes-since': self.image3['created']}
-        images = self.client.list_images(**params)
+        images = self.client.list_images(**params)['images']
         found = any([i for i in images if i['id'] == self.image3_id])
         self.assertTrue(found)
 
@@ -199,7 +199,7 @@
         # Detailed list of all images should only contain images
         # with the provided status
         params = {'status': 'ACTIVE'}
-        images = self.client.list_images(detail=True, **params)
+        images = self.client.list_images(detail=True, **params)['images']
 
         self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
         self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
@@ -210,7 +210,7 @@
         # Detailed list of all images should contain the expected
         # images filtered by name
         params = {'name': self.image1['name']}
-        images = self.client.list_images(detail=True, **params)
+        images = self.client.list_images(detail=True, **params)['images']
 
         self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
         self.assertFalse(any([i for i in images if i['id'] == self.image2_id]))
@@ -221,7 +221,7 @@
         # Verify only the expected number of results (with full details)
         # are returned
         params = {'limit': '1'}
-        images = self.client.list_images(detail=True, **params)
+        images = self.client.list_images(detail=True, **params)['images']
         self.assertEqual(1, len(images))
 
     @test.idempotent_id('8c78f822-203b-4bf6-8bba-56ebd551cf84')
@@ -234,7 +234,7 @@
         # Try all server link types
         for link in server_links:
             params = {'server': link['href']}
-            images = self.client.list_images(detail=True, **params)
+            images = self.client.list_images(detail=True, **params)['images']
 
             self.assertFalse(any([i for i in images
                                   if i['id'] == self.snapshot1_id]))
@@ -249,7 +249,7 @@
     def test_list_images_with_detail_filter_by_type(self):
         # The detailed list of images should be filtered by image type
         params = {'type': 'snapshot'}
-        images = self.client.list_images(detail=True, **params)
+        images = self.client.list_images(detail=True, **params)['images']
         self.client.show_image(self.image_ref)
 
         self.assertTrue(any([i for i in images
@@ -268,5 +268,5 @@
         # Becoming ACTIVE will modify the updated time
         # Filter by the image's created time
         params = {'changes-since': self.image1['created']}
-        images = self.client.list_images(detail=True, **params)
+        images = self.client.list_images(detail=True, **params)['images']
         self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
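
The image listing calls above now index into the returned response body, where
the list itself lives under the ``images`` key. A minimal sketch of the
resulting calling pattern, assuming an ``images_client`` with the same
``list_images`` signature used above::

    def active_image_ids(images_client):
        # The client returns the whole deserialized body; the image list
        # sits under the 'images' key.
        body = images_client.list_images(detail=True, status='ACTIVE')
        return [image['id'] for image in body['images']]
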
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index b67378c..6ca15d6 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -37,19 +37,19 @@
     @test.idempotent_id('490d0898-e12a-463f-aef0-c50156b9f789')
     def test_get_image(self):
         # Returns the correct details for a single image
-        image = self.client.show_image(self.image_ref)
+        image = self.client.show_image(self.image_ref)['image']
         self.assertEqual(self.image_ref, image['id'])
 
     @test.idempotent_id('fd51b7f4-d4a3-4331-9885-866658112a6f')
     def test_list_images(self):
         # The list of all images should contain the image
-        images = self.client.list_images()
+        images = self.client.list_images()['images']
         found = any([i for i in images if i['id'] == self.image_ref])
         self.assertTrue(found)
 
     @test.idempotent_id('9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6')
     def test_list_images_with_detail(self):
         # Detailed list of all images should contain the expected images
-        images = self.client.list_images(detail=True)
+        images = self.client.list_images(detail=True)['images']
         found = any([i for i in images if i['id'] == self.image_ref])
         self.assertTrue(found)
diff --git a/tempest/api/compute/keypairs/base.py b/tempest/api/compute/keypairs/base.py
new file mode 100644
index 0000000..76e5573
--- /dev/null
+++ b/tempest/api/compute/keypairs/base.py
@@ -0,0 +1,38 @@
+# Copyright 2015 Deutsche Telekom AG
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+
+
+class BaseKeypairTest(base.BaseComputeTest):
+    """Base test case class for all keypair API tests."""
+
+    _api_version = 2
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseKeypairTest, cls).setup_clients()
+        cls.client = cls.keypairs_client
+
+    def _delete_keypair(self, keypair_name):
+        self.client.delete_keypair(keypair_name)
+
+    def _create_keypair(self, keypair_name, pub_key=None):
+        kwargs = {'name': keypair_name}
+        if pub_key:
+            kwargs.update({'public_key': pub_key})
+        body = self.client.create_keypair(**kwargs)['keypair']
+        self.addCleanup(self._delete_keypair, keypair_name)
+        return body
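
``BaseKeypairTest`` centralizes client selection, keypair creation, and
cleanup, so subclasses only deal with the keypair itself. A hedged sketch of
how a subclass might use the helper (the class and keypair names are
illustrative)::

    class KeyPairsSketchTest(base.BaseKeypairTest):
        def test_create_and_show(self):
            # _create_keypair registers the matching delete via addCleanup,
            # so no explicit teardown is needed here.
            keypair = self._create_keypair('sketch-keypair')
            shown = self.client.show_keypair(keypair['name'])['keypair']
            self.assertEqual('sketch-keypair', shown['name'])
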
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 45eaa97..d10bf14 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -13,28 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.compute import base
+from tempest.api.compute.keypairs import base
 from tempest.common.utils import data_utils
 from tempest import test
 
 
-class KeyPairsV2TestJSON(base.BaseComputeTest):
-
-    _api_version = 2
-
-    @classmethod
-    def setup_clients(cls):
-        super(KeyPairsV2TestJSON, cls).setup_clients()
-        cls.client = cls.keypairs_client
-
-    def _delete_keypair(self, keypair_name):
-        self.client.delete_keypair(keypair_name)
-
-    def _create_keypair(self, keypair_name, pub_key=None):
-        body = self.client.create_keypair(keypair_name, pub_key)
-        self.addCleanup(self._delete_keypair, keypair_name)
-        return body
-
+class KeyPairsV2TestJSON(base.BaseKeypairTest):
     @test.idempotent_id('1d1dbedb-d7a0-432a-9d09-83f543c3c19b')
     def test_keypairs_create_list_delete(self):
         # Keypairs created should be available in the response list
@@ -50,9 +34,7 @@
             key_list.append(keypair)
         # Fetch all keypairs and verify the list
         # has all created keypairs
-        fetched_list = self.client.list_keypairs()
-        # We need to remove the extra 'keypair' element in the
-        # returned dict. See comment in keypairs_client.list_keypairs()
+        fetched_list = self.client.list_keypairs()['keypairs']
         new_list = list()
         for keypair in fetched_list:
             new_list.append(keypair['keypair'])
@@ -81,7 +63,7 @@
         # Keypair should be created, fetched by name, and deleted
         k_name = data_utils.rand_name('keypair')
         self._create_keypair(k_name)
-        keypair_detail = self.client.show_keypair(k_name)
+        keypair_detail = self.client.show_keypair(k_name)['keypair']
         self.assertIn('name', keypair_detail)
         self.assertIn('public_key', keypair_detail)
         self.assertEqual(keypair_detail['name'], k_name,
diff --git a/tempest/api/compute/keypairs/test_keypairs_negative.py b/tempest/api/compute/keypairs/test_keypairs_negative.py
index 54b07f0..0ab78fb 100644
--- a/tempest/api/compute/keypairs/test_keypairs_negative.py
+++ b/tempest/api/compute/keypairs/test_keypairs_negative.py
@@ -16,22 +16,12 @@
 
 from tempest_lib import exceptions as lib_exc
 
-from tempest.api.compute import base
+from tempest.api.compute.keypairs import base
 from tempest.common.utils import data_utils
 from tempest import test
 
 
-class KeyPairsNegativeTestJSON(base.BaseV2ComputeTest):
-
-    @classmethod
-    def setup_clients(cls):
-        super(KeyPairsNegativeTestJSON, cls).setup_clients()
-        cls.client = cls.keypairs_client
-
-    def _create_keypair(self, keypair_name, pub_key=None):
-        self.client.create_keypair(keypair_name, pub_key)
-        self.addCleanup(self.client.delete_keypair, keypair_name)
-
+class KeyPairsNegativeTestJSON(base.BaseKeypairTest):
     @test.attr(type=['negative'])
     @test.idempotent_id('29cca892-46ae-4d48-bc32-8fe7e731eb81')
     def test_keypair_create_with_invalid_pub_key(self):
@@ -72,7 +62,7 @@
     def test_create_keypair_with_duplicate_name(self):
         # Keypairs with duplicate names should not be created
         k_name = data_utils.rand_name('keypair')
-        self.client.create_keypair(k_name)
+        self.client.create_keypair(name=k_name)
         # Now try the same keyname to create another key
         self.assertRaises(lib_exc.Conflict, self._create_keypair,
                           k_name)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index ff3f25b..3c22d28 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -25,7 +25,7 @@
     @classmethod
     def setup_clients(cls):
         super(SecurityGroupRulesTestJSON, cls).setup_clients()
-        cls.client = cls.security_groups_client
+        cls.client = cls.security_group_rules_client
 
     @classmethod
     def resource_setup(cls):
@@ -69,11 +69,11 @@
         security_group = self.create_security_group()
         securitygroup_id = security_group['id']
         # Adding rules to the created Security Group
-        rule = \
-            self.client.create_security_group_rule(securitygroup_id,
-                                                   self.ip_protocol,
-                                                   self.from_port,
-                                                   self.to_port)
+        rule = self.client.create_security_group_rule(
+            parent_group_id=securitygroup_id,
+            ip_protocol=self.ip_protocol,
+            from_port=self.from_port,
+            to_port=self.to_port)['security_group_rule']
         self.expected['parent_group_id'] = securitygroup_id
         self.expected['ip_range'] = {'cidr': '0.0.0.0/0'}
         self._check_expected_response(rule)
@@ -91,12 +91,12 @@
 
         # Adding rules to the created Security Group with optional cidr
         cidr = '10.2.3.124/24'
-        rule = \
-            self.client.create_security_group_rule(parent_group_id,
-                                                   self.ip_protocol,
-                                                   self.from_port,
-                                                   self.to_port,
-                                                   cidr=cidr)
+        rule = self.client.create_security_group_rule(
+            parent_group_id=parent_group_id,
+            ip_protocol=self.ip_protocol,
+            from_port=self.from_port,
+            to_port=self.to_port,
+            cidr=cidr)['security_group_rule']
         self.expected['parent_group_id'] = parent_group_id
         self.expected['ip_range'] = {'cidr': cidr}
         self._check_expected_response(rule)
@@ -118,12 +118,12 @@
         group_name = security_group['name']
 
         # Adding rules to the created Security Group with optional group_id
-        rule = \
-            self.client.create_security_group_rule(parent_group_id,
-                                                   self.ip_protocol,
-                                                   self.from_port,
-                                                   self.to_port,
-                                                   group_id=group_id)
+        rule = self.client.create_security_group_rule(
+            parent_group_id=parent_group_id,
+            ip_protocol=self.ip_protocol,
+            from_port=self.from_port,
+            to_port=self.to_port,
+            group_id=group_id)['security_group_rule']
         self.expected['parent_group_id'] = parent_group_id
         self.expected['group'] = {'tenant_id': self.client.tenant_id,
                                   'name': group_name}
@@ -140,28 +140,29 @@
         securitygroup_id = security_group['id']
 
         # Add a first rule to the created Security Group
-        rule = \
-            self.client.create_security_group_rule(securitygroup_id,
-                                                   self.ip_protocol,
-                                                   self.from_port,
-                                                   self.to_port)
+        rule = self.client.create_security_group_rule(
+            parent_group_id=securitygroup_id,
+            ip_protocol=self.ip_protocol,
+            from_port=self.from_port,
+            to_port=self.to_port)['security_group_rule']
         rule1_id = rule['id']
 
         # Add a second rule to the created Security Group
         ip_protocol2 = 'icmp'
         from_port2 = -1
         to_port2 = -1
-        rule = \
-            self.client.create_security_group_rule(securitygroup_id,
-                                                   ip_protocol2,
-                                                   from_port2, to_port2)
+        rule = self.client.create_security_group_rule(
+            parent_group_id=securitygroup_id,
+            ip_protocol=ip_protocol2,
+            from_port=from_port2,
+            to_port=to_port2)['security_group_rule']
         rule2_id = rule['id']
         # Delete the Security Group rule2 at the end of this method
         self.addCleanup(self.client.delete_security_group_rule, rule2_id)
 
         # Get rules of the created Security Group
-        rules = \
-            self.client.list_security_group_rules(securitygroup_id)
+        rules = (self.client.list_security_group_rules(securitygroup_id)
+                 ['rules'])
         self.assertTrue(any([i for i in rules if i['id'] == rule1_id]))
         self.assertTrue(any([i for i in rules if i['id'] == rule2_id]))
 
@@ -176,16 +177,16 @@
         security_group = self.create_security_group()
         sg2_id = security_group['id']
         # Adding rules to the Group1
-        self.client.create_security_group_rule(sg1_id,
-                                               self.ip_protocol,
-                                               self.from_port,
-                                               self.to_port,
-                                               group_id=sg2_id)
+        self.client.create_security_group_rule(
+            parent_group_id=sg1_id,
+            ip_protocol=self.ip_protocol,
+            from_port=self.from_port,
+            to_port=self.to_port,
+            group_id=sg2_id)['security_group_rule']
 
         # Delete group2
-        self.client.delete_security_group(sg2_id)
+        self.security_groups_client.delete_security_group(sg2_id)
         # Get rules of the Group1
-        rules = \
-            self.client.list_security_group_rules(sg1_id)
+        rules = self.client.list_security_group_rules(sg1_id)['rules']
+        # Group1 has no rules because group2 has been deleted
         self.assertEqual(0, len(rules))
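
The rule tests above now go through a dedicated ``security_group_rules_client``,
pass every field as a keyword argument, and unwrap the created rule from the
``security_group_rule`` key. A minimal sketch of the create-and-list round
trip, assuming a ``rules_client`` wired up as in ``setup_clients`` above (the
SSH port numbers are illustrative)::

    def add_ssh_rule(rules_client, parent_group_id):
        # Every field is a keyword argument; the response wraps the new
        # rule under 'security_group_rule'.
        rule = rules_client.create_security_group_rule(
            parent_group_id=parent_group_id,
            ip_protocol='tcp',
            from_port=22,
            to_port=22)['security_group_rule']
        # Listing unwraps the rules from the 'rules' key.
        rules = rules_client.list_security_group_rules(
            parent_group_id)['rules']
        assert rule['id'] in [r['id'] for r in rules]
        return rule
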
diff --git a/tempest/api/compute/security_groups/test_security_group_rules_negative.py b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
index 15e79ac..816038a 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules_negative.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
@@ -36,6 +36,7 @@
     def setup_clients(cls):
         super(SecurityGroupRulesNegativeTestJSON, cls).setup_clients()
         cls.client = cls.security_groups_client
+        cls.rules_client = cls.security_group_rules_client
 
     @test.attr(type=['negative'])
     @test.idempotent_id('1d507e98-7951-469b-82c3-23f1e6b8c254')
@@ -49,8 +50,10 @@
         from_port = 22
         to_port = 22
         self.assertRaises(lib_exc.NotFound,
-                          self.client.create_security_group_rule,
-                          parent_group_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=parent_group_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('2244d7e4-adb7-4ecb-9930-2d77e123ce4f')
@@ -64,8 +67,10 @@
         from_port = 22
         to_port = 22
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group_rule,
-                          parent_group_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=parent_group_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('8bd56d02-3ffa-4d67-9933-b6b9a01d6089')
@@ -80,16 +85,17 @@
         from_port = 22
         to_port = 22
 
-        rule = \
-            self.client.create_security_group_rule(parent_group_id,
-                                                   ip_protocol,
-                                                   from_port,
-                                                   to_port)
-        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
+        rule = self.rules_client.create_security_group_rule(
+            parent_group_id=parent_group_id, ip_protocol=ip_protocol,
+            from_port=from_port, to_port=to_port)['security_group_rule']
+        self.addCleanup(self.rules_client.delete_security_group_rule,
+                        rule['id'])
         # Add the same rule to the group should fail
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group_rule,
-                          parent_group_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=parent_group_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('84c81249-9f6e-439c-9bbf-cbb0d2cddbdf')
@@ -106,8 +112,10 @@
         to_port = 22
 
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group_rule,
-                          parent_group_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=parent_group_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('12bbc875-1045-4f7a-be46-751277baedb9')
@@ -123,8 +131,10 @@
         from_port = data_utils.rand_int_id(start=65536)
         to_port = 22
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group_rule,
-                          parent_group_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=parent_group_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('ff88804d-144f-45d1-bf59-dd155838a43a')
@@ -140,8 +150,10 @@
         from_port = 22
         to_port = data_utils.rand_int_id(start=65536)
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group_rule,
-                          parent_group_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=parent_group_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('00296fa9-0576-496a-ae15-fbab843189e0')
@@ -157,8 +169,10 @@
         from_port = 22
         to_port = 21
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group_rule,
-                          secgroup_id, ip_protocol, from_port, to_port)
+                          self.rules_client.create_security_group_rule,
+                          parent_group_id=secgroup_id,
+                          ip_protocol=ip_protocol, from_port=from_port,
+                          to_port=to_port)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('56fddcca-dbb8-4494-a0db-96e9f869527c')
@@ -168,5 +182,5 @@
         # with non existent id
         non_existent_rule_id = not_existing_id()
         self.assertRaises(lib_exc.NotFound,
-                          self.client.delete_security_group_rule,
+                          self.rules_client.delete_security_group_rule,
                           non_existent_rule_id)
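
Because the client methods now take keyword arguments, the negative tests hand
``assertRaises`` the unbound client method followed by those same keywords;
``assertRaises`` forwards them when it invokes the call, so the request is only
made inside the assertion. A short sketch of that idiom, reusing the names from
the tests above (the port values are illustrative)::

    # Sketch: the keyword arguments after the callable are forwarded to
    # create_security_group_rule when assertRaises calls it.
    self.assertRaises(lib_exc.BadRequest,
                      self.rules_client.create_security_group_rule,
                      parent_group_id=parent_group_id,
                      ip_protocol='tcp', from_port=22, to_port=21)
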
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index bd252b0..dbbeb70 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -40,7 +40,7 @@
             security_group_list.append(body)
         # Fetch all Security Groups and verify the list
         # has all created Security Groups
-        fetched_list = self.client.list_security_groups()
+        fetched_list = self.client.list_security_groups()['security_groups']
         # Now check if all the created Security Groups are in fetched list
         missing_sgs = \
             [sg for sg in security_group_list if sg not in fetched_list]
@@ -53,7 +53,7 @@
             self.client.delete_security_group(sg['id'])
             self.client.wait_for_resource_deletion(sg['id'])
         # Now check if all the created Security Groups are deleted
-        fetched_list = self.client.list_security_groups()
+        fetched_list = self.client.list_security_groups()['security_groups']
         deleted_sgs = \
             [sg for sg in security_group_list if sg in fetched_list]
         self.assertFalse(deleted_sgs,
@@ -75,8 +75,8 @@
                          "The created Security Group name is "
                          "not equal to the requested name")
         # Now fetch the created Security Group by its 'id'
-        fetched_group = \
-            self.client.show_security_group(securitygroup['id'])
+        fetched_group = (self.client.show_security_group(securitygroup['id'])
+                         ['security_group'])
         self.assertEqual(securitygroup, fetched_group,
                          "The fetched Security Group is different "
                          "from the created Group")
@@ -109,7 +109,7 @@
                           sg['id'])
 
         # Reboot and add the other security group
-        self.servers_client.reboot(server_id, 'HARD')
+        self.servers_client.reboot_server(server_id, 'HARD')
         waiters.wait_for_server_status(self.servers_client, server_id,
                                        'ACTIVE')
         self.servers_client.add_security_group(server_id, sg2['name'])
@@ -123,7 +123,7 @@
         # Shutdown the server and then verify we can destroy the
         # security groups, since no active server instance is using them
         self.servers_client.delete_server(server_id)
-        self.servers_client.wait_for_server_termination(server_id)
+        waiters.wait_for_server_termination(self.servers_client, server_id)
 
         self.client.delete_security_group(sg['id'])
         self.client.delete_security_group(sg2['id'])
@@ -143,7 +143,7 @@
                                           name=s_new_name,
                                           description=s_new_des)
         # get the security group
-        fetched_group = \
-            self.client.show_security_group(securitygroup_id)
+        fetched_group = (self.client.show_security_group(securitygroup_id)
+                         ['security_group'])
         self.assertEqual(s_new_name, fetched_group['name'])
         self.assertEqual(s_new_des, fetched_group['description'])
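
Waiting for server deletion is now done through the shared ``waiters`` helpers
rather than a method on the servers client, so the waiter receives the client
explicitly. A minimal sketch of the delete-and-wait pattern used throughout
these files::

    from tempest.common import waiters

    def delete_server_and_wait(servers_client, server_id):
        # The waiter polls the passed-in client until the server is gone.
        servers_client.delete_server(server_id)
        waiters.wait_for_server_termination(servers_client, server_id)
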
diff --git a/tempest/api/compute/security_groups/test_security_groups_negative.py b/tempest/api/compute/security_groups/test_security_groups_negative.py
index d8cbe3d..120d327 100644
--- a/tempest/api/compute/security_groups/test_security_groups_negative.py
+++ b/tempest/api/compute/security_groups/test_security_groups_negative.py
@@ -39,7 +39,7 @@
 
     def _generate_a_non_existent_security_group_id(self):
         security_group_id = []
-        body = self.client.list_security_groups()
+        body = self.client.list_security_groups()['security_groups']
         for i in range(len(body)):
             security_group_id.append(body[i]['id'])
         # Generate a non-existent security group id
@@ -72,16 +72,17 @@
         s_description = data_utils.rand_name('description')
         # Create Security Group with empty string as group name
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group, "", s_description)
+                          self.client.create_security_group,
+                          name="", description=s_description)
         # Create Security Group with white space in group name
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group, " ",
-                          s_description)
+                          self.client.create_security_group,
+                          name=" ", description=s_description)
         # Create Security Group with group name longer than 255 chars
         s_name = 'securitygroup-'.ljust(260, '0')
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group, s_name,
-                          s_description)
+                          self.client.create_security_group,
+                          name=s_name, description=s_description)
 
     @decorators.skip_because(bug="1161411",
                              condition=CONF.service_available.neutron)
@@ -96,8 +97,8 @@
         # Create Security Group with group description longer than 255 chars
         s_description = 'description-'.ljust(260, '0')
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group, s_name,
-                          s_description)
+                          self.client.create_security_group,
+                          name=s_name, description=s_description)
 
     @test.idempotent_id('9fdb4abc-6b66-4b27-b89c-eb215a956168')
     @testtools.skipIf(CONF.service_available.neutron,
@@ -109,11 +110,11 @@
         # be created
         s_name = data_utils.rand_name('securitygroup')
         s_description = data_utils.rand_name('description')
-        self.create_security_group(s_name, s_description)
+        self.create_security_group(name=s_name, description=s_description)
         # Now try the Security Group with the same 'Name'
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group, s_name,
-                          s_description)
+                          self.client.create_security_group,
+                          name=s_name, description=s_description)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('36a1629f-c6da-4a26-b8b8-55e7e5d5cd58')
@@ -121,7 +122,7 @@
     def test_delete_the_default_security_group(self):
         # Negative test: deletion of the "default" Security Group should fail
         default_security_group_id = None
-        body = self.client.list_security_groups()
+        body = self.client.list_security_groups()['security_groups']
         for i in range(len(body)):
             if body[i]['name'] == 'default':
                 default_security_group_id = body[i]['id']
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 396327b..dcdb562 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -48,13 +48,15 @@
 
     def wait_for_interface_status(self, server, port_id, status):
         """Waits for a interface to reach a given status."""
-        body = self.client.show_interface(server, port_id)
+        body = (self.client.show_interface(server, port_id)
+                ['interfaceAttachment'])
         interface_status = body['port_state']
         start = int(time.time())
 
         while(interface_status != status):
             time.sleep(self.build_interval)
-            body = self.client.show_interface(server, port_id)
+            body = (self.client.show_interface(server, port_id)
+                    ['interfaceAttachment'])
             interface_status = body['port_state']
 
             timed_out = int(time.time()) - start >= self.build_timeout
@@ -82,14 +84,16 @@
 
     def _create_server_get_interfaces(self):
         server = self.create_test_server(wait_until='ACTIVE')
-        ifs = self.client.list_interfaces(server['id'])
+        ifs = (self.client.list_interfaces(server['id'])
+               ['interfaceAttachments'])
         body = self.wait_for_interface_status(
             server['id'], ifs[0]['port_id'], 'ACTIVE')
         ifs[0]['port_state'] = body['port_state']
         return server, ifs
 
     def _test_create_interface(self, server):
-        iface = self.client.create_interface(server['id'])
+        iface = (self.client.create_interface(server['id'])
+                 ['interfaceAttachment'])
         iface = self.wait_for_interface_status(
             server['id'], iface['port_id'], 'ACTIVE')
         self._check_interface(iface)
@@ -97,8 +101,8 @@
 
     def _test_create_interface_by_network_id(self, server, ifs):
         network_id = ifs[0]['net_id']
-        iface = self.client.create_interface(server['id'],
-                                             network_id=network_id)
+        iface = self.client.create_interface(
+            server['id'], net_id=network_id)['interfaceAttachment']
         iface = self.wait_for_interface_status(
             server['id'], iface['port_id'], 'ACTIVE')
         self._check_interface(iface, network_id=network_id)
@@ -106,8 +110,8 @@
 
     def _test_show_interface(self, server, ifs):
         iface = ifs[0]
-        _iface = self.client.show_interface(server['id'],
-                                            iface['port_id'])
+        _iface = self.client.show_interface(
+            server['id'], iface['port_id'])['interfaceAttachment']
         self._check_interface(iface, port_id=_iface['port_id'],
                               network_id=_iface['net_id'],
                               fixed_ip=_iface['fixed_ips'][0]['ip_address'],
@@ -117,12 +121,14 @@
         # NOTE(danms): delete not the first or last, but one in the middle
         iface = ifs[1]
         self.client.delete_interface(server['id'], iface['port_id'])
-        _ifs = self.client.list_interfaces(server['id'])
+        _ifs = (self.client.list_interfaces(server['id'])
+                ['interfaceAttachments'])
         start = int(time.time())
 
         while len(ifs) == len(_ifs):
             time.sleep(self.build_interval)
-            _ifs = self.client.list_interfaces(server['id'])
+            _ifs = (self.client.list_interfaces(server['id'])
+                    ['interfaceAttachments'])
             timed_out = int(time.time()) - start >= self.build_timeout
             if len(ifs) == len(_ifs) and timed_out:
                 message = ('Failed to delete interface within '
@@ -161,7 +167,8 @@
         iface = self._test_create_interface_by_network_id(server, ifs)
         ifs.append(iface)
 
-        _ifs = self.client.list_interfaces(server['id'])
+        _ifs = (self.client.list_interfaces(server['id'])
+                ['interfaceAttachments'])
         self._compare_iface_list(ifs, _ifs)
 
         self._test_show_interface(server, ifs)
@@ -179,7 +186,7 @@
         self.assertTrue(interface_count > 0)
         self._check_interface(ifs[0])
         network_id = ifs[0]['net_id']
-        self.client.add_fixed_ip(server['id'], network_id)
+        self.client.add_fixed_ip(server['id'], networkId=network_id)
         # Remove the fixed IP from server.
         server_detail = self.os.servers_client.show_server(
             server['id'])
@@ -192,4 +199,4 @@
                     break
             if fixed_ip is not None:
                 break
-        self.client.remove_fixed_ip(server['id'], fixed_ip)
+        self.client.remove_fixed_ip(server['id'], address=fixed_ip)
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index d1fbcec..080441a 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -32,4 +32,4 @@
     def test_get_availability_zone_list_with_non_admin_user(self):
         # List of availability zone with non-administrator user
         availability_zone = self.client.list_availability_zones()
-        self.assertTrue(len(availability_zone) > 0)
+        self.assertTrue(len(availability_zone['availabilityZoneInfo']) > 0)
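
The availability-zone check now reads the zone list out of the response body,
where it sits under the ``availabilityZoneInfo`` key. A hedged sketch of that
unwrapping (the helper name is illustrative)::

    def has_availability_zones(availability_zone_client):
        # list_availability_zones() returns the whole body; the zone list
        # is stored under 'availabilityZoneInfo'.
        body = availability_zone_client.list_availability_zones()
        return len(body['availabilityZoneInfo']) > 0
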
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 39447b8..35f2b2c 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -21,6 +21,7 @@
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -65,6 +66,20 @@
         cls.password = cls.server_initial['adminPass']
         cls.server = cls.client.show_server(cls.server_initial['id'])
 
+    def _create_net_subnet_ret_net_from_cidr(self, cidr):
+        name_net = data_utils.rand_name(self.__class__.__name__)
+        net = self.network_client.create_network(name=name_net)
+        self.addCleanup(self.network_client.delete_network,
+                        net['network']['id'])
+
+        subnet = self.network_client.create_subnet(
+            network_id=net['network']['id'],
+            cidr=cidr,
+            ip_version=4)
+        self.addCleanup(self.network_client.delete_subnet,
+                        subnet['subnet']['id'])
+        return net
+
     @test.attr(type='smoke')
     @test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
     def test_verify_server_details(self):
@@ -102,7 +117,7 @@
     def test_verify_created_server_vcpus(self):
         # Verify that the number of vcpus reported by the instance matches
         # the amount stated by the flavor
-        flavor = self.flavors_client.show_flavor(self.flavor_ref)
+        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(self.server),
             self.ssh_user,
@@ -128,7 +143,7 @@
         name = data_utils.rand_name('server_group')
         policies = ['affinity']
         body = self.server_groups_client.create_server_group(
-            name=name, policies=policies)
+            name=name, policies=policies)['server_group']
         group_id = body['id']
         self.addCleanup(self.server_groups_client.delete_server_group,
                         group_id)
@@ -138,7 +153,8 @@
                                          wait_until='ACTIVE')
 
         # Check a server is in the group
-        server_group = self.server_groups_client.get_server_group(group_id)
+        server_group = (self.server_groups_client.get_server_group(group_id)
+                        ['server_group'])
         self.assertIn(server['id'], server_group['members'])
 
     @test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
@@ -147,29 +163,8 @@
     def test_verify_multiple_nics_order(self):
         # Verify that the network order given at server creation is
         # preserved within the server.
-        name_net1 = data_utils.rand_name(self.__class__.__name__)
-        net1 = self.network_client.create_network(name=name_net1)
-        self.addCleanup(self.network_client.delete_network,
-                        net1['network']['id'])
-
-        name_net2 = data_utils.rand_name(self.__class__.__name__)
-        net2 = self.network_client.create_network(name=name_net2)
-        self.addCleanup(self.network_client.delete_network,
-                        net2['network']['id'])
-
-        subnet1 = self.network_client.create_subnet(
-            network_id=net1['network']['id'],
-            cidr='19.80.0.0/24',
-            ip_version=4)
-        self.addCleanup(self.network_client.delete_subnet,
-                        subnet1['subnet']['id'])
-
-        subnet2 = self.network_client.create_subnet(
-            network_id=net2['network']['id'],
-            cidr='19.86.0.0/24',
-            ip_version=4)
-        self.addCleanup(self.network_client.delete_subnet,
-                        subnet2['subnet']['id'])
+        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
 
         networks = [{'uuid': net1['network']['id']},
                     {'uuid': net2['network']['id']}]
@@ -185,7 +180,8 @@
         # we're OK.
         def cleanup_server():
             self.client.delete_server(server_multi_nics['id'])
-            self.client.wait_for_server_termination(server_multi_nics['id'])
+            waiters.wait_for_server_termination(self.client,
+                                                server_multi_nics['id'])
 
         self.addCleanup(cleanup_server)
 
@@ -196,13 +192,51 @@
         # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
         # address is in first network, similarly second address is in second
         # network.
-        addr = [addresses[name_net1][0]['addr'],
-                addresses[name_net2][0]['addr']]
+        addr = [addresses[net1['network']['name']][0]['addr'],
+                addresses[net2['network']['name']][0]['addr']]
         networks = [netaddr.IPNetwork('19.80.0.0/24'),
                     netaddr.IPNetwork('19.86.0.0/24')]
         for address, network in zip(addr, networks):
             self.assertIn(address, network)
 
+    @test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
+    @testtools.skipUnless(CONF.service_available.neutron,
+                          'Neutron service must be available.')
+    # The below skipUnless should be removed once Kilo-eol happens.
+    @testtools.skipUnless(CONF.compute_feature_enabled.
+                          allow_duplicate_networks,
+                          'Duplicate networks must be allowed')
+    def test_verify_duplicate_network_nics(self):
+        # Verify that server creation does not fail when more than one nic
+        # is created on the same network.
+        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+
+        networks = [{'uuid': net1['network']['id']},
+                    {'uuid': net2['network']['id']},
+                    {'uuid': net1['network']['id']}]
+
+        server_multi_nics = self.create_test_server(
+            networks=networks, wait_until='ACTIVE')
+
+        def cleanup_server():
+            self.client.delete_server(server_multi_nics['id'])
+            waiters.wait_for_server_termination(self.client,
+                                                server_multi_nics['id'])
+
+        self.addCleanup(cleanup_server)
+
+        addresses = self.client.list_addresses(server_multi_nics['id'])
+
+        addr = [addresses[net1['network']['name']][0]['addr'],
+                addresses[net2['network']['name']][0]['addr'],
+                addresses[net1['network']['name']][1]['addr']]
+        networks = [netaddr.IPNetwork('19.80.0.0/24'),
+                    netaddr.IPNetwork('19.86.0.0/24'),
+                    netaddr.IPNetwork('19.80.0.0/24')]
+        for address, network in zip(addr, networks):
+            self.assertIn(address, network)
+
 
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
     disk_config = 'AUTO'
@@ -239,10 +273,10 @@
 
             # Create a flavor with extra specs
             flavor = (self.flavor_client.
-                      create_flavor(flavor_with_eph_disk_name,
-                                    ram, vcpus, disk,
-                                    flavor_with_eph_disk_id,
-                                    ephemeral=1))
+                      create_flavor(name=flavor_with_eph_disk_name,
+                                    ram=ram, vcpus=vcpus, disk=disk,
+                                    id=flavor_with_eph_disk_id,
+                                    ephemeral=1))['flavor']
             self.addCleanup(flavor_clean_up, flavor['id'])
 
             return flavor['id']
@@ -257,9 +291,9 @@
 
             # Create a flavor without extra specs
             flavor = (self.flavor_client.
-                      create_flavor(flavor_no_eph_disk_name,
-                                    ram, vcpus, disk,
-                                    flavor_no_eph_disk_id))
+                      create_flavor(name=flavor_no_eph_disk_name,
+                                    ram=ram, vcpus=vcpus, disk=disk,
+                                    id=flavor_no_eph_disk_id))['flavor']
             self.addCleanup(flavor_clean_up, flavor['id'])
 
             return flavor['id']
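
Flavor creation in the specific-flavor tests now names every parameter and
unwraps the result from the ``flavor`` key. A minimal sketch, with illustrative
sizes in place of the ``ram``/``vcpus``/``disk`` variables used above::

    def make_ephemeral_flavor(flavor_client, name, flavor_id):
        # Positional arguments are replaced by keywords, and the body is
        # unwrapped from the 'flavor' key.
        flavor = flavor_client.create_flavor(
            name=name, ram=64, vcpus=1, disk=1,
            id=flavor_id, ephemeral=1)['flavor']
        return flavor['id']
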
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index b2acd34..cf0cf97 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -38,14 +38,14 @@
         # Delete a server while its VM state is Building
         server = self.create_test_server(wait_until='BUILD')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('925fdfb4-5b13-47ea-ac8a-c36ae6fddb05')
     def test_delete_active_server(self):
         # Delete a server while its VM state is Active
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('546d368c-bb6c-4645-979a-83ed16f3a6be')
     def test_delete_server_while_in_shutoff_state(self):
@@ -54,7 +54,7 @@
         self.client.stop(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'SHUTOFF')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('943bd6e8-4d7a-4904-be83-7a6cc2d4213b')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
@@ -65,7 +65,7 @@
         self.client.pause_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('1f82ebd3-8253-4f4e-b93f-de9b7df56d8b')
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
@@ -76,7 +76,7 @@
         self.client.suspend_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'SUSPENDED')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('bb0cb402-09dd-4947-b6e5-5e7e1cfa61ad')
     @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
@@ -95,7 +95,7 @@
             waiters.wait_for_server_status(self.client, server['id'],
                                            'SHELVED')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('ab0c38b4-cdd8-49d3-9b92-0cb898723c01')
     @testtools.skipIf(not CONF.compute_feature_enabled.resize,
@@ -103,11 +103,11 @@
     def test_delete_server_while_in_verify_resize_state(self):
         # Delete a server while its VM state is VERIFY_RESIZE
         server = self.create_test_server(wait_until='ACTIVE')
-        self.client.resize(server['id'], self.flavor_ref_alt)
+        self.client.resize_server(server['id'], self.flavor_ref_alt)
         waiters.wait_for_server_status(self.client, server['id'],
                                        'VERIFY_RESIZE')
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
     @test.idempotent_id('d0f3f0d6-d9b6-4a32-8da4-23015dcab23c')
     @test.services('volume')
@@ -117,18 +117,18 @@
         device = '/dev/%s' % CONF.compute.volume_device_name
         server = self.create_test_server(wait_until='ACTIVE')
 
-        volume = volumes_client.create_volume()
+        volume = volumes_client.create_volume(size=CONF.volume.volume_size)
         self.addCleanup(volumes_client.delete_volume, volume['id'])
         waiters.wait_for_volume_status(volumes_client,
                                        volume['id'], 'available')
         self.client.attach_volume(server['id'],
-                                  volume['id'],
+                                  volumeId=volume['id'],
                                   device=device)
         waiters.wait_for_volume_status(volumes_client,
                                        volume['id'], 'in-use')
 
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
         waiters.wait_for_volume_status(volumes_client,
                                        volume['id'], 'available')
 
@@ -152,12 +152,13 @@
         server = self.non_admin_client.show_server(server['id'])
         self.assertEqual(server['status'], 'ERROR')
         self.non_admin_client.delete_server(server['id'])
-        self.servers_client.wait_for_server_termination(server['id'],
-                                                        ignore_error=True)
+        waiters.wait_for_server_termination(self.servers_client,
+                                            server['id'],
+                                            ignore_error=True)
 
     @test.idempotent_id('73177903-6737-4f27-a60c-379e8ae8cf48')
     def test_admin_delete_servers_of_others(self):
         # Administrator can delete servers of others
         server = self.create_test_server(wait_until='ACTIVE')
         self.admin_client.delete_server(server['id'])
-        self.servers_client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
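
The volume-backed delete test now creates the volume with an explicit size and
attaches it by keyword before waiting on the shared volume waiter. A hedged
sketch of that attach flow, assuming the same ``CONF`` options and ``waiters``
import used in the file above::

    from tempest.common import waiters
    from tempest import config

    CONF = config.CONF

    def attach_volume_and_wait(servers_client, volumes_client, server_id):
        # The size comes from configuration; volumeId and device are passed
        # by keyword when attaching.
        volume = volumes_client.create_volume(size=CONF.volume.volume_size)
        waiters.wait_for_volume_status(volumes_client,
                                       volume['id'], 'available')
        device = '/dev/%s' % CONF.compute.volume_device_name
        servers_client.attach_volume(server_id,
                                     volumeId=volume['id'],
                                     device=device)
        waiters.wait_for_volume_status(volumes_client,
                                       volume['id'], 'in-use')
        return volume
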
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index 3e8a0d2..5b9338a 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -57,9 +57,9 @@
         # A server should be rebuilt using the manual disk config option
         self._update_server_with_disk_config(disk_config='AUTO')
 
-        server = self.client.rebuild(self.server_id,
-                                     self.image_ref_alt,
-                                     disk_config='MANUAL')
+        server = self.client.rebuild_server(self.server_id,
+                                            self.image_ref_alt,
+                                            disk_config='MANUAL')
 
         # Wait for the server to become active
         waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
@@ -73,9 +73,9 @@
         # A server should be rebuilt using the auto disk config option
         self._update_server_with_disk_config(disk_config='MANUAL')
 
-        server = self.client.rebuild(self.server_id,
-                                     self.image_ref_alt,
-                                     disk_config='AUTO')
+        server = self.client.rebuild_server(self.server_id,
+                                            self.image_ref_alt,
+                                            disk_config='AUTO')
 
         # Wait for the server to become active
         waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
@@ -101,7 +101,8 @@
 
         # Resize with auto option
         flavor_id = self._get_alternative_flavor()
-        self.client.resize(self.server_id, flavor_id, disk_config='AUTO')
+        self.client.resize_server(self.server_id, flavor_id,
+                                  disk_config='AUTO')
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'VERIFY_RESIZE')
         self.client.confirm_resize(self.server_id)
@@ -119,7 +120,8 @@
 
         # Resize with manual option
         flavor_id = self._get_alternative_flavor()
-        self.client.resize(self.server_id, flavor_id, disk_config='MANUAL')
+        self.client.resize_server(self.server_id, flavor_id,
+                                  disk_config='MANUAL')
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'VERIFY_RESIZE')
         self.client.confirm_resize(self.server_id)
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index dc126a5..4ac1a49 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -35,7 +35,7 @@
     @test.idempotent_id('77ca5cc5-9990-45e0-ab98-1de8fead201a')
     def test_list_instance_actions(self):
         # List actions of the provided server
-        self.client.reboot(self.server_id, 'HARD')
+        self.client.reboot_server(self.server_id, 'HARD')
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
 
         body = self.client.list_instance_actions(self.server_id)
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index a75cb3e..5c63557 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -44,7 +44,7 @@
 
         # Check to see if the alternate image ref actually exists...
         images_client = cls.images_client
-        images = images_client.list_images()
+        images = images_client.list_images()['images']
 
         if cls.image_ref != cls.image_ref_alt and \
             any([image for image in images
@@ -305,12 +305,20 @@
             params = {'ip': ip}
         else:
             params = {'ip6': ip}
+        # capture all servers in case something goes wrong
+        all_servers = self.client.list_servers(detail=True)
         body = self.client.list_servers(**params)
         servers = body['servers']
 
-        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
-        self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
-        self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
+        self.assertIn(self.s1_name, map(lambda x: x['name'], servers),
+                      "%s not found in %s, all servers %s" %
+                      (self.s1_name, servers, all_servers))
+        self.assertIn(self.s2_name, map(lambda x: x['name'], servers),
+                      "%s not found in %s, all servers %s" %
+                      (self.s2_name, servers, all_servers))
+        self.assertIn(self.s3_name, map(lambda x: x['name'], servers),
+                      "%s not found in %s, all servers %s" %
+                      (self.s3_name, servers, all_servers))
 
     @test.idempotent_id('67aec2d0-35fe-4503-9f92-f13272b867ed')
     def test_list_servers_detailed_limit_results(self):
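
The IP-filter assertions now capture a detailed server listing up front and
embed it in each failure message, so a missing name is reported alongside
everything the API returned. A short sketch of that pattern, in the style of
the test above (``expected_name`` and ``params`` are placeholders)::

    # Sketch: fetch the detailed listing first so a failed membership
    # check can include the full server list in its message.
    all_servers = self.client.list_servers(detail=True)
    servers = self.client.list_servers(**params)['servers']
    self.assertIn(expected_name, [s['name'] for s in servers],
                  "%s not found in %s, all servers %s" %
                  (expected_name, servers, all_servers))
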
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index def6cf5..f205ddf 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -17,6 +17,7 @@
 from tempest_lib import exceptions as lib_exc
 
 from tempest.api.compute import base
+from tempest.common import waiters
 from tempest import test
 
 
@@ -47,8 +48,8 @@
         # be put into ERROR status on a quick spawn, then delete,
         # as the compute node expects the instance local status
         # to be spawning, not deleted. See LP Bug#1061167
-        cls.client.wait_for_server_termination(srv['id'],
-                                               ignore_error=True)
+        waiters.wait_for_server_termination(cls.client, srv['id'],
+                                            ignore_error=True)
         cls.deleted_fixtures.append(srv)
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f0f6b8c..6b3b661 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -105,7 +105,7 @@
                 self.validation_resources['keypair']['private_key'])
             boot_time = linux_client.get_boot_time()
 
-        self.client.reboot(self.server_id, reboot_type)
+        self.client.reboot_server(self.server_id, reboot_type)
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
 
         if CONF.validation.run_validation:
@@ -132,7 +132,7 @@
         self._test_reboot_server('SOFT')
 
     def _rebuild_server_and_check(self, image_ref):
-        rebuilt_server = self.client.rebuild(self.server_id, image_ref)
+        rebuilt_server = self.client.rebuild_server(self.server_id, image_ref)
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
         msg = ('Server was not rebuilt to the original image. '
                'The original image: {0}. The current image: {1}'
@@ -148,12 +148,12 @@
         personality = [{'path': 'rebuild.txt',
                        'contents': base64.b64encode(file_contents)}]
         password = 'rebuildPassw0rd'
-        rebuilt_server = self.client.rebuild(self.server_id,
-                                             self.image_ref_alt,
-                                             name=new_name,
-                                             metadata=meta,
-                                             personality=personality,
-                                             adminPass=password)
+        rebuilt_server = self.client.rebuild_server(self.server_id,
+                                                    self.image_ref_alt,
+                                                    name=new_name,
+                                                    metadata=meta,
+                                                    personality=personality,
+                                                    adminPass=password)
 
         # If the server was rebuilt on a different image, restore it to the
         # original image once the test ends
@@ -193,7 +193,7 @@
                      if old_image == self.image_ref else self.image_ref)
         self.client.stop(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
-        rebuilt_server = self.client.rebuild(self.server_id, new_image)
+        rebuilt_server = self.client.rebuild_server(self.server_id, new_image)
         # If the server was rebuilt on a different image, restore it to the
         # original image once the test ends
         if self.image_ref_alt != self.image_ref:
@@ -223,7 +223,7 @@
             waiters.wait_for_server_status(self.client, self.server_id,
                                            'SHUTOFF')
 
-        self.client.resize(self.server_id, self.flavor_ref_alt)
+        self.client.resize_server(self.server_id, self.flavor_ref_alt)
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'VERIFY_RESIZE')
 
@@ -262,7 +262,7 @@
         # The server's RAM and disk space should return to its original
         # values after a resize is reverted
 
-        self.client.resize(self.server_id, self.flavor_ref_alt)
+        self.client.resize_server(self.server_id, self.flavor_ref_alt)
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'VERIFY_RESIZE')
 
@@ -323,7 +323,7 @@
             properties=properties,
             status='active',
             sort_key='created_at',
-            sort_dir='asc')
+            sort_dir='asc')['images']
         self.assertEqual(2, len(image_list))
         self.assertEqual((backup1, backup2),
                          (image_list[0]['name'], image_list[1]['name']))
@@ -347,7 +347,7 @@
             properties=properties,
             status='active',
             sort_key='created_at',
-            sort_dir='asc')
+            sort_dir='asc')['images']
         self.assertEqual(2, len(image_list),
                          'Unexpected number of images for '
                          'v2:test_create_backup; was the oldest backup not '
@@ -375,7 +375,7 @@
         # log file is truncated and we cannot get any console log through
         # "console-log" API.
         # The detail is https://bugs.launchpad.net/nova/+bug/1251920
-        self.client.reboot(self.server_id, 'HARD')
+        self.client.reboot_server(self.server_id, 'HARD')
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
         self.wait_for(self._get_output)
 
@@ -456,7 +456,7 @@
         server = self.client.show_server(self.server_id)
         image_name = server['name'] + '-shelved'
         params = {'name': image_name}
-        images = self.images_client.list_images(**params)
+        images = self.images_client.list_images(**params)['images']
         self.assertEqual(1, len(images))
         self.assertEqual(image_name, images[0]['name'])
 
@@ -474,6 +474,7 @@
     def test_lock_unlock_server(self):
         # Lock the server, try to stop it (expect an exception), unlock, retry
         self.client.lock_server(self.server_id)
+        self.addCleanup(self.client.unlock_server, self.server_id)
         server = self.client.show_server(self.server_id)
         self.assertEqual(server['status'], 'ACTIVE')
         # Locked server is not allowed to be stopped by non-admin user
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 0e7c1eb..0da7912 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -61,7 +61,7 @@
         # delete the test server-group
         self.client.delete_server_group(server_group['id'])
         # validation of server-group deletion
-        server_group_list = self.client.list_server_groups()
+        server_group_list = self.client.list_server_groups()['server_groups']
         self.assertNotIn(server_group, server_group_list)
 
     def _create_delete_server_group(self, policy):
@@ -107,11 +107,11 @@
     def test_get_server_group(self):
         # Get the server-group
         body = self.client.get_server_group(
-            self.created_server_group['id'])
+            self.created_server_group['id'])['server_group']
         self.assertEqual(self.created_server_group, body)
 
     @test.idempotent_id('d4874179-27b4-4d7d-80e4-6c560cdfe321')
     def test_list_server_groups(self):
         # List the server-group
-        body = self.client.list_server_groups()
+        body = self.client.list_server_groups()['server_groups']
         self.assertIn(self.created_server_group, body)
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 98a2f9d..96ce45e 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -41,16 +41,15 @@
         super(ServerRescueTestJSON, cls).resource_setup()
 
         # Floating IP creation
-        body = cls.floating_ips_client.create_floating_ip()
+        body = cls.floating_ips_client.create_floating_ip()['floating_ip']
         cls.floating_ip_id = str(body['id']).strip()
         cls.floating_ip = str(body['ip']).strip()
 
         # Security group creation
         cls.sg_name = data_utils.rand_name('sg')
         cls.sg_desc = data_utils.rand_name('sg-desc')
-        cls.sg = \
-            cls.security_groups_client.create_security_group(cls.sg_name,
-                                                             cls.sg_desc)
+        cls.sg = cls.security_groups_client.create_security_group(
+            name=cls.sg_name, description=cls.sg_desc)['security_group']
         cls.sg_id = cls.sg['id']
 
         # Server for positive tests
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 2fe63ed..9d06188 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -61,7 +61,7 @@
 
     def _create_volume(self):
         volume = self.volumes_extensions_client.create_volume(
-            CONF.volume.volume_size, display_name=data_utils.rand_name(
+            size=CONF.volume.volume_size, display_name=data_utils.rand_name(
                 self.__class__.__name__ + '_volume'))
         self.addCleanup(self.delete_volume, volume['id'])
         waiters.wait_for_volume_status(self.volumes_extensions_client,
@@ -100,7 +100,7 @@
     @test.attr(type=['negative'])
     @test.idempotent_id('db22b618-f157-4566-a317-1b6d467a8094')
     def test_rescued_vm_reboot(self):
-        self.assertRaises(lib_exc.Conflict, self.servers_client.reboot,
+        self.assertRaises(lib_exc.Conflict, self.servers_client.reboot_server,
                           self.rescue_id, 'HARD')
 
     @test.attr(type=['negative'])
@@ -116,7 +116,7 @@
     @test.idempotent_id('70cdb8a1-89f8-437d-9448-8844fd82bf46')
     def test_rescued_vm_rebuild(self):
         self.assertRaises(lib_exc.Conflict,
-                          self.servers_client.rebuild,
+                          self.servers_client.rebuild_server,
                           self.rescue_id,
                           self.image_ref_alt)
 
@@ -137,7 +137,7 @@
         self.assertRaises(lib_exc.Conflict,
                           self.servers_client.attach_volume,
                           self.server_id,
-                          volume['id'],
+                          volumeId=volume['id'],
                           device='/dev/%s' % self.device)
 
     @test.idempotent_id('f56e465b-fe10-48bf-b75d-646cda3a8bc9')
@@ -148,7 +148,7 @@
 
         # Attach the volume to the server
         self.servers_client.attach_volume(self.server_id,
-                                          volume['id'],
+                                          volumeId=volume['id'],
                                           device='/dev/%s' % self.device)
         waiters.wait_for_volume_status(self.volumes_extensions_client,
                                        volume['id'], 'in-use')
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 2c1e69c..c243adf 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -63,7 +63,7 @@
         # Specify a keypair while creating a server
 
         key_name = data_utils.rand_name('key')
-        self.keypairs_client.create_keypair(key_name)
+        self.keypairs_client.create_keypair(name=key_name)
         self.addCleanup(self.keypairs_client.delete_keypair, key_name)
         self.keypairs_client.list_keypairs()
         server = self.create_test_server(key_name=key_name)
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index fe05456..8f81de0 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -122,7 +122,7 @@
         # Resize a non-existent server
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.client.resize,
+                          self.client.resize_server,
                           nonexistent_server, self.flavor_ref)
 
     @test.idempotent_id('ced1a1d7-2ab6-45c9-b90f-b27d87b30efd')
@@ -132,7 +132,7 @@
     def test_resize_server_with_non_existent_flavor(self):
         # Resize a server with non-existent flavor
         nonexistent_flavor = data_utils.rand_uuid()
-        self.assertRaises(lib_exc.BadRequest, self.client.resize,
+        self.assertRaises(lib_exc.BadRequest, self.client.resize_server,
                           self.server_id, flavor_ref=nonexistent_flavor)
 
     @test.idempotent_id('45436a7d-a388-4a35-a9d8-3adc5d0d940b')
@@ -141,7 +141,7 @@
     @test.attr(type=['negative'])
     def test_resize_server_with_null_flavor(self):
         # Resize a server with null flavor
-        self.assertRaises(lib_exc.BadRequest, self.client.resize,
+        self.assertRaises(lib_exc.BadRequest, self.client.resize_server,
                           self.server_id, flavor_ref="")
 
     @test.attr(type=['negative'])
@@ -149,7 +149,7 @@
     def test_reboot_non_existent_server(self):
         # Reboot a non-existent server
         nonexistent_server = data_utils.rand_uuid()
-        self.assertRaises(lib_exc.NotFound, self.client.reboot,
+        self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
                           nonexistent_server, 'SOFT')
 
     @test.idempotent_id('d1417e7f-a509-41b5-a102-d5eed8613369')
@@ -171,12 +171,12 @@
         # Rebuild and Reboot a deleted server
         server = self.create_test_server()
         self.client.delete_server(server['id'])
-        self.client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
 
         self.assertRaises(lib_exc.NotFound,
-                          self.client.rebuild,
+                          self.client.rebuild_server,
                           server['id'], self.image_ref_alt)
-        self.assertRaises(lib_exc.NotFound, self.client.reboot,
+        self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
                           server['id'], 'SOFT')
 
     @test.attr(type=['negative'])
@@ -185,7 +185,7 @@
         # Rebuild a non-existent server
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.client.rebuild,
+                          self.client.rebuild_server,
                           nonexistent_server,
                           self.image_ref_alt)
 
@@ -480,7 +480,7 @@
         server = self.client.show_server(self.server_id)
         image_name = server['name'] + '-shelved'
         params = {'name': image_name}
-        images = self.images_client.list_images(**params)
+        images = self.images_client.list_images(**params)['images']
         self.assertEqual(1, len(images))
         self.assertEqual(image_name, images[0]['name'])
 
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 58c2206..b5a24c7 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -52,11 +52,13 @@
         cls.glance_client = cls.os.image_client
         cls.keypairs_client = cls.os.keypairs_client
         cls.security_client = cls.os.security_groups_client
+        cls.rule_client = cls.os.security_group_rules_client
 
         cls.alt_client = cls.alt_manager.servers_client
         cls.alt_images_client = cls.alt_manager.images_client
         cls.alt_keypairs_client = cls.alt_manager.keypairs_client
         cls.alt_security_client = cls.alt_manager.security_groups_client
+        cls.alt_rule_client = cls.alt_manager.security_group_rules_client
 
     @classmethod
     def resource_setup(cls):
@@ -68,27 +70,29 @@
         body = cls.glance_client.create_image(name=name,
                                               container_format='bare',
                                               disk_format='raw',
-                                              is_public=False)
+                                              is_public=False)['image']
         image_id = body['id']
         image_file = six.StringIO(('*' * 1024))
-        body = cls.glance_client.update_image(image_id, data=image_file)
+        body = cls.glance_client.update_image(image_id,
+                                              data=image_file)['image']
         cls.glance_client.wait_for_image_status(image_id, 'active')
-        cls.image = cls.images_client.show_image(image_id)
+        cls.image = cls.images_client.show_image(image_id)['image']
 
         cls.keypairname = data_utils.rand_name('keypair')
-        cls.keypairs_client.create_keypair(cls.keypairname)
+        cls.keypairs_client.create_keypair(name=cls.keypairname)
 
         name = data_utils.rand_name('security')
         description = data_utils.rand_name('description')
         cls.security_group = cls.security_client.create_security_group(
-            name, description)
+            name=name, description=description)['security_group']
 
         parent_group_id = cls.security_group['id']
         ip_protocol = 'tcp'
         from_port = 22
         to_port = 22
-        cls.rule = cls.security_client.create_security_group_rule(
-            parent_group_id, ip_protocol, from_port, to_port)
+        cls.rule = cls.rule_client.create_security_group_rule(
+            parent_group_id=parent_group_id, ip_protocol=ip_protocol,
+            from_port=from_port, to_port=to_port)['security_group_rule']
 
     @classmethod
     def resource_cleanup(cls):
@@ -151,19 +155,19 @@
     @test.idempotent_id('14cb5ff5-f646-45ca-8f51-09081d6c0c24')
     def test_reboot_server_for_alt_account_fails(self):
         # A reboot request for another user's server should fail
-        self.assertRaises(lib_exc.NotFound, self.alt_client.reboot,
+        self.assertRaises(lib_exc.NotFound, self.alt_client.reboot_server,
                           self.server['id'], 'HARD')
 
     @test.idempotent_id('8a0bce51-cd00-480b-88ba-dbc7d8408a37')
     def test_rebuild_server_for_alt_account_fails(self):
         # A rebuild request for another user's server should fail
-        self.assertRaises(lib_exc.NotFound, self.alt_client.rebuild,
+        self.assertRaises(lib_exc.NotFound, self.alt_client.rebuild_server,
                           self.server['id'], self.image_ref_alt)
 
     @test.idempotent_id('e4da647e-f982-4e61-9dad-1d1abebfb933')
     def test_resize_server_for_alt_account_fails(self):
         # A resize request for another user's server should fail
-        self.assertRaises(lib_exc.NotFound, self.alt_client.resize,
+        self.assertRaises(lib_exc.NotFound, self.alt_client.resize_server,
                           self.server['id'], self.flavor_ref_alt)
 
     @test.idempotent_id('a9fe8112-0ffa-4902-b061-f892bd5fe0d3')
@@ -171,7 +175,7 @@
         # A create image request for another user's server should fail
         self.assertRaises(lib_exc.NotFound,
                           self.alt_images_client.create_image,
-                          self.server['id'], 'testImage')
+                          self.server['id'], name='testImage')
 
     @test.idempotent_id('95d445f6-babc-4f2e-aea3-aa24ec5e7f0d')
     def test_create_server_with_unauthorized_image(self):
@@ -207,7 +211,8 @@
             resp = {}
             resp['status'] = None
             self.assertRaises(lib_exc.BadRequest,
-                              self.alt_keypairs_client.create_keypair, k_name)
+                              self.alt_keypairs_client.create_keypair,
+                              name=k_name)
         finally:
             # Next request the base_url is back to normal
             if (resp['status'] is not None):
@@ -259,7 +264,7 @@
             resp['status'] = None
             self.assertRaises(lib_exc.BadRequest,
                               self.alt_security_client.create_security_group,
-                              s_name, s_description)
+                              name=s_name, description=s_description)
         finally:
             # Next request the base_url is back to normal
             if resp['status'] is not None:
@@ -292,21 +297,22 @@
         to_port = -1
         try:
             # Change the base URL to impersonate another user
-            self.alt_security_client.auth_provider.set_alt_auth_data(
+            self.alt_rule_client.auth_provider.set_alt_auth_data(
                 request_part='url',
-                auth_data=self.security_client.auth_provider.auth_data
+                auth_data=self.rule_client.auth_provider.auth_data
             )
             resp = {}
             resp['status'] = None
             self.assertRaises(lib_exc.BadRequest,
-                              self.alt_security_client.
+                              self.alt_rule_client.
                               create_security_group_rule,
-                              parent_group_id, ip_protocol, from_port,
-                              to_port)
+                              parent_group_id=parent_group_id,
+                              ip_protocol=ip_protocol,
+                              from_port=from_port, to_port=to_port)
         finally:
             # Next request the base_url is back to normal
             if resp['status'] is not None:
-                self.alt_security_client.delete_security_group_rule(resp['id'])
+                self.alt_rule_client.delete_security_group_rule(resp['id'])
                 LOG.error("Create security group rule request should not "
                           "happen if the tenant id does not match the"
                           " current user")
@@ -316,7 +322,7 @@
         # A DELETE request for another user's security group rule
         # should fail
         self.assertRaises(lib_exc.NotFound,
-                          self.alt_security_client.delete_security_group_rule,
+                          self.alt_rule_client.delete_security_group_rule,
                           self.rule['id'])
 
     @test.idempotent_id('c5f52351-53d9-4fc9-83e5-917f7f5e3d71')
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 4cc4328..6e57aff 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -32,7 +32,7 @@
         # List of all extensions
         if len(CONF.compute_feature_enabled.api_extensions) == 0:
             raise self.skipException('There are not any extensions configured')
-        extensions = self.extensions_client.list_extensions()
+        extensions = self.extensions_client.list_extensions()['extensions']
         ext = CONF.compute_feature_enabled.api_extensions[0]
         if ext == 'all':
             self.assertIn('Hosts', map(lambda x: x['name'], extensions))
@@ -49,4 +49,4 @@
     def test_get_extension(self):
         # get the specified extensions
         extension = self.extensions_client.show_extension('os-consoles')
-        self.assertEqual('os-consoles', extension['alias'])
+        self.assertEqual('os-consoles', extension['extension']['alias'])
diff --git a/tempest/api/compute/test_live_block_migration_negative.py b/tempest/api/compute/test_live_block_migration_negative.py
index fabe55d..2cd85f2 100644
--- a/tempest/api/compute/test_live_block_migration_negative.py
+++ b/tempest/api/compute/test_live_block_migration_negative.py
@@ -40,10 +40,10 @@
         cls.admin_servers_client = cls.os_adm.servers_client
 
     def _migrate_server_to(self, server_id, dest_host):
+        bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         body = self.admin_servers_client.live_migrate_server(
-            server_id, dest_host,
-            CONF.compute_feature_enabled.
-            block_migration_for_live_migration)
+            server_id, host=dest_host, block_migration=bmflm,
+            disk_over_commit=False)
         return body
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/compute/test_networks.py b/tempest/api/compute/test_networks.py
index deb9ee2..bf2a78d 100644
--- a/tempest/api/compute/test_networks.py
+++ b/tempest/api/compute/test_networks.py
@@ -33,5 +33,5 @@
 
     @test.idempotent_id('3fe07175-312e-49a5-a623-5f52eeada4c2')
     def test_list_networks(self):
-        networks = self.client.list_networks()
+        networks = self.client.list_networks()['networks']
         self.assertNotEmpty(networks, "No networks found.")
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 8e4278a..6496854 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -83,7 +83,7 @@
         # Attach the volume to the server
         self.attachment = self.servers_client.attach_volume(
             self.server['id'],
-            self.volume['id'],
+            volumeId=self.volume['id'],
             device='/dev/%s' % self.device)
         self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
 
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 44339a3..f05a5a3 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -46,7 +46,8 @@
         v_name = data_utils.rand_name('Volume')
         metadata = {'Type': 'work'}
         # Create volume
-        volume = self.client.create_volume(display_name=v_name,
+        volume = self.client.create_volume(size=CONF.volume.volume_size,
+                                           display_name=v_name,
                                            metadata=metadata)
         self.addCleanup(self.delete_volume, volume['id'])
         self.assertIn('id', volume)
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 22b3d13..421d96f 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -54,7 +54,8 @@
             v_name = data_utils.rand_name('volume')
             metadata = {'Type': 'work'}
             try:
-                volume = cls.client.create_volume(display_name=v_name,
+                volume = cls.client.create_volume(size=CONF.volume.volume_size,
+                                                  display_name=v_name,
                                                   metadata=metadata)
                 waiters.wait_for_volume_status(cls.client,
                                                volume['id'], 'available')
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 904cbb6..5d78539 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -297,6 +297,7 @@
                                                           flavor_id,
                                                           node_configs,
                                                           **kwargs)
+        resp_body = resp_body['node_group_template']
         # store id of created node group template
         cls._node_group_templates.append(resp_body['id'])
 
@@ -316,6 +317,7 @@
                                                        node_groups,
                                                        cluster_configs,
                                                        **kwargs)
+        resp_body = resp_body['cluster_template']
         # store id of created cluster template
         cls._cluster_templates.append(resp_body['id'])
 
@@ -330,6 +332,7 @@
         removed in tearDownClass method.
         """
         resp_body = cls.client.create_data_source(name, type, url, **kwargs)
+        resp_body = resp_body['data_source']
         # store id of created data source
         cls._data_sources.append(resp_body['id'])
 
@@ -343,6 +346,7 @@
         be automatically removed in tearDownClass method.
         """
         resp_body = cls.client.create_job_binary_internal(name, data)
+        resp_body = resp_body['job_binary_internal']
         # store id of created job binary internal
         cls._job_binary_internals.append(resp_body['id'])
 
@@ -357,6 +361,7 @@
         removed in tearDownClass method.
         """
         resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
+        resp_body = resp_body['job_binary']
         # store id of created job binary
         cls._job_binaries.append(resp_body['id'])
 
@@ -372,6 +377,7 @@
         """
         resp_body = cls.client.create_job(name,
                                           job_type, mains, libs, **kwargs)
+        resp_body = resp_body['job']
         # store id of created job
         cls._jobs.append(resp_body['id'])
 
@@ -400,7 +406,7 @@
         """
         if not cls.default_plugin:
             return None
-        plugin = cls.client.get_plugin(cls.default_plugin)
+        plugin = cls.client.get_plugin(cls.default_plugin)['plugin']
 
         for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
             if version in plugin['versions']:
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index e357a85..42cbd14 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -98,7 +98,7 @@
         template_info = self._create_cluster_template()
 
         # check for cluster template in list
-        templates = self.client.list_cluster_templates()
+        templates = self.client.list_cluster_templates()['cluster_templates']
         templates_info = [(template['id'], template['name'])
                           for template in templates]
         self.assertIn(template_info, templates_info)
@@ -110,6 +110,7 @@
 
         # check cluster template fetch by id
         template = self.client.get_cluster_template(template_id)
+        template = template['cluster_template']
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.cluster_template, template)
 
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index dd16b2f..67d09a0 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -68,13 +68,13 @@
 
     def _list_data_sources(self, source_info):
         # check for data source in list
-        sources = self.client.list_data_sources()
+        sources = self.client.list_data_sources()['data_sources']
         sources_info = [(source['id'], source['name']) for source in sources]
         self.assertIn(source_info, sources_info)
 
     def _get_data_source(self, source_id, source_name, source_body):
         # check data source fetch by id
-        source = self.client.get_data_source(source_id)
+        source = self.client.get_data_source(source_id)['data_source']
         self.assertEqual(source_name, source['name'])
         self.assertDictContainsSubset(source_body, source)
 
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index fb21270..98b7e24 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -80,7 +80,7 @@
         binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
 
         # check for job binary in list
-        binaries = self.client.list_job_binaries()
+        binaries = self.client.list_job_binaries()['binaries']
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -91,7 +91,7 @@
             self._create_job_binary(self.swift_job_binary_with_extra))
 
         # check job binary fetch by id
-        binary = self.client.get_job_binary(binary_id)
+        binary = self.client.get_job_binary(binary_id)['job_binary']
         self.assertEqual(binary_name, binary['name'])
         self.assertDictContainsSubset(self.swift_job_binary, binary)
 
@@ -115,7 +115,7 @@
         binary_info = self._create_job_binary(self.internal_db_job_binary)
 
         # check for job binary in list
-        binaries = self.client.list_job_binaries()
+        binaries = self.client.list_job_binaries()['binaries']
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -126,7 +126,7 @@
             self._create_job_binary(self.internal_db_job_binary))
 
         # check job binary fetch by id
-        binary = self.client.get_job_binary(binary_id)
+        binary = self.client.get_job_binary(binary_id)['job_binary']
         self.assertEqual(binary_name, binary['name'])
         self.assertDictContainsSubset(self.internal_db_job_binary, binary)
 
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 3d76ebe..6919fa5 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -57,7 +57,7 @@
         binary_info = self._create_job_binary_internal()
 
         # check for job binary internal in list
-        binaries = self.client.list_job_binary_internals()
+        binaries = self.client.list_job_binary_internals()['binaries']
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -68,7 +68,7 @@
 
         # check job binary internal fetch by id
         binary = self.client.get_job_binary_internal(binary_id)
-        self.assertEqual(binary_name, binary['name'])
+        self.assertEqual(binary_name, binary['job_binary_internal']['name'])
 
     @test.attr(type='smoke')
     @test.idempotent_id('b3568c33-4eed-40d5-aae4-6ff3b2ac58f5')
diff --git a/tempest/api/data_processing/test_jobs.py b/tempest/api/data_processing/test_jobs.py
index 83eb54d..7798056 100644
--- a/tempest/api/data_processing/test_jobs.py
+++ b/tempest/api/data_processing/test_jobs.py
@@ -71,7 +71,7 @@
         job_info = self._create_job()
 
         # check for job in list
-        jobs = self.client.list_jobs()
+        jobs = self.client.list_jobs()['jobs']
         jobs_info = [(job['id'], job['name']) for job in jobs]
         self.assertIn(job_info, jobs_info)
 
@@ -81,7 +81,7 @@
         job_id, job_name = self._create_job()
 
         # check job fetch by id
-        job = self.client.get_job(job_id)
+        job = self.client.get_job(job_id)['job']
         self.assertEqual(job_name, job['name'])
 
     @test.attr(type='smoke')
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index 102799d..388bb58 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -65,6 +65,7 @@
 
         # check for node group template in list
         templates = self.client.list_node_group_templates()
+        templates = templates['node_group_templates']
         templates_info = [(template['id'], template['name'])
                           for template in templates]
         self.assertIn(template_info, templates_info)
@@ -76,6 +77,7 @@
 
         # check node group template fetch by id
         template = self.client.get_node_group_template(template_id)
+        template = template['node_group_template']
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.node_group_template, template)
 
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index 92a5bd0..14594e4 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -25,7 +25,7 @@
 
         It ensures the availability of the main plugins.
         """
-        plugins = self.client.list_plugins()
+        plugins = self.client.list_plugins()['plugins']
         plugins_names = [plugin['name'] for plugin in plugins]
         for enabled_plugin in CONF.data_processing_feature_enabled.plugins:
             self.assertIn(enabled_plugin, plugins_names)
@@ -41,12 +41,13 @@
     @test.idempotent_id('53cf6487-2cfb-4a6f-8671-97c542c6e901')
     def test_plugin_get(self):
         for plugin_name in self._list_all_plugin_names():
-            plugin = self.client.get_plugin(plugin_name)
+            plugin = self.client.get_plugin(plugin_name)['plugin']
             self.assertEqual(plugin_name, plugin['name'])
 
             for plugin_version in plugin['versions']:
                 detailed_plugin = self.client.get_plugin(plugin_name,
                                                          plugin_version)
+                detailed_plugin = detailed_plugin['plugin']
                 self.assertEqual(plugin_name, detailed_plugin['name'])
 
                 # check that required image tags contains name and version
diff --git a/tempest/api/database/flavors/test_flavors.py b/tempest/api/database/flavors/test_flavors.py
index e698baa..c97ddd7 100644
--- a/tempest/api/database/flavors/test_flavors.py
+++ b/tempest/api/database/flavors/test_flavors.py
@@ -58,7 +58,8 @@
     @test.services('compute')
     def test_compare_db_flavors_with_os(self):
         db_flavors = self.client.list_db_flavors()
-        os_flavors = self.os_flavors_client.list_flavors(detail=True)
+        os_flavors = (self.os_flavors_client.list_flavors(detail=True)
+                      ['flavors'])
         self.assertEqual(len(os_flavors), len(db_flavors),
                          "OS flavors %s do not match DB flavors %s" %
                          (os_flavors, db_flavors))
diff --git a/tempest/api/database/limits/test_limits.py b/tempest/api/database/limits/test_limits.py
index 7d2fbac..4b7f2d6 100644
--- a/tempest/api/database/limits/test_limits.py
+++ b/tempest/api/database/limits/test_limits.py
@@ -29,7 +29,7 @@
     def test_absolute_limits(self):
         # Test to verify if all absolute limit parameters are
         # present when verb is ABSOLUTE
-        limits = self.client.list_db_limits()
+        limits = self.client.list_db_limits()['limits']
         expected_abs_limits = ['max_backups', 'max_volumes',
                                'max_instances', 'verb']
         absolute_limit = [l for l in limits
diff --git a/tempest/api/database/versions/test_versions.py b/tempest/api/database/versions/test_versions.py
index 55d8246..ae568b1 100644
--- a/tempest/api/database/versions/test_versions.py
+++ b/tempest/api/database/versions/test_versions.py
@@ -27,7 +27,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('6952cd77-90cd-4dca-bb60-8e2c797940cf')
     def test_list_db_versions(self):
-        versions = self.client.list_db_versions()
+        versions = self.client.list_db_versions()['versions']
         self.assertTrue(len(versions) > 0, "No database versions found")
         # List of all versions should contain the current version, and there
         # should only be one 'current' version
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
new file mode 100644
index 0000000..3af2e90
--- /dev/null
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -0,0 +1,90 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class EndPointsTestJSON(base.BaseIdentityV2AdminTest):
+
+    @classmethod
+    def resource_setup(cls):
+        super(EndPointsTestJSON, cls).resource_setup()
+        cls.service_ids = list()
+        s_name = data_utils.rand_name('service')
+        s_type = data_utils.rand_name('type')
+        s_description = data_utils.rand_name('description')
+        cls.service_data =\
+            cls.client.create_service(s_name, s_type,
+                                      description=s_description)
+        cls.service_id = cls.service_data['id']
+        cls.service_ids.append(cls.service_id)
+        # Create endpoints to use in the LIST and GET test cases
+        cls.setup_endpoints = list()
+        for i in range(2):
+            region = data_utils.rand_name('region')
+            url = data_utils.rand_url()
+            endpoint = cls.client.create_endpoint(cls.service_id,
+                                                  region,
+                                                  publicurl=url,
+                                                  adminurl=url,
+                                                  internalurl=url)
+            # list_endpoints() will return 'enabled' field
+            endpoint['enabled'] = True
+            cls.setup_endpoints.append(endpoint)
+
+    @classmethod
+    def resource_cleanup(cls):
+        for e in cls.setup_endpoints:
+            cls.client.delete_endpoint(e['id'])
+        for s in cls.service_ids:
+            cls.client.delete_service(s)
+        super(EndPointsTestJSON, cls).resource_cleanup()
+
+    @test.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
+    def test_list_endpoints(self):
+        # Get a list of endpoints
+        fetched_endpoints = self.client.list_endpoints()
+        # Asserting LIST endpoints
+        missing_endpoints =\
+            [e for e in self.setup_endpoints if e not in fetched_endpoints]
+        self.assertEqual(0, len(missing_endpoints),
+                         "Failed to find endpoint %s in fetched list" %
+                         ', '.join(str(e) for e in missing_endpoints))
+
+    @test.idempotent_id('9974530a-aa28-4362-8403-f06db02b26c1')
+    def test_create_list_delete_endpoint(self):
+        region = data_utils.rand_name('region')
+        url = data_utils.rand_url()
+        endpoint = self.client.create_endpoint(self.service_id,
+                                               region,
+                                               publicurl=url,
+                                               adminurl=url,
+                                               internalurl=url)
+        # Asserting Create Endpoint response body
+        self.assertIn('id', endpoint)
+        self.assertEqual(region, endpoint['region'])
+        self.assertEqual(url, endpoint['publicurl'])
+        # Checking if created endpoint is present in the list of endpoints
+        fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+        self.assertIn(endpoint['id'], fetched_endpoints_id)
+        # Deleting the endpoint created in this method
+        self.client.delete_endpoint(endpoint['id'])
+        # Checking whether endpoint is deleted successfully
+        fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+        self.assertNotIn(endpoint['id'], fetched_endpoints_id)
diff --git a/tempest/api/identity/admin/v2/test_roles.py b/tempest/api/identity/admin/v2/test_roles.py
index 1babc45..0b28a07 100644
--- a/tempest/api/identity/admin/v2/test_roles.py
+++ b/tempest/api/identity/admin/v2/test_roles.py
@@ -76,7 +76,7 @@
         self.data.setup_test_role()
         role_id = self.data.role['id']
         role_name = self.data.role['name']
-        body = self.client.get_role(role_id)
+        body = self.client.get_role(role_id)['role']
         self.assertEqual(role_id, body['id'])
         self.assertEqual(role_name, body['name'])
 
diff --git a/tempest/api/identity/admin/v2/test_tenants.py b/tempest/api/identity/admin/v2/test_tenants.py
index f828f66..9fff5f3 100644
--- a/tempest/api/identity/admin/v2/test_tenants.py
+++ b/tempest/api/identity/admin/v2/test_tenants.py
@@ -32,7 +32,7 @@
             self.data.tenants.append(tenant)
             tenants.append(tenant)
         tenant_ids = map(lambda x: x['id'], tenants)
-        body = self.client.list_tenants()
+        body = self.client.list_tenants()['tenants']
         found = [t for t in body if t['id'] in tenant_ids]
         self.assertEqual(len(found), len(tenants), 'Tenants not created')
 
@@ -40,7 +40,7 @@
             self.client.delete_tenant(tenant['id'])
             self.data.tenants.remove(tenant)
 
-        body = self.client.list_tenants()
+        body = self.client.list_tenants()['tenants']
         found = [tenant for tenant in body if tenant['id'] in tenant_ids]
         self.assertFalse(any(found), 'Tenants failed to delete')
 
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index 662d06c..d22b27f 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -33,12 +33,12 @@
         for i in range(2):
             cls.project = cls.client.create_project(
                 data_utils.rand_name('project'),
-                description=data_utils.rand_name('project-desc'))
+                description=data_utils.rand_name('project-desc'))['project']
             cls.projects.append(cls.project['id'])
 
         cls.user_body = cls.client.create_user(
             u_name, description=u_desc, password=u_password,
-            email=u_email, project_id=cls.projects[0])
+            email=u_email, project_id=cls.projects[0])['user']
 
     @classmethod
     def resource_cleanup(cls):
@@ -57,7 +57,7 @@
                 data_utils.rand_name('Secret')]
         cred = self.creds_client.create_credential(
             keys[0], keys[1], self.user_body['id'],
-            self.projects[0])
+            self.projects[0])['credential']
         self.addCleanup(self._delete_credential, cred['id'])
         for value1 in self.creds_list[0]:
             self.assertIn(value1, cred)
@@ -68,14 +68,14 @@
                     data_utils.rand_name('NewSecret')]
         update_body = self.creds_client.update_credential(
             cred['id'], access_key=new_keys[0], secret_key=new_keys[1],
-            project_id=self.projects[1])
+            project_id=self.projects[1])['credential']
         self.assertEqual(cred['id'], update_body['id'])
         self.assertEqual(self.projects[1], update_body['project_id'])
         self.assertEqual(self.user_body['id'], update_body['user_id'])
         self.assertEqual(update_body['blob']['access'], new_keys[0])
         self.assertEqual(update_body['blob']['secret'], new_keys[1])
 
-        get_body = self.creds_client.get_credential(cred['id'])
+        get_body = self.creds_client.get_credential(cred['id'])['credential']
         for value1 in self.creds_list[0]:
             self.assertEqual(update_body[value1],
                              get_body[value1])
@@ -92,11 +92,11 @@
             cred = self.creds_client.create_credential(
                 data_utils.rand_name('Access'),
                 data_utils.rand_name('Secret'),
-                self.user_body['id'], self.projects[0])
+                self.user_body['id'], self.projects[0])['credential']
             created_cred_ids.append(cred['id'])
             self.addCleanup(self._delete_credential, cred['id'])
 
-        creds = self.creds_client.list_credentials()
+        creds = self.creds_client.list_credentials()['credentials']
 
         for i in creds:
             fetched_cred_ids.append(i['id'])
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index 98fff09..4c69758 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -39,13 +39,14 @@
     def test_default_project_id(self):
         # create a domain
         dom_name = data_utils.rand_name('dom')
-        domain_body = self.client.create_domain(dom_name)
+        domain_body = self.client.create_domain(dom_name)['domain']
         dom_id = domain_body['id']
         self.addCleanup(self._delete_domain, dom_id)
 
         # create a project in the domain
         proj_name = data_utils.rand_name('proj')
-        proj_body = self.client.create_project(proj_name, domain_id=dom_id)
+        proj_body = self.client.create_project(proj_name,
+                                               domain_id=dom_id)['project']
         proj_id = proj_body['id']
         self.addCleanup(self.client.delete_project, proj_id)
         self.assertEqual(proj_body['domain_id'], dom_id,
@@ -57,7 +58,7 @@
         user_name = data_utils.rand_name('user')
         user_body = self.client.create_user(user_name, password=user_name,
                                             domain_id=dom_id,
-                                            default_project_id=proj_id)
+                                            default_project_id=proj_id)['user']
         user_id = user_body['id']
         self.addCleanup(self.client.delete_user, user_id)
         self.assertEqual(user_body['domain_id'], dom_id,
@@ -82,6 +83,6 @@
 
         # verify the user's token and see that it is scoped to the project
         token, auth_data = admin_client.auth_provider.get_auth()
-        result = admin_client.identity_v3_client.get_token(token)
+        result = admin_client.identity_v3_client.get_token(token)['token']
         self.assertEqual(result['project']['domain']['id'], dom_id)
         self.assertEqual(result['project']['id'], proj_id)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 5bfb981..742d737 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -37,12 +37,12 @@
         for _ in range(3):
             domain = self.client.create_domain(
                 data_utils.rand_name('domain'),
-                description=data_utils.rand_name('domain-desc'))
+                description=data_utils.rand_name('domain-desc'))['domain']
             # Delete the domain at the end of this method
             self.addCleanup(self._delete_domain, domain['id'])
             domain_ids.append(domain['id'])
         # List and Verify Domains
-        body = self.client.list_domains()
+        body = self.client.list_domains()['domains']
         for d in body:
             fetched_ids.append(d['id'])
         missing_doms = [d for d in domain_ids if d not in fetched_ids]
@@ -54,7 +54,7 @@
         d_name = data_utils.rand_name('domain')
         d_desc = data_utils.rand_name('domain-desc')
         domain = self.client.create_domain(
-            d_name, description=d_desc)
+            d_name, description=d_desc)['domain']
         self.addCleanup(self._delete_domain, domain['id'])
         self.assertIn('id', domain)
         self.assertIn('description', domain)
@@ -69,7 +69,7 @@
         new_name = data_utils.rand_name('new-name')
 
         updated_domain = self.client.update_domain(
-            domain['id'], name=new_name, description=new_desc)
+            domain['id'], name=new_name, description=new_desc)['domain']
         self.assertIn('id', updated_domain)
         self.assertIn('description', updated_domain)
         self.assertIn('name', updated_domain)
@@ -80,7 +80,7 @@
         self.assertEqual(new_desc, updated_domain['description'])
         self.assertEqual(True, updated_domain['enabled'])
 
-        fetched_domain = self.client.get_domain(domain['id'])
+        fetched_domain = self.client.get_domain(domain['id'])['domain']
         self.assertEqual(new_name, fetched_domain['name'])
         self.assertEqual(new_desc, fetched_domain['description'])
         self.assertEqual(True, fetched_domain['enabled'])
@@ -91,7 +91,7 @@
         d_name = data_utils.rand_name('domain')
         d_desc = data_utils.rand_name('domain-desc')
         domain = self.client.create_domain(
-            d_name, description=d_desc, enabled=False)
+            d_name, description=d_desc, enabled=False)['domain']
         self.addCleanup(self.client.delete_domain, domain['id'])
         self.assertEqual(d_name, domain['name'])
         self.assertFalse(domain['enabled'])
@@ -101,7 +101,7 @@
     def test_create_domain_without_description(self):
         # Create domain only with name
         d_name = data_utils.rand_name('domain')
-        domain = self.client.create_domain(d_name)
+        domain = self.client.create_domain(d_name)['domain']
         self.addCleanup(self._delete_domain, domain['id'])
         self.assertIn('id', domain)
         expected_data = {'name': d_name, 'enabled': True}
@@ -119,6 +119,6 @@
     @test.attr(type='smoke')
     @test.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
     def test_default_domain_exists(self):
-        domain = self.client.get_domain(self.domain_id)
+        domain = self.client.get_domain(self.domain_id)['domain']
 
         self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index e2f3ef5..156179c 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -28,7 +28,8 @@
     def test_delete_active_domain(self):
         d_name = data_utils.rand_name('domain')
         d_desc = data_utils.rand_name('domain-desc')
-        domain = self.client.create_domain(d_name, description=d_desc)
+        domain = self.client.create_domain(d_name,
+                                           description=d_desc)['domain']
         domain_id = domain['id']
 
         self.addCleanup(self.delete_domain, domain_id)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 9a8104f..e44a96b 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -36,6 +36,7 @@
         cls.service_data =\
             cls.service_client.create_service(s_name, s_type,
                                               description=s_description)
+        cls.service_data = cls.service_data['service']
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
         # Create endpoints to use in the LIST and GET test cases
@@ -44,8 +45,8 @@
             region = data_utils.rand_name('region')
             url = data_utils.rand_url()
             interface = 'public'
-            endpoint = cls.client.create_endpoint(
-                cls.service_id, interface, url, region=region, enabled=True)
+            endpoint = (cls.client.create_endpoint(cls.service_id, interface,
+                        url, region=region, enabled=True))['endpoint']
             cls.setup_endpoints.append(endpoint)
 
     @classmethod
@@ -59,7 +60,7 @@
     @test.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
     def test_list_endpoints(self):
         # Get a list of endpoints
-        fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints = self.client.list_endpoints()['endpoints']
         # Asserting LIST endpoints
         missing_endpoints =\
             [e for e in self.setup_endpoints if e not in fetched_endpoints]
@@ -72,21 +73,20 @@
         region = data_utils.rand_name('region')
         url = data_utils.rand_url()
         interface = 'public'
-        endpoint =\
-            self.client.create_endpoint(self.service_id, interface, url,
-                                        region=region, enabled=True)
+        endpoint = (self.client.create_endpoint(self.service_id, interface,
+                    url, region=region, enabled=True)['endpoint'])
         # Asserting Create Endpoint response body
         self.assertIn('id', endpoint)
         self.assertEqual(region, endpoint['region'])
         self.assertEqual(url, endpoint['url'])
         # Checking if created endpoint is present in the list of endpoints
-        fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints = self.client.list_endpoints()['endpoints']
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertIn(endpoint['id'], fetched_endpoints_id)
         # Deleting the endpoint created in this method
         self.client.delete_endpoint(endpoint['id'])
         # Checking whether endpoint is deleted successfully
-        fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints = self.client.list_endpoints()['endpoints']
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertNotIn(endpoint['id'], fetched_endpoints_id)
 
@@ -101,7 +101,7 @@
         endpoint_for_update =\
             self.client.create_endpoint(self.service_id, interface1,
                                         url1, region=region1,
-                                        enabled=True)
+                                        enabled=True)['endpoint']
         self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
         # Creating service so as update endpoint with new service ID
         s_name = data_utils.rand_name('service')
@@ -110,6 +110,7 @@
         service2 =\
             self.service_client.create_service(s_name, s_type,
                                                description=s_description)
+        service2 = service2['service']
         self.service_ids.append(service2['id'])
         # Updating endpoint with new values
         region2 = data_utils.rand_name('region')
@@ -119,7 +120,8 @@
             self.client.update_endpoint(endpoint_for_update['id'],
                                         service_id=service2['id'],
                                         interface=interface2, url=url2,
-                                        region=region2, enabled=False)
+                                        region=region2,
+                                        enabled=False)['endpoint']
         # Asserting if the attributes of endpoint are updated
         self.assertEqual(service2['id'], endpoint['service_id'])
         self.assertEqual(interface2, endpoint['interface'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index b043415..8cf853b 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -38,7 +38,8 @@
         s_description = data_utils.rand_name('description')
         cls.service_data = (
             cls.service_client.create_service(s_name, s_type,
-                                              description=s_description))
+                                              description=s_description)
+            ['service'])
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
 
@@ -78,7 +79,8 @@
         interface1 = 'public'
         endpoint_for_update = (
             self.client.create_endpoint(self.service_id, interface1,
-                                        url1, region=region1, enabled=True))
+                                        url1, region=region1,
+                                        enabled=True))['endpoint']
         self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
 
         self.assertRaises(lib_exc.BadRequest, self.client.update_endpoint,
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 88e2959..5ce6354 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -25,7 +25,7 @@
         name = data_utils.rand_name('Group')
         description = data_utils.rand_name('Description')
         group = self.client.create_group(name,
-                                         description=description)
+                                         description=description)['group']
         self.addCleanup(self.client.delete_group, group['id'])
         self.assertEqual(group['name'], name)
         self.assertEqual(group['description'], description)
@@ -34,11 +34,11 @@
         new_desc = data_utils.rand_name('UpdateDescription')
         updated_group = self.client.update_group(group['id'],
                                                  name=new_name,
-                                                 description=new_desc)
+                                                 description=new_desc)['group']
         self.assertEqual(updated_group['name'], new_name)
         self.assertEqual(updated_group['description'], new_desc)
 
-        new_group = self.client.get_group(group['id'])
+        new_group = self.client.get_group(group['id'])['group']
         self.assertEqual(group['id'], new_group['id'])
         self.assertEqual(new_name, new_group['name'])
         self.assertEqual(new_desc, new_group['description'])
@@ -47,25 +47,25 @@
     @test.idempotent_id('1598521a-2f36-4606-8df9-30772bd51339')
     def test_group_users_add_list_delete(self):
         name = data_utils.rand_name('Group')
-        group = self.client.create_group(name)
+        group = self.client.create_group(name)['group']
         self.addCleanup(self.client.delete_group, group['id'])
         # add user into group
         users = []
         for i in range(3):
             name = data_utils.rand_name('User')
-            user = self.client.create_user(name)
+            user = self.client.create_user(name)['user']
             users.append(user)
             self.addCleanup(self.client.delete_user, user['id'])
             self.client.add_group_user(group['id'], user['id'])
 
         # list users in group
-        group_users = self.client.list_group_users(group['id'])
+        group_users = self.client.list_group_users(group['id'])['users']
         self.assertEqual(sorted(users), sorted(group_users))
         # delete user in group
         for user in users:
             self.client.delete_group_user(group['id'],
                                           user['id'])
-        group_users = self.client.list_group_users(group['id'])
+        group_users = self.client.list_group_users(group['id'])['users']
         self.assertEqual(len(group_users), 0)
 
     @test.idempotent_id('64573281-d26a-4a52-b899-503cb0f4e4ec')
@@ -73,18 +73,18 @@
         # create a user
         user = self.client.create_user(
             data_utils.rand_name('User'),
-            password=data_utils.rand_name('Pass'))
+            password=data_utils.rand_name('Pass'))['user']
         self.addCleanup(self.client.delete_user, user['id'])
         # create two groups, and add user into them
         groups = []
         for i in range(2):
             name = data_utils.rand_name('Group')
-            group = self.client.create_group(name)
+            group = self.client.create_group(name)['group']
             groups.append(group)
             self.addCleanup(self.client.delete_group, group['id'])
             self.client.add_group_user(group['id'], user['id'])
         # list groups which user belongs to
-        user_groups = self.client.list_user_groups(user['id'])
+        user_groups = self.client.list_user_groups(user['id'])['groups']
         self.assertEqual(sorted(groups), sorted(user_groups))
         self.assertEqual(2, len(user_groups))
 
@@ -97,11 +97,11 @@
             name = data_utils.rand_name('Group')
             description = data_utils.rand_name('Description')
             group = self.client.create_group(name,
-                                             description=description)
+                                             description=description)['group']
             self.addCleanup(self.client.delete_group, group['id'])
             group_ids.append(group['id'])
         # List and Verify Groups
-        body = self.client.list_groups()
+        body = self.client.list_groups()['groups']
         for g in body:
             fetched_ids.append(g['id'])
         missing_groups = [g for g in group_ids if g not in fetched_ids]
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 12d80bb..5185fea 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -28,22 +28,23 @@
         # Create project with domain
         cls.p1_name = data_utils.rand_name('project')
         cls.p1 = cls.client.create_project(
-            cls.p1_name, enabled=False, domain_id=cls.data.domain['id'])
+            cls.p1_name, enabled=False,
+            domain_id=cls.data.domain['id'])['project']
         cls.data.projects.append(cls.p1)
         cls.project_ids.append(cls.p1['id'])
         # Create default project
         p2_name = data_utils.rand_name('project')
-        cls.p2 = cls.client.create_project(p2_name)
+        cls.p2 = cls.client.create_project(p2_name)['project']
         cls.data.projects.append(cls.p2)
         cls.project_ids.append(cls.p2['id'])
 
     @test.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
     def test_projects_list(self):
         # List projects
-        list_projects = self.client.list_projects()
+        list_projects = self.client.list_projects()['projects']
 
         for p in self.project_ids:
-            get_project = self.client.get_project(p)
+            get_project = self.client.get_project(p)['project']
             self.assertIn(get_project, list_projects)
 
     @test.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
@@ -63,6 +64,6 @@
         self._list_projects_with_params({'name': self.p1_name}, 'name')
 
     def _list_projects_with_params(self, params, key):
-        body = self.client.list_projects(params)
+        body = self.client.list_projects(params)['projects']
         self.assertIn(self.p1[key], map(lambda x: x[key], body))
         self.assertNotIn(self.p2[key], map(lambda x: x[key], body))
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index d3d51b4..320b479 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -25,7 +25,7 @@
         # assert the response based on expected and not_expected
         # expected: user expected in the list response
         # not_expected: user, which should not be present in list response
-        body = self.client.get_users(params)
+        body = self.client.get_users(params)['users']
         self.assertIn(expected[key], map(lambda x: x[key], body))
         self.assertNotIn(not_expected[key],
                          map(lambda x: x[key], body))
@@ -41,13 +41,13 @@
         u1_name = data_utils.rand_name('test_user')
         cls.domain_enabled_user = cls.client.create_user(
             u1_name, password=alt_password,
-            email=cls.alt_email, domain_id=cls.data.domain['id'])
+            email=cls.alt_email, domain_id=cls.data.domain['id'])['user']
         cls.data.v3_users.append(cls.domain_enabled_user)
         # Create default not enabled user
         u2_name = data_utils.rand_name('test_user')
         cls.non_domain_enabled_user = cls.client.create_user(
             u2_name, password=alt_password,
-            email=cls.alt_email, enabled=False)
+            email=cls.alt_email, enabled=False)['user']
         cls.data.v3_users.append(cls.non_domain_enabled_user)
 
     @test.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
@@ -77,7 +77,7 @@
     @test.idempotent_id('b30d4651-a2ea-4666-8551-0c0e49692635')
     def test_list_users(self):
         # List users
-        body = self.client.get_users()
+        body = self.client.get_users()['users']
         fetched_ids = [u['id'] for u in body]
         missing_users = [u['id'] for u in self.data.v3_users
                          if u['id'] not in fetched_ids]
@@ -88,7 +88,7 @@
     @test.idempotent_id('b4baa3ae-ac00-4b4e-9e27-80deaad7771f')
     def test_get_user(self):
         # Get a user detail
-        user = self.client.get_user(self.data.v3_users[0]['id'])
+        user = self.client.get_user(self.data.v3_users[0]['id'])['user']
         self.assertEqual(self.data.v3_users[0]['id'], user['id'])
         self.assertEqual(self.data.v3_users[0]['name'], user['name'])
         self.assertEqual(self.alt_email, user['email'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 8b67945..d079fec 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -32,12 +32,12 @@
             blob = data_utils.rand_name('BlobName')
             policy_type = data_utils.rand_name('PolicyType')
             policy = self.policy_client.create_policy(blob,
-                                                      policy_type)
+                                                      policy_type)['policy']
             # Delete the Policy at the end of this method
             self.addCleanup(self._delete_policy, policy['id'])
             policy_ids.append(policy['id'])
         # List and Verify Policies
-        body = self.policy_client.list_policies()
+        body = self.policy_client.list_policies()['policies']
         for p in body:
             fetched_ids.append(p['id'])
         missing_pols = [p for p in policy_ids if p not in fetched_ids]
@@ -49,7 +49,7 @@
         # Test to update policy
         blob = data_utils.rand_name('BlobName')
         policy_type = data_utils.rand_name('PolicyType')
-        policy = self.policy_client.create_policy(blob, policy_type)
+        policy = self.policy_client.create_policy(blob, policy_type)['policy']
         self.addCleanup(self._delete_policy, policy['id'])
         self.assertIn('id', policy)
         self.assertIn('type', policy)
@@ -60,10 +60,10 @@
         # Update policy
         update_type = data_utils.rand_name('UpdatedPolicyType')
         data = self.policy_client.update_policy(
-            policy['id'], type=update_type)
+            policy['id'], type=update_type)['policy']
         self.assertIn('type', data)
         # Assertion for updated value with fetched value
-        fetched_policy = self.policy_client.get_policy(policy['id'])
+        fetched_policy = self.policy_client.get_policy(policy['id'])['policy']
         self.assertIn('id', fetched_policy)
         self.assertIn('blob', fetched_policy)
         self.assertIn('type', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 17712f3..f014307 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -26,13 +26,13 @@
         project_name = data_utils.rand_name('project')
         project_desc = data_utils.rand_name('desc')
         project = self.client.create_project(
-            project_name, description=project_desc)
+            project_name, description=project_desc)['project']
         self.data.projects.append(project)
         project_id = project['id']
         desc1 = project['description']
         self.assertEqual(desc1, project_desc, 'Description should have '
                          'been sent in response for create')
-        body = self.client.get_project(project_id)
+        body = self.client.get_project(project_id)['project']
         desc2 = body['description']
         self.assertEqual(desc2, project_desc, 'Description does not appear '
                          'to be set')
@@ -43,12 +43,12 @@
         self.data.setup_test_domain()
         project_name = data_utils.rand_name('project')
         project = self.client.create_project(
-            project_name, domain_id=self.data.domain['id'])
+            project_name, domain_id=self.data.domain['id'])['project']
         self.data.projects.append(project)
         project_id = project['id']
         self.assertEqual(project_name, project['name'])
         self.assertEqual(self.data.domain['id'], project['domain_id'])
-        body = self.client.get_project(project_id)
+        body = self.client.get_project(project_id)['project']
         self.assertEqual(project_name, body['name'])
         self.assertEqual(self.data.domain['id'], body['domain_id'])
 
@@ -57,12 +57,12 @@
         # Create a project that is enabled
         project_name = data_utils.rand_name('project')
         project = self.client.create_project(
-            project_name, enabled=True)
+            project_name, enabled=True)['project']
         self.data.projects.append(project)
         project_id = project['id']
         en1 = project['enabled']
         self.assertTrue(en1, 'Enable should be True in response')
-        body = self.client.get_project(project_id)
+        body = self.client.get_project(project_id)['project']
         en2 = body['enabled']
         self.assertTrue(en2, 'Enable should be True in lookup')
 
@@ -71,12 +71,12 @@
         # Create a project that is not enabled
         project_name = data_utils.rand_name('project')
         project = self.client.create_project(
-            project_name, enabled=False)
+            project_name, enabled=False)['project']
         self.data.projects.append(project)
         en1 = project['enabled']
         self.assertEqual('false', str(en1).lower(),
                          'Enable should be False in response')
-        body = self.client.get_project(project['id'])
+        body = self.client.get_project(project['id'])['project']
         en2 = body['enabled']
         self.assertEqual('false', str(en2).lower(),
                          'Enable should be False in lookup')
@@ -85,17 +85,18 @@
     def test_project_update_name(self):
         # Update name attribute of a project
         p_name1 = data_utils.rand_name('project')
-        project = self.client.create_project(p_name1)
+        project = self.client.create_project(p_name1)['project']
         self.data.projects.append(project)
 
         resp1_name = project['name']
 
         p_name2 = data_utils.rand_name('project2')
-        body = self.client.update_project(project['id'], name=p_name2)
+        body = self.client.update_project(project['id'],
+                                          name=p_name2)['project']
         resp2_name = body['name']
         self.assertNotEqual(resp1_name, resp2_name)
 
-        body = self.client.get_project(project['id'])
+        body = self.client.get_project(project['id'])['project']
         resp3_name = body['name']
 
         self.assertNotEqual(resp1_name, resp3_name)
@@ -108,17 +109,17 @@
         p_name = data_utils.rand_name('project')
         p_desc = data_utils.rand_name('desc')
         project = self.client.create_project(
-            p_name, description=p_desc)
+            p_name, description=p_desc)['project']
         self.data.projects.append(project)
         resp1_desc = project['description']
 
         p_desc2 = data_utils.rand_name('desc2')
         body = self.client.update_project(
-            project['id'], description=p_desc2)
+            project['id'], description=p_desc2)['project']
         resp2_desc = body['description']
         self.assertNotEqual(resp1_desc, resp2_desc)
 
-        body = self.client.get_project(project['id'])
+        body = self.client.get_project(project['id'])['project']
         resp3_desc = body['description']
 
         self.assertNotEqual(resp1_desc, resp3_desc)
@@ -130,18 +131,18 @@
         # Update the enabled attribute of a project
         p_name = data_utils.rand_name('project')
         p_en = False
-        project = self.client.create_project(p_name, enabled=p_en)
+        project = self.client.create_project(p_name, enabled=p_en)['project']
         self.data.projects.append(project)
 
         resp1_en = project['enabled']
 
         p_en2 = True
         body = self.client.update_project(
-            project['id'], enabled=p_en2)
+            project['id'], enabled=p_en2)['project']
         resp2_en = body['enabled']
         self.assertNotEqual(resp1_en, resp2_en)
 
-        body = self.client.get_project(project['id'])
+        body = self.client.get_project(project['id'])['project']
         resp3_en = body['enabled']
 
         self.assertNotEqual(resp1_en, resp3_en)
@@ -153,7 +154,7 @@
         # Associate a user to a project
         # Create a Project
         p_name = data_utils.rand_name('project')
-        project = self.client.create_project(p_name)
+        project = self.client.create_project(p_name)['project']
         self.data.projects.append(project)
 
         # Create a User
@@ -163,12 +164,12 @@
         u_password = data_utils.rand_name('pass')
         user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
-            email=u_email, project_id=project['id'])
+            email=u_email, project_id=project['id'])['user']
         # Delete the User at the end of this method
         self.addCleanup(self.client.delete_user, user['id'])
 
         # Get User To validate the user details
-        new_user_get = self.client.get_user(user['id'])
+        new_user_get = self.client.get_user(user['id'])['user']
         # Assert response body of GET
         self.assertEqual(u_name, new_user_get['name'])
         self.assertEqual(u_desc, new_user_get['description'])
diff --git a/tempest/api/identity/admin/v3/test_projects_negative.py b/tempest/api/identity/admin/v3/test_projects_negative.py
index d5ee5a7..9b60d54 100644
--- a/tempest/api/identity/admin/v3/test_projects_negative.py
+++ b/tempest/api/identity/admin/v3/test_projects_negative.py
@@ -34,7 +34,7 @@
     def test_project_create_duplicate(self):
         # Project names should be unique
         project_name = data_utils.rand_name('project-dup')
-        project = self.client.create_project(project_name)
+        project = self.client.create_project(project_name)['project']
         self.data.projects.append(project)
 
         self.assertRaises(
@@ -69,7 +69,7 @@
     def test_project_delete_by_unauthorized_user(self):
         # Non-admin user should not be able to delete a project
         project_name = data_utils.rand_name('project')
-        project = self.client.create_project(project_name)
+        project = self.client.create_project(project_name)['project']
         self.data.projects.append(project)
         self.assertRaises(
             lib_exc.Forbidden, self.non_admin_client.delete_project,
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index 7eb92bc..e96e0f5 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -33,7 +33,7 @@
         cls.setup_regions = list()
         for i in range(2):
             r_description = data_utils.rand_name('description')
-            region = cls.client.create_region(r_description)
+            region = cls.client.create_region(r_description)['region']
             cls.setup_regions.append(region)
 
     @classmethod
@@ -51,7 +51,8 @@
     def test_create_update_get_delete_region(self):
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
-            r_description, parent_region_id=self.setup_regions[0]['id'])
+            r_description,
+            parent_region_id=self.setup_regions[0]['id'])['region']
         self.addCleanup(self._delete_region, region['id'])
         self.assertEqual(r_description, region['description'])
         self.assertEqual(self.setup_regions[0]['id'],
@@ -61,12 +62,12 @@
         region = self.client.update_region(
             region['id'],
             description=r_alt_description,
-            parent_region_id=self.setup_regions[1]['id'])
+            parent_region_id=self.setup_regions[1]['id'])['region']
         self.assertEqual(r_alt_description, region['description'])
         self.assertEqual(self.setup_regions[1]['id'],
                          region['parent_region_id'])
         # Get the details of region
-        region = self.client.get_region(region['id'])
+        region = self.client.get_region(region['id'])['region']
         self.assertEqual(r_alt_description, region['description'])
         self.assertEqual(self.setup_regions[1]['id'],
                          region['parent_region_id'])
@@ -78,7 +79,7 @@
         r_region_id = data_utils.rand_uuid()
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
-            r_description, unique_region_id=r_region_id)
+            r_description, unique_region_id=r_region_id)['region']
         self.addCleanup(self._delete_region, region['id'])
         # Asserting Create Region with specific id response body
         self.assertEqual(r_region_id, region['id'])
@@ -87,7 +88,7 @@
     @test.idempotent_id('d180bf99-544a-445c-ad0d-0c0d27663796')
     def test_list_regions(self):
         # Get a list of regions
-        fetched_regions = self.client.list_regions()
+        fetched_regions = self.client.list_regions()['regions']
         missing_regions =\
             [e for e in self.setup_regions if e not in fetched_regions]
         # Asserting List Regions response
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index f58a5c5..ffc991a 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -25,7 +25,7 @@
         super(RolesV3TestJSON, cls).resource_setup()
         for _ in range(3):
             role_name = data_utils.rand_name(name='role')
-            role = cls.client.create_role(role_name)
+            role = cls.client.create_role(role_name)['role']
             cls.data.v3_roles.append(role)
         cls.fetched_role_ids = list()
         u_name = data_utils.rand_name('user')
@@ -34,20 +34,20 @@
         cls.u_password = data_utils.rand_name('pass')
         cls.domain = cls.client.create_domain(
             data_utils.rand_name('domain'),
-            description=data_utils.rand_name('domain-desc'))
+            description=data_utils.rand_name('domain-desc'))['domain']
         cls.project = cls.client.create_project(
             data_utils.rand_name('project'),
             description=data_utils.rand_name('project-desc'),
-            domain_id=cls.domain['id'])
+            domain_id=cls.domain['id'])['project']
         cls.group_body = cls.client.create_group(
             data_utils.rand_name('Group'), project_id=cls.project['id'],
-            domain_id=cls.domain['id'])
+            domain_id=cls.domain['id'])['group']
         cls.user_body = cls.client.create_user(
             u_name, description=u_desc, password=cls.u_password,
             email=u_email, project_id=cls.project['id'],
-            domain_id=cls.domain['id'])
+            domain_id=cls.domain['id'])['user']
         cls.role = cls.client.create_role(
-            data_utils.rand_name('Role'))
+            data_utils.rand_name('Role'))['role']
 
     @classmethod
     def resource_cleanup(cls):
@@ -69,23 +69,23 @@
     @test.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
     def test_role_create_update_get_list(self):
         r_name = data_utils.rand_name('Role')
-        role = self.client.create_role(r_name)
+        role = self.client.create_role(r_name)['role']
         self.addCleanup(self.client.delete_role, role['id'])
         self.assertIn('name', role)
         self.assertEqual(role['name'], r_name)
 
         new_name = data_utils.rand_name('NewRole')
-        updated_role = self.client.update_role(new_name, role['id'])
+        updated_role = self.client.update_role(new_name, role['id'])['role']
         self.assertIn('name', updated_role)
         self.assertIn('id', updated_role)
         self.assertIn('links', updated_role)
         self.assertNotEqual(r_name, updated_role['name'])
 
-        new_role = self.client.get_role(role['id'])
+        new_role = self.client.get_role(role['id'])['role']
         self.assertEqual(new_name, new_role['name'])
         self.assertEqual(updated_role['id'], new_role['id'])
 
-        roles = self.client.list_roles()
+        roles = self.client.list_roles()['roles']
         self.assertIn(role['id'], [r['id'] for r in roles])
 
     @test.idempotent_id('c6b80012-fe4a-498b-9ce8-eb391c05169f')
@@ -94,7 +94,7 @@
             self.project['id'], self.user_body['id'], self.role['id'])
 
         roles = self.client.list_user_roles_on_project(
-            self.project['id'], self.user_body['id'])
+            self.project['id'], self.user_body['id'])['roles']
 
         for i in roles:
             self.fetched_role_ids.append(i['id'])
@@ -111,7 +111,7 @@
             self.domain['id'], self.user_body['id'], self.role['id'])
 
         roles = self.client.list_user_roles_on_domain(
-            self.domain['id'], self.user_body['id'])
+            self.domain['id'], self.user_body['id'])['roles']
 
         for i in roles:
             self.fetched_role_ids.append(i['id'])
@@ -129,7 +129,7 @@
             self.project['id'], self.group_body['id'], self.role['id'])
         # List group roles on project
         roles = self.client.list_group_roles_on_project(
-            self.project['id'], self.group_body['id'])
+            self.project['id'], self.group_body['id'])['roles']
 
         for i in roles:
             self.fetched_role_ids.append(i['id'])
@@ -158,7 +158,7 @@
             self.domain['id'], self.group_body['id'], self.role['id'])
 
         roles = self.client.list_group_roles_on_domain(
-            self.domain['id'], self.group_body['id'])
+            self.domain['id'], self.group_body['id'])['roles']
 
         for i in roles:
             self.fetched_role_ids.append(i['id'])
@@ -172,6 +172,6 @@
     @test.idempotent_id('f5654bcc-08c4-4f71-88fe-05d64e06de94')
     def test_list_roles(self):
         # Return a list of all roles
-        body = self.client.list_roles()
+        body = self.client.list_roles()['roles']
         found = [role for role in body if role in self.data.v3_roles]
         self.assertEqual(len(found), len(self.data.v3_roles))
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index 95a7dcc..d920f64 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -37,7 +37,7 @@
         serv_type = data_utils.rand_name('type')
         desc = data_utils.rand_name('description')
         create_service = self.service_client.create_service(
-            serv_type, name=name, description=desc)
+            serv_type, name=name, description=desc)['service']
         self.addCleanup(self._del_service, create_service['id'])
         self.assertIsNotNone(create_service['id'])
 
@@ -50,13 +50,13 @@
         resp1_desc = create_service['description']
         s_desc2 = data_utils.rand_name('desc2')
         update_service = self.service_client.update_service(
-            s_id, description=s_desc2)
+            s_id, description=s_desc2)['service']
         resp2_desc = update_service['description']
 
         self.assertNotEqual(resp1_desc, resp2_desc)
 
         # Get service
-        fetched_service = self.service_client.get_service(s_id)
+        fetched_service = self.service_client.get_service(s_id)['service']
         resp3_desc = fetched_service['description']
 
         self.assertEqual(resp2_desc, resp3_desc)
@@ -68,7 +68,7 @@
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
         service = self.service_client.create_service(
-            serv_type, name=name)
+            serv_type, name=name)['service']
         self.addCleanup(self.service_client.delete_service, service['id'])
         self.assertIn('id', service)
         expected_data = {'name': name, 'type': serv_type}
@@ -82,13 +82,13 @@
             name = data_utils.rand_name('service')
             serv_type = data_utils.rand_name('type')
             create_service = self.service_client.create_service(
-                serv_type, name=name)
+                serv_type, name=name)['service']
             self.addCleanup(self.service_client.delete_service,
                             create_service['id'])
             service_ids.append(create_service['id'])
 
         # List and Verify Services
-        services = self.service_client.list_services()
+        services = self.service_client.list_services()['services']
         fetched_ids = [service['id'] for service in services]
         found = [s for s in fetched_ids if s in service_ids]
         self.assertEqual(len(found), len(service_ids))
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 951bc78..5681ac6 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -32,14 +32,14 @@
         u_password = data_utils.rand_name('pass')
         user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
-            email=u_email)
+            email=u_email)['user']
         self.addCleanup(self.client.delete_user, user['id'])
         # Perform Authentication
         resp = self.token.auth(user_id=user['id'],
                                password=u_password).response
         subject_token = resp['x-subject-token']
         # Perform GET Token
-        token_details = self.client.get_token(subject_token)
+        token_details = self.client.get_token(subject_token)['token']
         self.assertEqual(resp['x-subject-token'], subject_token)
         self.assertEqual(token_details['user']['id'], user['id'])
         self.assertEqual(token_details['user']['name'], u_name)
@@ -61,21 +61,22 @@
         # Create a user.
         user_name = data_utils.rand_name(name='user')
         user_password = data_utils.rand_name(name='pass')
-        user = self.client.create_user(user_name, password=user_password)
+        user = self.client.create_user(user_name,
+                                       password=user_password)['user']
         self.addCleanup(self.client.delete_user, user['id'])
 
         # Create a couple projects
         project1_name = data_utils.rand_name(name='project')
-        project1 = self.client.create_project(project1_name)
+        project1 = self.client.create_project(project1_name)['project']
         self.addCleanup(self.client.delete_project, project1['id'])
 
         project2_name = data_utils.rand_name(name='project')
-        project2 = self.client.create_project(project2_name)
+        project2 = self.client.create_project(project2_name)['project']
         self.addCleanup(self.client.delete_project, project2['id'])
 
         # Create a role
         role_name = data_utils.rand_name(name='role')
-        role = self.client.create_role(role_name)
+        role = self.client.create_role(role_name)['role']
         self.addCleanup(self.client.delete_role, role['id'])
 
         # Grant the user the role on both projects.
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 1ac34eb..fac8826 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -34,7 +34,6 @@
         if not CONF.identity_feature_enabled.trust:
             raise self.skipException("Trusts aren't enabled")
 
-        self.trustee_username = CONF.identity.alt_username
         self.trust_id = None
 
     def tearDown(self):
@@ -48,7 +47,7 @@
         # create a project that trusts will be granted on
         self.trustor_project_name = data_utils.rand_name(name='project')
         project = self.client.create_project(self.trustor_project_name,
-                                             domain_id='default')
+                                             domain_id='default')['project']
         self.trustor_project_id = project['id']
         self.assertIsNotNone(self.trustor_project_id)
 
@@ -63,17 +62,17 @@
             password=self.trustor_password,
             email=u_email,
             project_id=self.trustor_project_id,
-            domain_id='default')
+            domain_id='default')['user']
         self.trustor_user_id = user['id']
 
         # And two roles, one we'll delegate and one we won't
         self.delegated_role = data_utils.rand_name('DelegatedRole')
         self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
 
-        role = self.client.create_role(self.delegated_role)
+        role = self.client.create_role(self.delegated_role)['role']
         self.delegated_role_id = role['id']
 
-        role = self.client.create_role(self.not_delegated_role)
+        role = self.client.create_role(self.not_delegated_role)['role']
         self.not_delegated_role_id = role['id']
 
         # Assign roles to trustor
@@ -118,7 +117,7 @@
             project_id=self.trustor_project_id,
             role_names=[self.delegated_role],
             impersonation=impersonate,
-            expires_at=expires)
+            expires_at=expires)['trust']
         self.trust_id = trust_create['id']
         return trust_create
 
@@ -141,7 +140,7 @@
             self.assertEqual(1, len(trust['roles']))
 
     def get_trust(self):
-        trust_get = self.trustor_client.get_trust(self.trust_id)
+        trust_get = self.trustor_client.get_trust(self.trust_id)['trust']
         return trust_get
 
     def validate_role(self, role):
@@ -157,12 +156,12 @@
     def check_trust_roles(self):
         # Check we find the delegated role
         roles_get = self.trustor_client.get_trust_roles(
-            self.trust_id)
+            self.trust_id)['roles']
         self.assertEqual(1, len(roles_get))
         self.validate_role(roles_get[0])
 
         role_get = self.trustor_client.get_trust_role(
-            self.trust_id, self.delegated_role_id)
+            self.trust_id, self.delegated_role_id)['role']
         self.validate_role(role_get)
 
         role_get = self.trustor_client.check_trust_role(
@@ -245,7 +244,7 @@
 
     @test.idempotent_id('3e48f95d-e660-4fa9-85e0-5a3d85594384')
     def test_trust_expire_invalid(self):
-        # Test case to check we can check an invlaid expiry time
+        # Test case to check we can check an invalid expiry time
         # is rejected with the correct error
         # with an expiry specified
         expires_str = 'bad.123Z'
@@ -257,7 +256,7 @@
     def test_get_trusts_query(self):
         self.create_trust()
         trusts_get = self.trustor_client.get_trusts(
-            trustor_user_id=self.trustor_user_id)
+            trustor_user_id=self.trustor_user_id)['trusts']
         self.assertEqual(1, len(trusts_get))
         self.validate_trust(trusts_get[0], summary=True)
 
@@ -265,7 +264,7 @@
     @test.idempotent_id('4773ebd5-ecbf-4255-b8d8-b63e6f72b65d')
     def test_get_trusts_all(self):
         self.create_trust()
-        trusts_get = self.client.get_trusts()
+        trusts_get = self.client.get_trusts()['trusts']
         trusts = [t for t in trusts_get
                   if t['id'] == self.trust_id]
         self.assertEqual(1, len(trusts))
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 19cb24e..8fac0b3 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -30,13 +30,13 @@
         u_password = data_utils.rand_name('pass')
         user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
-            email=u_email, enabled=False)
+            email=u_email, enabled=False)['user']
         # Delete the User at the end of this method
         self.addCleanup(self.client.delete_user, user['id'])
         # Creating a second project for the update
         project = self.client.create_project(
             data_utils.rand_name('project'),
-            description=data_utils.rand_name('project-desc'))
+            description=data_utils.rand_name('project-desc'))['project']
         # Delete the Project at the end of this method
         self.addCleanup(self.client.delete_project, project['id'])
         # Updating user details with new values
@@ -46,7 +46,7 @@
         update_user = self.client.update_user(
             user['id'], name=u_name2, description=u_description2,
             project_id=project['id'],
-            email=u_email2, enabled=False)
+            email=u_email2, enabled=False)['user']
         self.assertEqual(u_name2, update_user['name'])
         self.assertEqual(u_description2, update_user['description'])
         self.assertEqual(project['id'],
@@ -54,7 +54,7 @@
         self.assertEqual(u_email2, update_user['email'])
         self.assertEqual(False, update_user['enabled'])
         # GET by id after the update
-        new_user_get = self.client.get_user(user['id'])
+        new_user_get = self.client.get_user(user['id'])['user']
         # Assert response body of GET after the update
         self.assertEqual(u_name2, new_user_get['name'])
         self.assertEqual(u_description2, new_user_get['description'])
@@ -69,7 +69,7 @@
         u_name = data_utils.rand_name('user')
         original_password = data_utils.rand_name('pass')
         user = self.client.create_user(
-            u_name, password=original_password)
+            u_name, password=original_password)['user']
         # Delete the User at the end of all test methods
         self.addCleanup(self.client.delete_user, user['id'])
         # Update user with new password
@@ -80,7 +80,7 @@
                                password=new_password).response
         subject_token = resp['x-subject-token']
         # Perform GET Token to verify and confirm password is updated
-        token_details = self.client.get_token(subject_token)
+        token_details = self.client.get_token(subject_token)['token']
         self.assertEqual(resp['x-subject-token'], subject_token)
         self.assertEqual(token_details['user']['id'], user['id'])
         self.assertEqual(token_details['user']['name'], u_name)
@@ -92,7 +92,7 @@
         fetched_project_ids = list()
         u_project = self.client.create_project(
             data_utils.rand_name('project'),
-            description=data_utils.rand_name('project-desc'))
+            description=data_utils.rand_name('project-desc'))['project']
         # Delete the Project at the end of this method
         self.addCleanup(self.client.delete_project, u_project['id'])
         # Create a user.
@@ -102,23 +102,23 @@
         u_password = data_utils.rand_name('pass')
         user_body = self.client.create_user(
             u_name, description=u_desc, password=u_password,
-            email=u_email, enabled=False, project_id=u_project['id'])
+            email=u_email, enabled=False, project_id=u_project['id'])['user']
         # Delete the User at the end of this method
         self.addCleanup(self.client.delete_user, user_body['id'])
         # Creating Role
         role_body = self.client.create_role(
-            data_utils.rand_name('role'))
+            data_utils.rand_name('role'))['role']
         # Delete the Role at the end of this method
         self.addCleanup(self.client.delete_role, role_body['id'])
 
-        user = self.client.get_user(user_body['id'])
-        role = self.client.get_role(role_body['id'])
+        user = self.client.get_user(user_body['id'])['user']
+        role = self.client.get_role(role_body['id'])['role']
         for i in range(2):
             # Creating project so as to assign role
             project_body = self.client.create_project(
                 data_utils.rand_name('project'),
-                description=data_utils.rand_name('project-desc'))
-            project = self.client.get_project(project_body['id'])
+                description=data_utils.rand_name('project-desc'))['project']
+            project = self.client.get_project(project_body['id'])['project']
             # Delete the Project at the end of this method
             self.addCleanup(self.client.delete_project, project_body['id'])
             # Assigning roles to user on project
@@ -126,7 +126,7 @@
                                          user['id'],
                                          role['id'])
             assigned_project_ids.append(project['id'])
-        body = self.client.list_user_projects(user['id'])
+        body = self.client.list_user_projects(user['id'])['projects']
         for i in body:
             fetched_project_ids.append(i['id'])
         # verifying the project ids in list
@@ -142,5 +142,5 @@
     def test_get_user(self):
         # Get a user detail
         self.data.setup_test_v3_user()
-        user = self.client.get_user(self.data.v3_user['id'])
+        user = self.client.get_user(self.data.v3_user['id'])['user']
         self.assertEqual(self.data.v3_user['id'], user['id'])
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 0654f37..7b23e66 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -48,8 +48,12 @@
     def get_tenant_by_name(cls, name):
         try:
             tenants = cls.client.list_tenants()
+            # TODO(jswarren): always retrieve 'tenants' value
+            # once both clients return full response objects
+            if 'tenants' in tenants:
+                tenants = tenants['tenants']
         except AttributeError:
-            tenants = cls.client.list_projects()
+            tenants = cls.client.list_projects()['projects']
         tenant = [t for t in tenants if t['name'] == name]
         if len(tenant) > 0:
             return tenant[0]
@@ -153,21 +157,21 @@
 
     @classmethod
     def get_user_by_name(cls, name):
-        users = cls.client.get_users()
+        users = cls.client.get_users()['users']
         user = [u for u in users if u['name'] == name]
         if len(user) > 0:
             return user[0]
 
     @classmethod
     def get_tenant_by_name(cls, name):
-        tenants = cls.client.list_projects()
+        tenants = cls.client.list_projects()['projects']
         tenant = [t for t in tenants if t['name'] == name]
         if len(tenant) > 0:
             return tenant[0]
 
     @classmethod
     def get_role_by_name(cls, name):
-        roles = cls.client.list_roles()
+        roles = cls.client.list_roles()['roles']
         role = [r for r in roles if r['name'] == name]
         if len(role) > 0:
             return role[0]
@@ -237,7 +241,7 @@
                 self.test_user,
                 password=self.test_password,
                 project_id=self.project['id'],
-                email=self.test_email)
+                email=self.test_email)['user']
             self.v3_users.append(self.v3_user)
 
         def setup_test_project(self):
@@ -246,13 +250,13 @@
             self.test_description = data_utils.rand_name('desc')
             self.project = self.client.create_project(
                 name=self.test_project,
-                description=self.test_description)
+                description=self.test_description)['project']
             self.projects.append(self.project)
 
         def setup_test_v3_role(self):
             """Set up a test v3 role."""
             self.test_role = data_utils.rand_name('role')
-            self.v3_role = self.client.create_role(self.test_role)
+            self.v3_role = self.client.create_role(self.test_role)['role']
             self.v3_roles.append(self.v3_role)
 
         def setup_test_domain(self):
@@ -261,7 +265,7 @@
             self.test_description = data_utils.rand_name('desc')
             self.domain = self.client.create_domain(
                 name=self.test_domain,
-                description=self.test_description)
+                description=self.test_description)['domain']
             self.domains.append(self.domain)
 
         @staticmethod
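Note on the ``get_tenant_by_name`` change above: while the identity clients are being migrated one at a time, the v2 client may still return a bare list of tenants, whereas the v3 client now returns the full response body keyed by resource name; that is what the TODO(jswarren) comment refers to. A minimal sketch of that compatibility pattern (the ``unwrap`` helper name is hypothetical and used only for illustration)::

    def unwrap(body, key):
        # Newer clients return the full response body as a dict keyed by the
        # resource name; older clients return the list of resources directly.
        if isinstance(body, dict) and key in body:
            return body[key]
        return body

    # Usage sketch (client objects assumed):
    # tenants = unwrap(client.list_tenants(), 'tenants')
    # projects = unwrap(client.list_projects(), 'projects')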
diff --git a/tempest/api/image/admin/v2/test_images.py b/tempest/api/image/admin/v2/test_images.py
index 1608b76..09877ba 100644
--- a/tempest/api/image/admin/v2/test_images.py
+++ b/tempest/api/image/admin/v2/test_images.py
@@ -20,7 +20,7 @@
 from tempest.api.image import base
 from tempest import config
 from tempest import test
-
+from tempest_lib import exceptions as lib_exc
 
 CONF = config.CONF
 
@@ -43,13 +43,20 @@
         image_id = body['id']
         self.addCleanup(self.client.delete_image, image_id)
         # upload an image file
-        image_file = moves.cStringIO(data_utils.random_bytes())
+        content = data_utils.random_bytes()
+        image_file = moves.cStringIO(content)
         self.client.store_image_file(image_id, image_file)
         # deactivate image
         self.admin_client.deactivate_image(image_id)
         body = self.client.show_image(image_id)
         self.assertEqual("deactivated", body['status'])
+        # non-admin user unable to download deactivated image
+        self.assertRaises(lib_exc.Forbidden, self.client.load_image_file,
+                          image_id)
         # reactivate image
         self.admin_client.reactivate_image(image_id)
         body = self.client.show_image(image_id)
         self.assertEqual("active", body['status'])
+        # non-admin user able to download image after reactivation by admin
+        body = self.client.load_image_file(image_id)
+        self.assertEqual(content, body.data)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 87013db..da0ce83 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -72,6 +72,10 @@
 
         image = cls.client.create_image(name, container_format,
                                         disk_format, **kwargs)
+        # Image objects returned by the v1 client have the image
+        # data inside a dict that is keyed against 'image'.
+        if 'image' in image:
+            image = image['image']
         cls.created_images.append(image['id'])
         return image
 
@@ -146,7 +150,7 @@
         cls.alt_tenant_id = cls.alt_img_client.tenant_id
 
     def _list_image_ids_as_alt(self):
-        image_list = self.alt_img_client.list_images()
+        image_list = self.alt_img_client.list_images()['images']
         image_ids = map(lambda x: x['id'], image_list)
         return image_ids
 
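The guard added to ``create_image`` above keeps the shared helper usable with both image clients: the v1 client nests the created image under an ``image`` key, while the v2 client returns the image fields at the top level. A small self-contained illustration of the shape difference (the example bodies below are made up)::

    # v1-style response body: image fields nested under an 'image' key
    v1_body = {'image': {'id': 'abc123', 'status': 'queued'}}
    # v2-style response body: image fields at the top level
    v2_body = {'id': 'abc123', 'status': 'queued'}

    for body in (v1_body, v2_body):
        image = body['image'] if 'image' in body else body
        print(image['id'])  # prints 'abc123' for both shapes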
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8beed32..d4dbfcd 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -45,7 +45,7 @@
 
         # Now try uploading an image file
         image_file = moves.cStringIO(data_utils.random_bytes())
-        body = self.client.update_image(image_id, data=image_file)
+        body = self.client.update_image(image_id, data=image_file)['image']
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
@@ -119,9 +119,6 @@
         img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
         img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
         cls.created_set = set(cls.created_images)
-        # 4x-4x remote image
-        cls.remote_set = set((img1, img2, img3, img4))
-        cls.standard_set = set((img5, img6, img7, img8))
         # 5x bare, 3x ami
         cls.bare_set = set((img1, img3, img4, img7, img8))
         cls.ami_set = set((img2, img5, img6))
@@ -168,14 +165,14 @@
     @test.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
     def test_index_no_params(self):
         # Simple test to see all fixture images returned
-        images_list = self.client.list_images()
+        images_list = self.client.list_images()['images']
         image_list = map(lambda x: x['id'], images_list)
         for image_id in self.created_images:
             self.assertIn(image_id, image_list)
 
     @test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
     def test_index_disk_format(self):
-        images_list = self.client.list_images(disk_format='ami')
+        images_list = self.client.list_images(disk_format='ami')['images']
         for image in images_list:
             self.assertEqual(image['disk_format'], 'ami')
         result_set = set(map(lambda x: x['id'], images_list))
@@ -184,7 +181,8 @@
 
     @test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
     def test_index_container_format(self):
-        images_list = self.client.list_images(container_format='bare')
+        images_list = (self.client.list_images(container_format='bare')
+                       ['images'])
         for image in images_list:
             self.assertEqual(image['container_format'], 'bare')
         result_set = set(map(lambda x: x['id'], images_list))
@@ -193,7 +191,7 @@
 
     @test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
     def test_index_max_size(self):
-        images_list = self.client.list_images(size_max=42)
+        images_list = self.client.list_images(size_max=42)['images']
         for image in images_list:
             self.assertTrue(image['size'] <= 42)
         result_set = set(map(lambda x: x['id'], images_list))
@@ -202,7 +200,7 @@
 
     @test.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
     def test_index_min_size(self):
-        images_list = self.client.list_images(size_min=142)
+        images_list = self.client.list_images(size_min=142)['images']
         for image in images_list:
             self.assertTrue(image['size'] >= 142)
         result_set = set(map(lambda x: x['id'], images_list))
@@ -214,7 +212,7 @@
         images_list = self.client.list_images(detail=True,
                                               status='active',
                                               sort_key='size',
-                                              sort_dir='desc')
+                                              sort_dir='desc')['images']
         top_size = images_list[0]['size']  # We have non-zero sized images
         for image in images_list:
             size = image['size']
@@ -226,7 +224,7 @@
     def test_index_name(self):
         images_list = self.client.list_images(
             detail=True,
-            name='New Remote Image dup')
+            name='New Remote Image dup')['images']
         result_set = set(map(lambda x: x['id'], images_list))
         for image in images_list:
             self.assertEqual(image['name'], 'New Remote Image dup')
@@ -272,7 +270,7 @@
         self.assertEqual(metadata['properties'], {'key1': 'value1'})
         metadata['properties'].update(req_metadata)
         metadata = self.client.update_image(
-            self.image_id, properties=metadata['properties'])
+            self.image_id, properties=metadata['properties'])['image']
 
         resp_metadata = self.client.get_image_meta(self.image_id)
         expected = {'key1': 'alt1', 'key2': 'value2'}
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 20e9bca..b446ec3 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -88,7 +88,7 @@
         self.client.wait_for_resource_deletion(image_id)
 
         # Verifying deletion
-        images = self.client.list_images()
+        images = self.client.list_images()['images']
         images_id = [item['id'] for item in images]
         self.assertNotIn(image_id, images_id)
 
@@ -164,7 +164,7 @@
         """
         Perform list action with given params and validate the result.
         """
-        images_list = self.client.list_images(params=params)
+        images_list = self.client.list_images(params=params)['images']
         # Validating params of fetched images
         for image in images_list:
             for key in params:
@@ -174,7 +174,7 @@
     @test.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
     def test_index_no_params(self):
         # Simple test to see all fixture images returned
-        images_list = self.client.list_images()
+        images_list = self.client.list_images()['images']
         image_list = map(lambda x: x['id'], images_list)
 
         for image in self.created_images:
@@ -217,7 +217,7 @@
 
         size = image['size']
         params = {"size_min": size - 500, "size_max": size + 500}
-        images_list = self.client.list_images(params=params)
+        images_list = self.client.list_images(params=params)['images']
         image_size_list = map(lambda x: x['size'], images_list)
 
         for image_size in image_size_list:
@@ -235,7 +235,7 @@
     def test_list_images_param_limit(self):
         # Test to get images by limit
         params = {"limit": 2}
-        images_list = self.client.list_images(params=params)
+        images_list = self.client.list_images(params=params)['images']
 
         self.assertEqual(len(images_list), params['limit'],
                          "Failed to get images by limit")
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 4a86aca..86b4973 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -32,7 +32,6 @@
         # dhcp agent: this is done by creating a regular port
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
-        cls.cidr = cls.subnet['cidr']
         cls.port = cls.create_port(cls.network)
 
     @test.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d')
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index cb113a7..63395cc 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -61,7 +61,7 @@
 
         # Change quotas for tenant
         quota_set = self.admin_client.update_quotas(tenant_id,
-                                                    **new_quotas)
+                                                    **new_quotas)['quota']
         self.addCleanup(self.admin_client.reset_quotas, tenant_id)
         for key, value in six.iteritems(new_quotas):
             self.assertEqual(value, quota_set[key])
diff --git a/tempest/api/network/admin/test_routers_dvr.py b/tempest/api/network/admin/test_routers_dvr.py
index 9e2d080..365698d 100644
--- a/tempest/api/network/admin/test_routers_dvr.py
+++ b/tempest/api/network/admin/test_routers_dvr.py
@@ -89,7 +89,9 @@
         attribute will be set to True
         """
         name = data_utils.rand_name('router')
-        router = self.admin_client.create_router(name, distributed=False)
+        # router needs to be in admin state down in order to be upgraded to DVR
+        router = self.admin_client.create_router(name, distributed=False,
+                                                 admin_state_up=False)
         self.addCleanup(self.admin_client.delete_router,
                         router['router']['id'])
         self.assertFalse(router['router']['distributed'])
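The new ``admin_state_up=False`` argument in ``test_routers_dvr.py`` reflects the Neutron rule that a centralized router can only be migrated to DVR while it is administratively down. A rough sketch of the intended flow, assuming the rest of the test (not shown in this hunk) flips the ``distributed`` flag via ``update_router``; client method signatures are assumed, not taken from this change::

    def upgrade_router_to_dvr(admin_client, name):
        # Create the router centralized and administratively down; Neutron
        # only allows flipping the distributed flag on a router whose admin
        # state is down (sketch only).
        router = admin_client.create_router(name, distributed=False,
                                            admin_state_up=False)
        router_id = router['router']['id']
        body = admin_client.update_router(router_id, distributed=True)
        return body['router']['distributed']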
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index fda8fc3..fa6fa33 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -77,7 +77,6 @@
     @classmethod
     def resource_setup(cls):
         super(BaseNetworkTest, cls).resource_setup()
-        cls.network_cfg = CONF.network
         cls.networks = []
         cls.subnets = []
         cls.ports = []
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index be7174b..f56688f 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -39,7 +39,8 @@
                           'ext-gw-mode', 'binding', 'quotas',
                           'agent', 'dhcp_agent_scheduler', 'provider',
                           'router', 'extraroute', 'external-net',
-                          'allowed-address-pairs', 'extra_dhcp_opt']
+                          'allowed-address-pairs', 'extra_dhcp_opt',
+                          'metering', 'dvr', 'service-type']
         expected_alias = [ext for ext in expected_alias if
                           test.is_extension_enabled(ext, 'network')]
         actual_alias = list()
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index f0923d2..4b4a4e2 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -25,7 +25,7 @@
 
 class FloatingIPTestJSON(base.BaseNetworkTest):
     """
-    Tests the following operations in the Quantum API using the REST client for
+    Tests the following operations in the Neutron API using the REST client for
     Neutron:
 
         Create a Floating IP
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 842a56d..5685be8 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -66,7 +66,6 @@
         cls.name = cls.network['name']
         cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
                                                                cls._ip_version)
-        cls.cidr = cls.subnet['cidr']
         cls._subnet_data = {6: {'gateway':
                                 str(cls._get_gateway_from_tempest_conf(6)),
                                 'allocation_pools':
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 4f54562..6a8fbec 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -156,26 +156,21 @@
         network = self.create_network()
         subnet = self.create_subnet(network)
         self.addCleanup(self.client.delete_subnet, subnet['id'])
-        # Create two ports specifying a fixed_ips
-        address = self._get_ipaddress_from_tempest_conf()
-        _fixed_ip_1 = str(address + 3)
-        _fixed_ip_2 = str(address + 4)
-        fixed_ips_1 = [{'ip_address': _fixed_ip_1}]
-        port_1 = self.client.create_port(network_id=network['id'],
-                                         fixed_ips=fixed_ips_1)
+        # Create two ports
+        port_1 = self.client.create_port(network_id=network['id'])
         self.addCleanup(self.client.delete_port, port_1['port']['id'])
-        fixed_ips_2 = [{'ip_address': _fixed_ip_2}]
-        port_2 = self.client.create_port(network_id=network['id'],
-                                         fixed_ips=fixed_ips_2)
+        port_2 = self.client.create_port(network_id=network['id'])
         self.addCleanup(self.client.delete_port, port_2['port']['id'])
         # List ports filtered by fixed_ips
-        fixed_ips = 'ip_address=' + _fixed_ip_1
+        port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
+        fixed_ips = 'ip_address=' + port_1_fixed_ip
         port_list = self.client.list_ports(fixed_ips=fixed_ips)
+        # Check that we got the desired port
         ports = port_list['ports']
         self.assertEqual(len(ports), 1)
         self.assertEqual(ports[0]['id'], port_1['port']['id'])
         self.assertEqual(ports[0]['fixed_ips'][0]['ip_address'],
-                         _fixed_ip_1)
+                         port_1_fixed_ip)
         self.assertEqual(ports[0]['network_id'], network['id'])
 
     @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 78b51c8..aa3d274 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -275,13 +275,13 @@
         next_cidr = netaddr.IPNetwork(self.tenant_cidr)
         # Prepare to build several routes
         test_routes = []
-        routes_num = 5
+        routes_num = 4
         # Create a router
-        self.router = self._create_router(
+        router = self._create_router(
             data_utils.rand_name('router-'), True)
         self.addCleanup(
             self._delete_extra_routes,
-            self.router['id'])
+            router['id'])
         # Update router extra route, second ip of the range is
         # used as next hop
         for i in range(routes_num):
@@ -290,7 +290,7 @@
             next_cidr = next_cidr.next()
 
             # Add router interface with subnet id
-            self.create_router_interface(self.router['id'], subnet['id'])
+            self.create_router_interface(router['id'], subnet['id'])
 
             cidr = netaddr.IPNetwork(subnet['cidr'])
             next_hop = str(cidr[2])
@@ -300,9 +300,9 @@
             )
 
         test_routes.sort(key=lambda x: x['destination'])
-        extra_route = self.client.update_extra_routes(self.router['id'],
+        extra_route = self.client.update_extra_routes(router['id'],
                                                       test_routes)
-        show_body = self.client.show_router(self.router['id'])
+        show_body = self.client.show_router(router['id'])
         # Assert the number of routes
         self.assertEqual(routes_num, len(extra_route['router']['routes']))
         self.assertEqual(routes_num, len(show_body['router']['routes']))
@@ -327,13 +327,13 @@
 
     @test.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
     def test_update_router_admin_state(self):
-        self.router = self._create_router(data_utils.rand_name('router-'))
-        self.assertFalse(self.router['admin_state_up'])
+        router = self._create_router(data_utils.rand_name('router-'))
+        self.assertFalse(router['admin_state_up'])
         # Update router admin state
-        update_body = self.client.update_router(self.router['id'],
+        update_body = self.client.update_router(router['id'],
                                                 admin_state_up=True)
         self.assertTrue(update_body['router']['admin_state_up'])
-        show_body = self.client.show_router(self.router['id'])
+        show_body = self.client.show_router(router['id'])
         self.assertTrue(show_body['router']['admin_state_up'])
 
     @test.attr(type='smoke')
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 266f726..5b6b0fa 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -96,7 +96,7 @@
     @classmethod
     def _create_keypair(cls, name_start='keypair-heat-'):
         kp_name = data_utils.rand_name(name_start)
-        body = cls.keypairs_client.create_keypair(kp_name)
+        body = cls.keypairs_client.create_keypair(name=kp_name)['keypair']
         cls.keypairs.append(kp_name)
         return body
 
@@ -163,7 +163,7 @@
 
     def list_resources(self, stack_identifier):
         """Get a dict mapping of resource names to types."""
-        resources = self.client.list_resources(stack_identifier)
+        resources = self.client.list_resources(stack_identifier)['resources']
         self.assertIsInstance(resources, list)
         for res in resources:
             self.assert_fields_in_dict(res, 'logical_resource_id',
@@ -174,5 +174,5 @@
                     for r in resources)
 
     def get_stack_output(self, stack_identifier, output_key):
-        body = self.client.show_stack(stack_identifier)
+        body = self.client.show_stack(stack_identifier)['stack']
         return self.stack_output(body, output_key)
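The orchestration hunks above all follow the same pattern: the service clients now return the
full deserialized response body, so callers index into the payload under its top-level key.
A minimal sketch of the pattern, assuming ``self.client`` is the stacks client used in these
tests::

    body = self.client.show_stack(stack_identifier)   # full response body (dict)
    stack = body['stack']                              # payload under its top-level key
    resources = self.client.list_resources(stack_identifier)['resources']
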
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index b27d6c9..cb6d1db 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -75,7 +75,8 @@
         cls.stack_id = cls.stack_identifier.split('/')[1]
         try:
             cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
-            resources = cls.client.list_resources(cls.stack_identifier)
+            resources = (cls.client.list_resources(cls.stack_identifier)
+                         ['resources'])
         except exceptions.TimeoutException as e:
             if CONF.compute_feature_enabled.console_output:
                 # attempt to log the server console to help with debugging
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 494b6fd..e37587c 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -47,7 +47,7 @@
         cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
 
     def _list_stacks(self, expected_num=None, **filter_kwargs):
-        stacks = self.client.list_stacks(params=filter_kwargs)
+        stacks = self.client.list_stacks(params=filter_kwargs)['stacks']
         self.assertIsInstance(stacks, list)
         if expected_num is not None:
             self.assertEqual(expected_num, len(stacks))
@@ -63,7 +63,7 @@
     @test.idempotent_id('992f96e3-41ee-4ff6-91c7-bcfb670c0919')
     def test_stack_show(self):
         """Getting details about created stack should be possible."""
-        stack = self.client.show_stack(self.stack_name)
+        stack = self.client.show_stack(self.stack_name)['stack']
         self.assertIsInstance(stack, dict)
         self.assert_fields_in_dict(stack, 'stack_name', 'id', 'links',
                                    'parameters', 'outputs', 'disable_rollback',
@@ -100,7 +100,7 @@
     def test_show_resource(self):
         """Getting details about created resource should be possible."""
         resource = self.client.show_resource(self.stack_identifier,
-                                             self.resource_name)
+                                             self.resource_name)['resource']
         self.assertIsInstance(resource, dict)
         self.assert_fields_in_dict(resource, 'resource_name', 'description',
                                    'links', 'logical_resource_id',
@@ -115,14 +115,14 @@
         """Getting metadata for created resources should be possible."""
         metadata = self.client.show_resource_metadata(
             self.stack_identifier,
-            self.resource_name)
+            self.resource_name)['metadata']
         self.assertIsInstance(metadata, dict)
         self.assertEqual(['Tom', 'Stinky'], metadata.get('kittens', None))
 
     @test.idempotent_id('46567533-0a7f-483b-8942-fa19e0f17839')
     def test_list_events(self):
         """Getting list of created events for the stack should be possible."""
-        events = self.client.list_events(self.stack_identifier)
+        events = self.client.list_events(self.stack_identifier)['events']
         self.assertIsInstance(events, list)
 
         for event in events:
@@ -138,12 +138,12 @@
     def test_show_event(self):
         """Getting details about an event should be possible."""
         events = self.client.list_resource_events(self.stack_identifier,
-                                                  self.resource_name)
+                                                  self.resource_name)['events']
         self.assertNotEqual([], events)
         events.sort(key=lambda event: event['event_time'])
         event_id = events[0]['id']
         event = self.client.show_event(self.stack_identifier,
-                                       self.resource_name, event_id)
+                                       self.resource_name, event_id)['event']
         self.assertIsInstance(event, dict)
         self.assert_fields_in_dict(event, 'resource_name', 'event_time',
                                    'links', 'logical_resource_id',
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 6a04dbd..b4d7fa0 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -43,7 +43,8 @@
 
         cls.stack_id = cls.stack_identifier.split('/')[1]
         cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
-        resources = cls.client.list_resources(cls.stack_identifier)
+        resources = (cls.client.list_resources(cls.stack_identifier)
+                     ['resources'])
         cls.test_resources = {}
         for resource in resources:
             cls.test_resources[resource['logical_resource_id']] = resource
@@ -70,7 +71,7 @@
 
     @test.idempotent_id('8d77dec7-91fd-45a6-943d-5abd45e338a4')
     def test_stack_keypairs_output(self):
-        stack = self.client.show_stack(self.stack_name)
+        stack = self.client.show_stack(self.stack_name)['stack']
         self.assertIsInstance(stack, dict)
 
         output_map = {}
diff --git a/tempest/api/orchestration/stacks/test_resource_types.py b/tempest/api/orchestration/stacks/test_resource_types.py
index 8f15f9c..8cf40de 100644
--- a/tempest/api/orchestration/stacks/test_resource_types.py
+++ b/tempest/api/orchestration/stacks/test_resource_types.py
@@ -20,7 +20,7 @@
     @test.idempotent_id('7123d082-3577-4a30-8f00-f805327c4ffd')
     def test_resource_type_list(self):
         """Verify it is possible to list resource types."""
-        resource_types = self.client.list_resource_types()
+        resource_types = self.client.list_resource_types()['resource_types']
         self.assertIsInstance(resource_types, list)
         self.assertIn('OS::Nova::Server', resource_types)
 
@@ -28,7 +28,7 @@
     @test.idempotent_id('0e85a483-828b-4a28-a0e3-f0a21809192b')
     def test_resource_type_show(self):
         """Verify it is possible to get schema about resource types."""
-        resource_types = self.client.list_resource_types()
+        resource_types = self.client.list_resource_types()['resource_types']
         self.assertNotEmpty(resource_types)
 
         for resource_type in resource_types:
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index abec906..f766b00 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -30,7 +30,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('d35d628c-07f6-4674-85a1-74db9919e986')
     def test_stack_list_responds(self):
-        stacks = self.client.list_stacks()
+        stacks = self.client.list_stacks()['stacks']
         self.assertIsInstance(stacks, list)
 
     @test.attr(type='smoke')
@@ -47,20 +47,20 @@
         self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
 
         # check for stack in list
-        stacks = self.client.list_stacks()
+        stacks = self.client.list_stacks()['stacks']
         list_ids = list([stack['id'] for stack in stacks])
         self.assertIn(stack_id, list_ids)
 
         # fetch the stack
-        stack = self.client.show_stack(stack_identifier)
+        stack = self.client.show_stack(stack_identifier)['stack']
         self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
 
         # fetch the stack by name
-        stack = self.client.show_stack(stack_name)
+        stack = self.client.show_stack(stack_name)['stack']
         self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
 
         # fetch the stack by id
-        stack = self.client.show_stack(stack_id)
+        stack = self.client.show_stack(stack_id)['stack']
         self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
 
         # delete the stack
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 30166df..c0f1c4b 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -55,7 +55,8 @@
         cls.stack_id = cls.stack_identifier.split('/')[1]
         cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
         cls.test_resources = {}
-        resources = cls.client.list_resources(cls.stack_identifier)
+        resources = (cls.client.list_resources(cls.stack_identifier)
+                     ['resources'])
         for resource in resources:
             cls.test_resources[resource['logical_resource_id']] = resource
 
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 3be807b..5d1784f 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -85,6 +85,11 @@
         body = client.create_image(
             data_utils.rand_name('image'), container_format='bare',
             disk_format='raw', visibility='private')
+        # TODO(jswarren) Move ['image'] up to initial body value assignment
+        # once both v1 and v2 glance clients include the full response
+        # object.
+        if 'image' in body:
+            body = body['image']
         cls.image_ids.append(body['id'])
         return body
 
@@ -121,3 +126,27 @@
             'Sample for metric:%s with query:%s has not been added to the '
             'database within %d seconds' % (metric, query,
                                             CONF.compute.build_timeout))
+
+
+class BaseTelemetryAdminTest(BaseTelemetryTest):
+    """Base test case class for admin Telemetry API tests."""
+
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseTelemetryAdminTest, cls).setup_clients()
+        cls.telemetry_admin_client = cls.os_adm.telemetry_client
+
+    def await_events(self, query):
+        timeout = CONF.compute.build_timeout
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
+            body = self.telemetry_admin_client.list_events(query)
+            if body:
+                return body
+            time.sleep(CONF.compute.build_interval)
+
+        raise exceptions.TimeoutException(
+            'Event with query:%s has not been added to the '
+            'database within %d seconds' % (query, CONF.compute.build_timeout))
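The new ``await_events`` helper polls ``list_events`` until results arrive or the compute
build timeout expires. It relies on ``time``, ``timeutils`` and ``exceptions`` being available
at module level; a sketch of the assumed imports (most are likely already present for
``await_samples``, and ``timeutils`` is presumably ``oslo_utils.timeutils``; none are shown in
this hunk)::

    import time

    from oslo_utils import timeutils  # assumed import for the polling helper

    from tempest import config
    from tempest import exceptions
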
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 52793c8..71a00c9 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -10,6 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest_lib import decorators
 import testtools
 
 from tempest.api.telemetry import base
@@ -29,8 +30,7 @@
                                     "is disabled")
 
     @test.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caad')
-    @testtools.skipIf(not CONF.service_available.nova,
-                      "Nova is not available.")
+    @test.services('compute')
     def test_check_nova_notification(self):
 
         body = self.create_server()
@@ -71,3 +71,28 @@
 
         for metric in self.glance_v2_notifications:
             self.await_samples(metric, query)
+
+
+class TelemetryNotificationAdminAPITestJSON(base.BaseTelemetryAdminTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(TelemetryNotificationAdminAPITestJSON, cls).skip_checks()
+        if CONF.telemetry.too_slow_to_test:
+            raise cls.skipException("Ceilometer feature for fast work mysql "
+                                    "is disabled")
+
+    @test.idempotent_id('29604198-8b45-4fc0-8af8-1cae4f94ebe9')
+    @test.services('compute')
+    @decorators.skip_because(bug='1480490')
+    def test_check_nova_notification_event_and_meter(self):
+
+        body = self.create_server()
+
+        if CONF.telemetry_feature_enabled.events:
+            query = ('instance_id', 'eq', body['id'])
+            self.await_events(query)
+
+        query = ('resource', 'eq', body['id'])
+        for metric in self.nova_notifications:
+            self.await_samples(metric, query)
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index c7e989d..bbdf4a8 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -66,7 +66,7 @@
         else:
             extra_specs = {spec_key_without_prefix: backend_name_key}
         self.type = self.volume_types_client.create_volume_type(
-            type_name, extra_specs=extra_specs)
+            type_name, extra_specs=extra_specs)['volume_type']
         self.volume_type_id_list.append(self.type['id'])
 
         params = {self.name_field: vol_name, 'volume_type': type_name}
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index c860b4b..66973a7 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -41,8 +41,8 @@
         # Create a test shared snapshot for tests
         snap_name = data_utils.rand_name(cls.__name__ + '-Snapshot')
         params = {cls.name_field: snap_name}
-        cls.snapshot = \
-            cls.client.create_snapshot(cls.volume['id'], **params)
+        cls.snapshot = cls.client.create_snapshot(
+            cls.volume['id'], **params)['snapshot']
         cls.client.wait_for_snapshot_status(cls.snapshot['id'],
                                             'available')
 
@@ -86,8 +86,8 @@
         status = 'creating'
         self.admin_snapshots_client.\
             reset_snapshot_status(self.snapshot['id'], status)
-        snapshot_get \
-            = self.admin_snapshots_client.show_snapshot(self.snapshot['id'])
+        snapshot_get = self.admin_snapshots_client.show_snapshot(
+            self.snapshot['id'])['snapshot']
         self.assertEqual(status, snapshot_get['status'])
 
     @test.idempotent_id('41288afd-d463-485e-8f6e-4eea159413eb')
@@ -103,8 +103,8 @@
         progress_alias = self._get_progress_alias()
         self.client.update_snapshot_status(self.snapshot['id'],
                                            status, progress)
-        snapshot_get \
-            = self.admin_snapshots_client.show_snapshot(self.snapshot['id'])
+        snapshot_get = self.admin_snapshots_client.show_snapshot(
+            self.snapshot['id'])['snapshot']
         self.assertEqual(status, snapshot_get['status'])
         self.assertEqual(progress, snapshot_get[progress_alias])
 
diff --git a/tempest/api/volume/admin/test_volume_hosts.py b/tempest/api/volume/admin/test_volume_hosts.py
index dd14d8c..b28488a 100644
--- a/tempest/api/volume/admin/test_volume_hosts.py
+++ b/tempest/api/volume/admin/test_volume_hosts.py
@@ -21,7 +21,7 @@
 
     @test.idempotent_id('d5f3efa2-6684-4190-9ced-1c2f526352ad')
     def test_list_hosts(self):
-        hosts = self.hosts_client.list_hosts()
+        hosts = self.hosts_client.list_hosts()['hosts']
         self.assertTrue(len(hosts) >= 2, "No. of hosts are < 2,"
                         "response of list hosts is: % s" % hosts)
 
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
index 4f80a31..74fffb9 100644
--- a/tempest/api/volume/admin/test_volume_services.py
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -26,19 +26,22 @@
     @classmethod
     def resource_setup(cls):
         super(VolumesServicesV2TestJSON, cls).resource_setup()
-        cls.services = cls.admin_volume_services_client.list_services()
+        cls.services = (cls.admin_volume_services_client.list_services()
+                        ['services'])
         cls.host_name = cls.services[0]['host']
         cls.binary_name = cls.services[0]['binary']
 
     @test.idempotent_id('e0218299-0a59-4f43-8b2b-f1c035b3d26d')
     def test_list_services(self):
-        services = self.admin_volume_services_client.list_services()
+        services = (self.admin_volume_services_client.list_services()
+                    ['services'])
         self.assertNotEqual(0, len(services))
 
     @test.idempotent_id('63a3e1ca-37ee-4983-826d-83276a370d25')
     def test_get_service_by_service_binary_name(self):
         params = {'binary': self.binary_name}
-        services = self.admin_volume_services_client.list_services(params)
+        services = (self.admin_volume_services_client.list_services(params)
+                    ['services'])
         self.assertNotEqual(0, len(services))
         for service in services:
             self.assertEqual(self.binary_name, service['binary'])
@@ -49,7 +52,8 @@
                             service['host'] == self.host_name]
         params = {'host': self.host_name}
 
-        services = self.admin_volume_services_client.list_services(params)
+        services = (self.admin_volume_services_client.list_services(params)
+                    ['services'])
 
         # we could have a periodic job checkin between the 2 service
         # lookups, so only compare binary lists.
@@ -63,7 +67,8 @@
     def test_get_service_by_service_and_host_name(self):
         params = {'host': self.host_name, 'binary': self.binary_name}
 
-        services = self.admin_volume_services_client.list_services(params)
+        services = (self.admin_volume_services_client.list_services(params)
+                    ['services'])
         self.assertEqual(1, len(services))
         self.assertEqual(self.host_name, services[0]['host'])
         self.assertEqual(self.binary_name, services[0]['binary'])
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index b79c185..dd69b7f 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -33,7 +33,7 @@
     @test.idempotent_id('9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54')
     def test_volume_type_list(self):
         # List Volume types.
-        body = self.volume_types_client.list_volume_types()
+        body = self.volume_types_client.list_volume_types()['volume_types']
         self.assertIsInstance(body, list)
 
     @test.idempotent_id('c03cc62c-f4e9-4623-91ec-64ce2f9c1260')
@@ -51,7 +51,7 @@
             vol_type_name = data_utils.rand_name("volume-type")
             vol_type = self.volume_types_client.create_volume_type(
                 vol_type_name,
-                extra_specs=extra_specs)
+                extra_specs=extra_specs)['volume_type']
             volume_types.append(vol_type)
             self.addCleanup(self._delete_volume_type, vol_type['id'])
         params = {self.name_field: vol_name,
@@ -97,7 +97,7 @@
                        "vendor_name": vendor}
         body = self.volume_types_client.create_volume_type(
             name,
-            extra_specs=extra_specs)
+            extra_specs=extra_specs)['volume_type']
         self.assertIn('id', body)
         self.addCleanup(self._delete_volume_type, body['id'])
         self.assertIn('name', body)
@@ -107,7 +107,7 @@
         self.assertTrue(body['id'] is not None,
                         "Field volume_type id is empty or not found.")
         fetched_volume_type = self.volume_types_client.show_volume_type(
-            body['id'])
+            body['id'])['volume_type']
         self.assertEqual(name, fetched_volume_type['name'],
                          'The fetched Volume_type is different '
                          'from the created Volume_type')
@@ -124,13 +124,13 @@
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type")
-        body = self.volume_types_client.create_volume_type(name)
+        body = self.volume_types_client.create_volume_type(name)['volume_type']
         self.addCleanup(self._delete_volume_type, body['id'])
 
         # Create encryption type
         encryption_type = self.volume_types_client.create_encryption_type(
             body['id'], provider=provider,
-            control_location=control_location)
+            control_location=control_location)['encryption']
         self.assertIn('volume_type_id', encryption_type)
         self.assertEqual(provider, encryption_type['provider'],
                          "The created encryption_type provider is not equal "
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index c840697..bec803c 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -25,7 +25,7 @@
         super(VolumeTypesExtraSpecsV2Test, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type')
         cls.volume_type = cls.volume_types_client.create_volume_type(
-            vol_type_name)
+            vol_type_name)['volume_type']
 
     @classmethod
     def resource_cleanup(cls):
@@ -37,11 +37,11 @@
         # List Volume types extra specs.
         extra_specs = {"spec1": "val1"}
         body = self.volume_types_client.create_volume_type_extra_specs(
-            self.volume_type['id'], extra_specs)
+            self.volume_type['id'], extra_specs)['extra_specs']
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
         body = self.volume_types_client.list_volume_types_extra_specs(
-            self.volume_type['id'])
+            self.volume_type['id'])['extra_specs']
         self.assertIsInstance(body, dict)
         self.assertIn('spec1', body)
 
@@ -50,7 +50,7 @@
         # Update volume type extra specs
         extra_specs = {"spec2": "val1"}
         body = self.volume_types_client.create_volume_type_extra_specs(
-            self.volume_type['id'], extra_specs)
+            self.volume_type['id'], extra_specs)['extra_specs']
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
 
@@ -69,7 +69,7 @@
         extra_specs = {"spec3": "val1"}
         body = self.volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'],
-            extra_specs)
+            extra_specs)['extra_specs']
         self.assertEqual(extra_specs, body,
                          "Volume type extra spec incorrectly created")
 
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index e49e8b2..040ef53 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -31,7 +31,7 @@
         cls.extra_specs = {"spec1": "val1"}
         cls.volume_type = cls.volume_types_client.create_volume_type(
             vol_type_name,
-            extra_specs=cls.extra_specs)
+            extra_specs=cls.extra_specs)['volume_type']
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 2c545a7..86fa668 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -38,13 +38,17 @@
 
         cls.volume = cls.create_volume()
 
+    def _delete_backup(self, backup_id):
+        self.backups_adm_client.delete_backup(backup_id)
+        self.backups_adm_client.wait_for_backup_deletion(backup_id)
+
     @test.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
     def test_volume_backup_create_get_detailed_list_restore_delete(self):
         # Create backup
         backup_name = data_utils.rand_name('Backup')
         create_backup = self.backups_adm_client.create_backup
         backup = create_backup(self.volume['id'],
-                               name=backup_name)
+                               name=backup_name)['backup']
         self.addCleanup(self.backups_adm_client.delete_backup,
                         backup['id'])
         self.assertEqual(backup_name, backup['name'])
@@ -54,16 +58,17 @@
                                                        'available')
 
         # Get a given backup
-        backup = self.backups_adm_client.show_backup(backup['id'])
+        backup = self.backups_adm_client.show_backup(backup['id'])['backup']
         self.assertEqual(backup_name, backup['name'])
 
         # Get all backups with detail
-        backups = self.backups_adm_client.list_backups(detail=True)
+        backups = self.backups_adm_client.list_backups(detail=True)['backups']
         self.assertIn((backup['name'], backup['id']),
                       [(m['name'], m['id']) for m in backups])
 
         # Restore backup
-        restore = self.backups_adm_client.restore_backup(backup['id'])
+        restore = self.backups_adm_client.restore_backup(
+            backup['id'])['restore']
 
         # Delete backup
         self.addCleanup(self.admin_volume_client.delete_volume,
@@ -74,6 +79,55 @@
         self.admin_volume_client.wait_for_volume_status(
             restore['volume_id'], 'available')
 
+    @test.idempotent_id('a99c54a1-dd80-4724-8a13-13bf58d4068d')
+    def test_volume_backup_export_import(self):
+        # Create backup
+        backup_name = data_utils.rand_name('Backup')
+        backup = (self.backups_adm_client.create_backup(self.volume['id'],
+                                                        name=backup_name)
+                  ['backup'])
+        self.addCleanup(self._delete_backup, backup['id'])
+        self.assertEqual(backup_name, backup['name'])
+        self.backups_adm_client.wait_for_backup_status(backup['id'],
+                                                       'available')
+
+        # Export Backup
+        export_backup = (self.backups_adm_client.export_backup(backup['id'])
+                         ['backup-record'])
+        self.assertIn('backup_service', export_backup)
+        self.assertIn('backup_url', export_backup)
+        self.assertTrue(export_backup['backup_service'].startswith(
+                        'cinder.backup.drivers'))
+        self.assertIsNotNone(export_backup['backup_url'])
+
+        # Import Backup
+        import_backup = self.backups_adm_client.import_backup(
+            backup_service=export_backup['backup_service'],
+            backup_url=export_backup['backup_url'])['backup']
+        self.addCleanup(self._delete_backup, import_backup['id'])
+        self.assertIn("id", import_backup)
+        self.backups_adm_client.wait_for_backup_status(import_backup['id'],
+                                                       'available')
+
+        # Verify Import Backup
+        backups = self.backups_adm_client.list_backups(detail=True)['backups']
+        self.assertIn(import_backup['id'], [b['id'] for b in backups])
+
+        # Restore backup
+        restore = (self.backups_adm_client.restore_backup(import_backup['id'])
+                   ['restore'])
+        self.addCleanup(self.admin_volume_client.delete_volume,
+                        restore['volume_id'])
+        self.assertEqual(import_backup['id'], restore['backup_id'])
+        self.admin_volume_client.wait_for_volume_status(restore['volume_id'],
+                                                        'available')
+
+        # Verify that the restored volume appears in the volume list
+        volumes = self.admin_volume_client.list_volumes()
+        self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
+        self.backups_adm_client.wait_for_backup_status(import_backup['id'],
+                                                       'available')
+
 
 class VolumesBackupsV1Test(VolumesBackupsV2Test):
     _api_version = 1
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index f8ae5eb..c987100 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -62,6 +62,7 @@
         super(BaseVolumeTest, cls).setup_clients()
         cls.servers_client = cls.os.servers_client
         cls.networks_client = cls.os.networks_client
+        cls.images_client = cls.os.images_client
 
         if cls._api_version == 1:
             cls.snapshots_client = cls.os.snapshots_client
@@ -121,8 +122,8 @@
     @classmethod
     def create_snapshot(cls, volume_id=1, **kwargs):
         """Wrapper utility that returns a test snapshot."""
-        snapshot = cls.snapshots_client.create_snapshot(volume_id,
-                                                        **kwargs)
+        snapshot = cls.snapshots_client.create_snapshot(
+            volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
         cls.snapshots_client.wait_for_snapshot_status(snapshot['id'],
                                                       'available')
@@ -216,7 +217,7 @@
         name = name or data_utils.rand_name(cls.__name__ + '-QoS')
         consumer = consumer or 'front-end'
         qos_specs = cls.volume_qos_client.create_qos(name, consumer,
-                                                     **kwargs)
+                                                     **kwargs)['qos_specs']
         cls.qos_specs.append(qos_specs['id'])
         return qos_specs
 
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index f188fa9..366b8d2 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -31,7 +31,8 @@
     @test.idempotent_id('01f1ae88-eba9-4c6b-a011-6f7ace06b725')
     def test_get_availability_zone_list(self):
         # List of availability zone
-        availability_zone = self.client.list_availability_zones()
+        availability_zone = (self.client.list_availability_zones()
+                             ['availabilityZoneInfo'])
         self.assertTrue(len(availability_zone) > 0)
 
 
diff --git a/tempest/api/volume/test_extensions.py b/tempest/api/volume/test_extensions.py
index 17db45f..cce9ace 100644
--- a/tempest/api/volume/test_extensions.py
+++ b/tempest/api/volume/test_extensions.py
@@ -30,7 +30,8 @@
     @test.idempotent_id('94607eb0-43a5-47ca-82aa-736b41bd2e2c')
     def test_list_extensions(self):
         # List of all extensions
-        extensions = self.volumes_extension_client.list_extensions()
+        extensions = (self.volumes_extension_client.list_extensions()
+                      ['extensions'])
         if len(CONF.volume_feature_enabled.api_extensions) == 0:
             raise self.skipException('There are not any extensions configured')
         extension_list = [extension.get('alias') for extension in extensions]
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
index 84fd7f6..2f7c3df 100644
--- a/tempest/api/volume/test_qos.py
+++ b/tempest/api/volume/test_qos.py
@@ -47,13 +47,13 @@
         self.volume_qos_client.wait_for_resource_deletion(body['id'])
 
         # validate the deletion
-        list_qos = self.volume_qos_client.list_qos()
+        list_qos = self.volume_qos_client.list_qos()['qos_specs']
         self.assertNotIn(body, list_qos)
 
     def _create_test_volume_type(self):
         vol_type_name = utils.rand_name("volume-type")
         vol_type = self.volume_types_client.create_volume_type(
-            vol_type_name)
+            vol_type_name)['volume_type']
         self.addCleanup(self.volume_types_client.delete_volume_type,
                         vol_type['id'])
         return vol_type
@@ -64,7 +64,7 @@
 
     def _test_get_association_qos(self):
         body = self.volume_qos_client.show_association_qos(
-            self.created_qos['id'])
+            self.created_qos['id'])['qos_associations']
 
         associations = []
         for association in body:
@@ -99,24 +99,27 @@
     @test.idempotent_id('7aa214cc-ac1a-4397-931f-3bb2e83bb0fd')
     def test_get_qos(self):
         """Tests the detail of a given qos-specs"""
-        body = self.volume_qos_client.show_qos(self.created_qos['id'])
+        body = self.volume_qos_client.show_qos(
+            self.created_qos['id'])['qos_specs']
         self.assertEqual(self.qos_name, body['name'])
         self.assertEqual(self.qos_consumer, body['consumer'])
 
     @test.idempotent_id('75e04226-bcf7-4595-a34b-fdf0736f38fc')
     def test_list_qos(self):
         """Tests the list of all qos-specs"""
-        body = self.volume_qos_client.list_qos()
+        body = self.volume_qos_client.list_qos()['qos_specs']
         self.assertIn(self.created_qos, body)
 
     @test.idempotent_id('ed00fd85-4494-45f2-8ceb-9e2048919aed')
     def test_set_unset_qos_key(self):
         """Test the addition of a specs key to qos-specs"""
         args = {'iops_bytes': '500'}
-        body = self.volume_qos_client.set_qos_key(self.created_qos['id'],
-                                                  iops_bytes='500')
+        body = self.volume_qos_client.set_qos_key(
+            self.created_qos['id'],
+            iops_bytes='500')['qos_specs']
         self.assertEqual(args, body)
-        body = self.volume_qos_client.show_qos(self.created_qos['id'])
+        body = self.volume_qos_client.show_qos(
+            self.created_qos['id'])['qos_specs']
         self.assertEqual(args['iops_bytes'], body['specs']['iops_bytes'])
 
         # test the deletion of a specs key from qos-specs
@@ -125,7 +128,8 @@
         operation = 'qos-key-unset'
         self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
                                                        operation, keys)
-        body = self.volume_qos_client.show_qos(self.created_qos['id'])
+        body = self.volume_qos_client.show_qos(
+            self.created_qos['id'])['qos_specs']
         self.assertNotIn(keys[0], body['specs'])
 
     @test.idempotent_id('1dd93c76-6420-485d-a771-874044c416ac')
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index 641317a..ce6ba90 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -48,16 +48,18 @@
                     "key3": "value3"}
         expected = {"key2": "value2",
                     "key3": "value3"}
-        body = self.client.create_snapshot_metadata(self.snapshot_id,
-                                                    metadata)
+        body = self.client.create_snapshot_metadata(
+            self.snapshot_id, metadata)['metadata']
         # Get the metadata of the snapshot
-        body = self.client.show_snapshot_metadata(self.snapshot_id)
+        body = self.client.show_snapshot_metadata(
+            self.snapshot_id)['metadata']
         self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
 
         # Delete one item metadata of the snapshot
         self.client.delete_snapshot_metadata_item(
             self.snapshot_id, "key1")
-        body = self.client.show_snapshot_metadata(self.snapshot_id)
+        body = self.client.show_snapshot_metadata(
+            self.snapshot_id)['metadata']
         self.assertThat(body.items(), matchers.ContainsAll(expected.items()))
         self.assertNotIn("key1", body)
 
@@ -70,17 +72,19 @@
         update = {"key3": "value3_update",
                   "key4": "value4"}
         # Create metadata for the snapshot
-        body = self.client.create_snapshot_metadata(self.snapshot_id,
-                                                    metadata)
+        body = self.client.create_snapshot_metadata(
+            self.snapshot_id, metadata)['metadata']
         # Get the metadata of the snapshot
-        body = self.client.show_snapshot_metadata(self.snapshot_id)
+        body = self.client.show_snapshot_metadata(
+            self.snapshot_id)['metadata']
         self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
 
         # Update metadata item
         body = self.client.update_snapshot_metadata(
-            self.snapshot_id, update)
+            self.snapshot_id, update)['metadata']
         # Get the metadata of the snapshot
-        body = self.client.show_snapshot_metadata(self.snapshot_id)
+        body = self.client.show_snapshot_metadata(
+            self.snapshot_id)['metadata']
         self.assertEqual(update, body)
 
     @test.idempotent_id('e8ff85c5-8f97-477f-806a-3ac364a949ed')
@@ -94,16 +98,18 @@
                   "key2": "value2",
                   "key3": "value3_update"}
         # Create metadata for the snapshot
-        body = self.client.create_snapshot_metadata(self.snapshot_id,
-                                                    metadata)
+        body = self.client.create_snapshot_metadata(
+            self.snapshot_id, metadata)['metadata']
         # Get the metadata of the snapshot
-        body = self.client.show_snapshot_metadata(self.snapshot_id)
+        body = self.client.show_snapshot_metadata(
+            self.snapshot_id)['metadata']
         self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
         # Update metadata item
         body = self.client.update_snapshot_metadata_item(
-            self.snapshot_id, "key3", update_item)
+            self.snapshot_id, "key3", update_item)['meta']
         # Get the metadata of the snapshot
-        body = self.client.show_snapshot_metadata(self.snapshot_id)
+        body = self.client.show_snapshot_metadata(
+            self.snapshot_id)['metadata']
         self.assertThat(body.items(), matchers.ContainsAll(expect.items()))
 
 
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 067c0c1..79615b3 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -45,15 +45,12 @@
         cls.volume = cls.create_volume()
         cls.client.wait_for_volume_status(cls.volume['id'], 'available')
 
-    def _delete_image_with_wait(self, image_id):
-        self.image_client.delete_image(image_id)
-        self.image_client.wait_for_resource_deletion(image_id)
-
     @classmethod
     def resource_cleanup(cls):
         # Delete the test instance
         cls.servers_client.delete_server(cls.server['id'])
-        cls.servers_client.wait_for_server_termination(cls.server['id'])
+        waiters.wait_for_server_termination(cls.servers_client,
+                                            cls.server['id'])
 
         super(VolumesV2ActionsTest, cls).resource_cleanup()
 
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 29c21ed..9db2321 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -131,7 +131,11 @@
     @test.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
     @test.services('image')
     def test_volume_create_get_update_delete_from_image(self):
-        self._volume_create_get_update_delete(imageRef=CONF.compute.image_ref)
+        image = self.images_client.show_image(CONF.compute.image_ref)['image']
+        min_disk = image.get('minDisk')
+        disk_size = max(min_disk, CONF.volume.volume_size)
+        self._volume_create_get_update_delete(
+            imageRef=CONF.compute.image_ref, size=disk_size)
 
     @test.idempotent_id('3f591b4a-7dc6-444c-bd51-77469506b3a1')
     def test_volume_create_get_update_delete_as_clone(self):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 5203444..48f40f0 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -181,8 +181,8 @@
     def test_attach_volumes_with_nonexistent_volume_id(self):
         srv_name = data_utils.rand_name('Instance')
         server = self.create_server(srv_name)
-        self.addCleanup(self.servers_client.wait_for_server_termination,
-                        server['id'])
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
         self.addCleanup(self.servers_client.delete_server, server['id'])
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 1df1896..058e220 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -49,12 +49,11 @@
         and validates result.
         """
         if with_detail:
-            fetched_snap_list = \
-                self.snapshots_client.\
-                list_snapshots(detail=True, params=params)
+            fetched_snap_list = self.snapshots_client.list_snapshots(
+                detail=True, params=params)['snapshots']
         else:
-            fetched_snap_list = \
-                self.snapshots_client.list_snapshots(params=params)
+            fetched_snap_list = self.snapshots_client.list_snapshots(
+                params=params)['snapshots']
 
         # Validating params of fetched snapshots
         for snap in fetched_snap_list:
@@ -75,7 +74,8 @@
                                        'ACTIVE')
         mountpoint = '/dev/%s' % CONF.compute.volume_device_name
         self.servers_client.attach_volume(
-            server['id'], self.volume_origin['id'], mountpoint)
+            server['id'], volumeId=self.volume_origin['id'],
+            device=mountpoint)
         self.volumes_client.wait_for_volume_status(self.volume_origin['id'],
                                                    'in-use')
         self.addCleanup(self.volumes_client.wait_for_volume_status,
@@ -98,14 +98,15 @@
         snapshot = self.create_snapshot(self.volume_origin['id'], **params)
 
         # Get the snap and check for some of its details
-        snap_get = self.snapshots_client.show_snapshot(snapshot['id'])
+        snap_get = self.snapshots_client.show_snapshot(
+            snapshot['id'])['snapshot']
         self.assertEqual(self.volume_origin['id'],
                          snap_get['volume_id'],
                          "Referred volume origin mismatch")
 
         # Compare also with the output from the list action
         tracking_data = (snapshot['id'], snapshot[self.name_field])
-        snaps_list = self.snapshots_client.list_snapshots()
+        snaps_list = self.snapshots_client.list_snapshots()['snapshots']
         snaps_data = [(f['id'], f[self.name_field]) for f in snaps_list]
         self.assertIn(tracking_data, snaps_data)
 
@@ -114,14 +115,14 @@
         new_desc = 'This is the new description of snapshot.'
         params = {self.name_field: new_s_name,
                   self.descrip_field: new_desc}
-        update_snapshot = \
-            self.snapshots_client.update_snapshot(snapshot['id'], **params)
+        update_snapshot = self.snapshots_client.update_snapshot(
+            snapshot['id'], **params)['snapshot']
         # Assert response body for update_snapshot method
         self.assertEqual(new_s_name, update_snapshot[self.name_field])
         self.assertEqual(new_desc, update_snapshot[self.descrip_field])
         # Assert response body for show_snapshot method
-        updated_snapshot = \
-            self.snapshots_client.show_snapshot(snapshot['id'])
+        updated_snapshot = self.snapshots_client.show_snapshot(
+            snapshot['id'])['snapshot']
         self.assertEqual(new_s_name, updated_snapshot[self.name_field])
         self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
 
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index d1eb694..ddc6822 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -118,7 +118,7 @@
         else:
             remaining = None
 
-        # Mark that we are not comming from a next link
+        # Mark that the current iteration is not from a 'next' link
         next = None
 
         while True:
@@ -149,8 +149,8 @@
                     # We no longer expect it
                     remaining.remove(element_id)
 
-            # If we come from a next link check that absolute url is the same
-            # as the one used for this request
+            # If the current iteration is from a 'next' link, check that the
+            # absolute url is the same as the one used for this request
             if next:
                 self.assertEqual(next, response.response['content-location'])
 
diff --git a/tempest/clients.py b/tempest/clients.py
index 6a2c601..b9f7991 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -42,9 +42,9 @@
 from tempest.services.compute.json.fixed_ips_client import FixedIPsClient
 from tempest.services.compute.json.flavors_client import FlavorsClient
 from tempest.services.compute.json.floating_ip_pools_client import \
-    FloatingIpPoolsClient
+    FloatingIPPoolsClient
 from tempest.services.compute.json.floating_ips_bulk_client import \
-    FloatingIpsBulkClient
+    FloatingIPsBulkClient
 from tempest.services.compute.json.floating_ips_client import \
     FloatingIPsClient
 from tempest.services.compute.json.hosts_client import HostsClient
@@ -65,6 +65,8 @@
 from tempest.services.compute.json.quotas_client import QuotasClient
 from tempest.services.compute.json.security_group_default_rules_client import \
     SecurityGroupDefaultRulesClient
+from tempest.services.compute.json.security_group_rules_client import \
+    SecurityGroupRulesClient
 from tempest.services.compute.json.security_groups_client import \
     SecurityGroupsClient
 from tempest.services.compute.json.server_groups_client import \
@@ -241,8 +243,8 @@
         # with identity v2
         if CONF.identity_feature_enabled.api_v2 and \
                 CONF.identity.auth_version == 'v2':
-            # EC2 and S3 clients, if used, will check onfigured AWS credentials
-            # and generate new ones if needed
+            # EC2 and S3 clients, if used, will check configured AWS
+            # credentials and generate new ones if needed
             self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
             self.s3_client = botoclients.ObjectClientS3(self.identity_client)
 
@@ -280,12 +282,14 @@
         self.flavors_client = FlavorsClient(self.auth_provider, **params)
         self.extensions_client = ExtensionsClient(self.auth_provider,
                                                   **params)
-        self.floating_ip_pools_client = FloatingIpPoolsClient(
+        self.floating_ip_pools_client = FloatingIPPoolsClient(
             self.auth_provider, **params)
-        self.floating_ips_bulk_client = FloatingIpsBulkClient(
+        self.floating_ips_bulk_client = FloatingIPsBulkClient(
             self.auth_provider, **params)
         self.floating_ips_client = FloatingIPsClient(self.auth_provider,
                                                      **params)
+        self.security_group_rules_client = SecurityGroupRulesClient(
+            self.auth_provider, **params)
         self.security_groups_client = SecurityGroupsClient(
             self.auth_provider, **params)
         self.interfaces_client = InterfacesClient(self.auth_provider,
@@ -317,8 +321,7 @@
             'build_timeout': CONF.volume.build_timeout
         })
         self.volumes_extensions_client = VolumesExtensionsClient(
-            self.auth_provider, default_volume_size=CONF.volume.volume_size,
-            **params_volume)
+            self.auth_provider, **params_volume)
 
     def _set_database_clients(self):
         self.database_flavors_client = DatabaseFlavorsClient(
@@ -340,15 +343,25 @@
     def _set_identity_clients(self):
         params = {
             'service': CONF.identity.catalog_type,
-            'region': CONF.identity.region,
-            'endpoint_type': 'adminURL'
+            'region': CONF.identity.region
         }
         params.update(self.default_params_with_timeout_values)
-
+        params_v2_admin = params.copy()
+        params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
+        # Client uses admin endpoint type of Keystone API v2
         self.identity_client = IdentityClient(self.auth_provider,
-                                              **params)
+                                              **params_v2_admin)
+        params_v2_public = params.copy()
+        params_v2_public['endpoint_type'] = (
+            CONF.identity.v2_public_endpoint_type)
+        # Client uses public endpoint type of Keystone API v2
+        self.identity_public_client = IdentityClient(self.auth_provider,
+                                                     **params_v2_public)
+        params_v3 = params.copy()
+        params_v3['endpoint_type'] = CONF.identity.v3_endpoint_type
+        # Client uses the endpoint type of Keystone API v3
         self.identity_v3_client = IdentityV3Client(self.auth_provider,
-                                                   **params)
+                                                   **params_v3)
         self.endpoints_client = EndPointClient(self.auth_provider,
                                                **params)
         self.service_client = ServiceClient(self.auth_provider, **params)
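With the endpoint-type split above, the manager exposes a separate identity client per
Keystone endpoint. A hypothetical usage sketch (``mgr`` is an assumed
``tempest.clients.Manager`` instance; the attribute names are taken from the hunk)::

    mgr = clients.Manager(credentials)      # credentials object is assumed
    v2_admin = mgr.identity_client          # Keystone v2, admin endpoint type
    v2_public = mgr.identity_public_client  # Keystone v2, public endpoint type
    v3 = mgr.identity_v3_client             # Keystone v3, endpoint type from config
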
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 0360146..e05cab3 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -160,7 +160,7 @@
                 raise exceptions.TempestException(
                     "Role: %s - doesn't exist" % r
                 )
-    existing = [x['name'] for x in identity_admin.list_tenants()]
+    existing = [x['name'] for x in identity_admin.list_tenants()['tenants']]
     for tenant in resources['tenants']:
         if tenant not in existing:
             identity_admin.create_tenant(tenant)
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 1de20d6..03ae51f 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 
-# Copyright 2014 Dell Inc.
+# Copyright 2015 Dell Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
 #
-#         http://www.apache.org/licenses/LICENSE-2.0
+#        http://www.apache.org/licenses/LICENSE-2.0
 #
 #    Unless required by applicable law or agreed to in writing, software
 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -76,12 +76,12 @@
                   CONF.identity.alt_username]
 
     if IS_NEUTRON:
-        CONF_PRIV_NETWORK = _get_priv_net_id(CONF.compute.fixed_network_name,
-                                             CONF.identity.tenant_name)
+        CONF_PRIV_NETWORK = _get_network_id(CONF.compute.fixed_network_name,
+                                            CONF.identity.tenant_name)
         CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
 
 
-def _get_priv_net_id(prv_net_name, tenant_name):
+def _get_network_id(net_name, tenant_name):
     am = clients.AdminManager()
     net_cl = am.network_client
     id_cl = am.identity_client
@@ -91,7 +91,7 @@
     t_id = tenant['id']
     n_id = None
     for net in networks['networks']:
-        if (net['tenant_id'] == t_id and net['name'] == prv_net_name):
+        if (net['tenant_id'] == t_id and net['name'] == net_name):
             n_id = net['id']
             break
     return n_id
@@ -103,6 +103,10 @@
         for key, value in kwargs.items():
             setattr(self, key, value)
 
+        self.tenant_filter = {}
+        if hasattr(self, 'tenant_id'):
+            self.tenant_filter['tenant_id'] = self.tenant_id
+
     def _filter_by_tenant_id(self, item_list):
         if (item_list is None
                 or len(item_list) == 0
@@ -153,9 +157,8 @@
         for snap in snaps:
             try:
                 client.delete_snapshot(snap['id'])
-            except Exception as e:
-                LOG.exception("Delete Snapshot exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Snapshot exception.")
 
     def dry_run(self):
         snaps = self.list()
@@ -180,9 +183,8 @@
         for server in servers:
             try:
                 client.delete_server(server['id'])
-            except Exception as e:
-                LOG.exception("Delete Server exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Server exception.")
 
     def dry_run(self):
         servers = self.list()
@@ -193,7 +195,7 @@
 
     def list(self):
         client = self.client
-        sgs = client.list_server_groups()
+        sgs = client.list_server_groups()['server_groups']
         LOG.debug("List count, %s Server Groups" % len(sgs))
         return sgs
 
@@ -203,9 +205,8 @@
         for sg in sgs:
             try:
                 client.delete_server_group(sg['id'])
-            except Exception as e:
-                LOG.exception("Delete Server Group exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Server Group exception.")
 
     def dry_run(self):
         sgs = self.list()
@@ -219,7 +220,7 @@
 
     def list(self):
         client = self.client
-        stacks = client.list_stacks()
+        stacks = client.list_stacks()['stacks']
         LOG.debug("List count, %s Stacks" % len(stacks))
         return stacks
 
@@ -229,9 +230,8 @@
         for stack in stacks:
             try:
                 client.delete_stack(stack['id'])
-            except Exception as e:
-                LOG.exception("Delete Stack exception: %s " % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Stack exception.")
 
     def dry_run(self):
         stacks = self.list()
@@ -245,7 +245,7 @@
 
     def list(self):
         client = self.client
-        keypairs = client.list_keypairs()
+        keypairs = client.list_keypairs()['keypairs']
         LOG.debug("List count, %s Keypairs" % len(keypairs))
         return keypairs
 
@@ -256,9 +256,8 @@
             try:
                 name = k['keypair']['name']
                 client.delete_keypair(name)
-            except Exception as e:
-                LOG.exception("Delete Keypairs exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Keypairs exception.")
 
     def dry_run(self):
         keypairs = self.list()
@@ -272,7 +271,7 @@
 
     def list(self):
         client = self.client
-        secgrps = client.list_security_groups()
+        secgrps = client.list_security_groups()['security_groups']
         secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
         LOG.debug("List count, %s Security Groups" % len(secgrp_del))
         return secgrp_del
@@ -283,8 +282,8 @@
         for g in secgrp_del:
             try:
                 client.delete_security_group(g['id'])
-            except Exception as e:
-                LOG.exception("Delete Security Groups exception: %s" % e)
+            except Exception:
+                LOG.exception("Delete Security Groups exception.")
 
     def dry_run(self):
         secgrp_del = self.list()
@@ -298,7 +297,7 @@
 
     def list(self):
         client = self.client
-        floating_ips = client.list_floating_ips()
+        floating_ips = client.list_floating_ips()['floating_ips']
         LOG.debug("List count, %s Floating IPs" % len(floating_ips))
         return floating_ips
 
@@ -308,9 +307,8 @@
         for f in floating_ips:
             try:
                 client.delete_floating_ip(f['id'])
-            except Exception as e:
-                LOG.exception("Delete Floating IPs exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Floating IPs exception.")
 
     def dry_run(self):
         floating_ips = self.list()
@@ -334,9 +332,8 @@
         for v in vols:
             try:
                 client.delete_volume(v['id'])
-            except Exception as e:
-                LOG.exception("Delete Volume exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Volume exception.")
 
     def dry_run(self):
         vols = self.list()
@@ -352,9 +349,8 @@
         client = self.client
         try:
             client.delete_quota_set(self.tenant_id)
-        except Exception as e:
-            LOG.exception("Delete Volume Quotas exception: %s" % e)
-            pass
+        except Exception:
+            LOG.exception("Delete Volume Quotas exception.")
 
     def dry_run(self):
         quotas = self.client.show_quota_usage(self.tenant_id)
@@ -371,9 +367,8 @@
         client = self.client
         try:
             client.delete_quota_set(self.tenant_id)
-        except Exception as e:
-            LOG.exception("Delete Quotas exception: %s" % e)
-            pass
+        except Exception:
+            LOG.exception("Delete Quotas exception.")
 
     def dry_run(self):
         client = self.limits_client
@@ -396,8 +391,8 @@
 
     def list(self):
         client = self.client
-        networks = client.list_networks()
-        networks = self._filter_by_tenant_id(networks['networks'])
+        networks = client.list_networks(**self.tenant_filter)
+        networks = networks['networks']
         # filter out networks declared in tempest.conf
         if self.is_preserve:
             networks = [network for network in networks
@@ -411,9 +406,8 @@
         for n in networks:
             try:
                 client.delete_network(n['id'])
-            except Exception as e:
-                LOG.exception("Delete Network exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Network exception.")
 
     def dry_run(self):
         networks = self.list()
@@ -424,9 +418,8 @@
 
     def list(self):
         client = self.client
-        flips = client.list_floatingips()
+        flips = client.list_floatingips(**self.tenant_filter)
         flips = flips['floatingips']
-        flips = self._filter_by_tenant_id(flips)
         LOG.debug("List count, %s Network Floating IPs" % len(flips))
         return flips
 
@@ -436,9 +429,8 @@
         for flip in flips:
             try:
                 client.delete_floatingip(flip['id'])
-            except Exception as e:
-                LOG.exception("Delete Network Floating IP exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Network Floating IP exception.")
 
     def dry_run(self):
         flips = self.list()
@@ -449,9 +441,8 @@
 
     def list(self):
         client = self.client
-        routers = client.list_routers()
+        routers = client.list_routers(**self.tenant_filter)
         routers = routers['routers']
-        routers = self._filter_by_tenant_id(routers)
         if self.is_preserve:
             routers = [router for router in routers
                        if router['id'] != CONF_PUB_ROUTER]
@@ -465,15 +456,15 @@
         for router in routers:
             try:
                 rid = router['id']
-                ports = client.list_router_interfaces(rid)
-                ports = ports['ports']
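+                # Detach only the true router interface ports before the
+                # router itself is deleted.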
+                ports = [port for port
+                         in client.list_router_interfaces(rid)['ports']
+                         if port["device_owner"] == "network:router_interface"]
                 for port in ports:
-                    subid = port['fixed_ips'][0]['subnet_id']
-                    client.remove_router_interface_with_subnet_id(rid, subid)
+                    client.remove_router_interface_with_port_id(rid,
+                                                                port['id'])
                 client.delete_router(rid)
-            except Exception as e:
-                LOG.exception("Delete Router exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Router exception.")
 
     def dry_run(self):
         routers = self.list()
@@ -496,9 +487,8 @@
         for hm in hms:
             try:
                 client.delete_health_monitor(hm['id'])
-            except Exception as e:
-                LOG.exception("Delete Health Monitor exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Health Monitor exception.")
 
     def dry_run(self):
         hms = self.list()
@@ -521,9 +511,8 @@
         for member in members:
             try:
                 client.delete_member(member['id'])
-            except Exception as e:
-                LOG.exception("Delete Member exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Member exception.")
 
     def dry_run(self):
         members = self.list()
@@ -546,9 +535,8 @@
         for vip in vips:
             try:
                 client.delete_vip(vip['id'])
-            except Exception as e:
-                LOG.exception("Delete VIP exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete VIP exception.")
 
     def dry_run(self):
         vips = self.list()
@@ -571,9 +559,8 @@
         for pool in pools:
             try:
                 client.delete_pool(pool['id'])
-            except Exception as e:
-                LOG.exception("Delete Pool exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Pool exception.")
 
     def dry_run(self):
         pools = self.list()
@@ -596,9 +583,8 @@
         for rule in rules:
             try:
                 client.delete_metering_label_rule(rule['id'])
-            except Exception as e:
-                LOG.exception("Delete Metering Label Rule exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Metering Label Rule exception.")
 
     def dry_run(self):
         rules = self.list()
@@ -621,9 +607,8 @@
         for label in labels:
             try:
                 client.delete_metering_label(label['id'])
-            except Exception as e:
-                LOG.exception("Delete Metering Label exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Metering Label exception.")
 
     def dry_run(self):
         labels = self.list()
@@ -634,11 +619,14 @@
 
     def list(self):
         client = self.client
-        ports = client.list_ports()
-        ports = ports['ports']
-        ports = self._filter_by_tenant_id(ports)
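+        # Only pick up unbound ports ("") and ports owned by compute
+        # instances; ports with any other device_owner are skipped.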
+        ports = [port for port in
+                 client.list_ports(**self.tenant_filter)['ports']
+                 if port["device_owner"] == "" or
+                 port["device_owner"].startswith("compute:")]
+
         if self.is_preserve:
             ports = self._filter_by_conf_networks(ports)
+
         LOG.debug("List count, %s Ports" % len(ports))
         return ports
 
@@ -648,22 +636,48 @@
         for port in ports:
             try:
                 client.delete_port(port['id'])
-            except Exception as e:
-                LOG.exception("Delete Port exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Port exception.")
 
     def dry_run(self):
         ports = self.list()
         self.data['ports'] = ports
 
 
+class NetworkSecGroupService(NetworkService):
+    def list(self):
+        client = self.client
+        filter = self.tenant_filter
+        # The default security group cannot be deleted, so never list it.
+        secgroups = [secgroup for secgroup in
+                     client.list_security_groups(**filter)['security_groups']
+                     if secgroup['name'] != 'default']
+
+        if self.is_preserve:
+            secgroups = self._filter_by_conf_networks(secgroups)
+        LOG.debug("List count, %s securtiy_groups" % len(secgroups))
+        return secgroups
+
+    def delete(self):
+        client = self.client
+        secgroups = self.list()
+        for secgroup in secgroups:
+            try:
+                client.delete_security_group(secgroup['id'])
+            except Exception:
+                LOG.exception("Delete security_group exception.")
+
+    def dry_run(self):
+        secgroups = self.list()
+        self.data['secgroups'] = secgroups
+
+
 class NetworkSubnetService(NetworkService):
 
     def list(self):
         client = self.client
-        subnets = client.list_subnets()
+        subnets = client.list_subnets(**self.tenant_filter)
         subnets = subnets['subnets']
-        subnets = self._filter_by_tenant_id(subnets)
         if self.is_preserve:
             subnets = self._filter_by_conf_networks(subnets)
         LOG.debug("List count, %s Subnets" % len(subnets))
@@ -675,9 +689,8 @@
         for subnet in subnets:
             try:
                 client.delete_subnet(subnet['id'])
-            except Exception as e:
-                LOG.exception("Delete Subnet exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Subnet exception.")
 
     def dry_run(self):
         subnets = self.list()
@@ -702,9 +715,8 @@
         for alarm in alarms:
             try:
                 client.delete_alarm(alarm['id'])
-            except Exception as e:
-                LOG.exception("Delete Alarms exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Alarms exception.")
 
     def dry_run(self):
         alarms = self.list()
@@ -719,7 +731,7 @@
 
     def list(self):
         client = self.client
-        flavors = client.list_flavors({"is_public": None})
+        flavors = client.list_flavors({"is_public": None})['flavors']
         if not self.is_save_state:
             # recreate list removing saved flavors
             flavors = [flavor for flavor in flavors if flavor['id']
@@ -737,9 +749,8 @@
         for flavor in flavors:
             try:
                 client.delete_flavor(flavor['id'])
-            except Exception as e:
-                LOG.exception("Delete Flavor exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Flavor exception.")
 
     def dry_run(self):
         flavors = self.list()
@@ -759,7 +770,7 @@
 
     def list(self):
         client = self.client
-        images = client.list_images({"all_tenants": True})
+        images = client.list_images({"all_tenants": True})['images']
         if not self.is_save_state:
             images = [image for image in images if image['id']
                       not in self.saved_state_json['images'].keys()]
@@ -775,17 +786,16 @@
         for image in images:
             try:
                 client.delete_image(image['id'])
-            except Exception as e:
-                LOG.exception("Delete Image exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Image exception.")
 
     def dry_run(self):
         images = self.list()
         self.data['images'] = images
 
     def save_state(self):
-        images = self.list()
         self.data['images'] = {}
+        images = self.list()
         for image in images:
             self.data['images'][image['id']] = image['name']
 
@@ -823,9 +833,8 @@
         for user in users:
             try:
                 client.delete_user(user['id'])
-            except Exception as e:
-                LOG.exception("Delete User exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete User exception.")
 
     def dry_run(self):
         users = self.list()
@@ -852,8 +861,8 @@
                           and role['name'] != CONF.identity.admin_role)]
                 LOG.debug("List count, %s Roles after reconcile" % len(roles))
             return roles
-        except Exception as ex:
-            LOG.exception("Cannot retrieve Roles, exception: %s" % ex)
+        except Exception:
+            LOG.exception("Cannot retrieve Roles.")
             return []
 
     def delete(self):
@@ -862,9 +871,8 @@
         for role in roles:
             try:
                 client.delete_role(role['id'])
-            except Exception as e:
-                LOG.exception("Delete Role exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Role exception.")
 
     def dry_run(self):
         roles = self.list()
@@ -881,7 +889,7 @@
 
     def list(self):
         client = self.client
-        tenants = client.list_tenants()
+        tenants = client.list_tenants()['tenants']
         if not self.is_save_state:
             tenants = [tenant for tenant in tenants if (tenant['id']
                        not in self.saved_state_json['tenants'].keys()
@@ -900,9 +908,8 @@
         for tenant in tenants:
             try:
                 client.delete_tenant(tenant['id'])
-            except Exception as e:
-                LOG.exception("Delete Tenant exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Tenant exception.")
 
     def dry_run(self):
         tenants = self.list()
@@ -938,9 +945,8 @@
             try:
                 client.update_domain(domain['id'], enabled=False)
                 client.delete_domain(domain['id'])
-            except Exception as e:
-                LOG.exception("Delete Domain exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Domain exception.")
 
     def dry_run(self):
         domains = self.list()
@@ -955,7 +961,6 @@
 
 def get_tenant_cleanup_services():
     tenant_services = []
-
     if IS_CEILOMETER:
         tenant_services.append(TelemetryAlarmService)
     if IS_NOVA:
@@ -969,14 +974,15 @@
     if IS_HEAT:
         tenant_services.append(StackService)
     if IS_NEUTRON:
+        tenant_services.append(NetworkFloatingIpService)
         if test.is_extension_enabled('metering', 'network'):
             tenant_services.append(NetworkMeteringLabelRuleService)
             tenant_services.append(NetworkMeteringLabelService)
         tenant_services.append(NetworkRouterService)
-        tenant_services.append(NetworkFloatingIpService)
         tenant_services.append(NetworkPortService)
         tenant_services.append(NetworkSubnetService)
         tenant_services.append(NetworkService)
+        tenant_services.append(NetworkSecGroupService)
     if IS_CINDER:
         tenant_services.append(SnapshotService)
         tenant_services.append(VolumeService)
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
new file mode 100644
index 0000000..289b978
--- /dev/null
+++ b/tempest/cmd/init.py
@@ -0,0 +1,139 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import subprocess
+import sys
+
+from cliff import command
+from oslo_log import log as logging
+from six import moves
+
+LOG = logging.getLogger(__name__)
+
+TESTR_CONF = """[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \\
+    OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \\
+    OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \\
+    ${PYTHON:-python} -m subunit.run discover -t %s %s $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=([^\.]*\.)*
+"""
+
+
+def get_tempest_default_config_dir():
+    """Returns the correct default config dir to support both cases of
+    tempest being or not installed in a virtualenv.
+    Cases considered:
+    - no virtual env, python2: real_prefix and base_prefix not set
+    - no virtual env, python3: real_prefix not set, base_prefix set and
+      identical to prefix
+    - virtualenv, python2: real_prefix and prefix are set and different
+    - virtualenv, python3: real_prefix not set, base_prefix and prefix are
+      set and identical
+    - pyvenv, any python version: real_prefix not set, base_prefix and prefix
+      are set and different
+
+    :return: default config dir
+    """
+    real_prefix = getattr(sys, 'real_prefix', None)
+    base_prefix = getattr(sys, 'base_prefix', None)
+    prefix = sys.prefix
+    if real_prefix is None and base_prefix is None:
+        # Not running in a virtual environment of any kind
+        return '/etc/tempest'
+    elif (real_prefix is None and base_prefix is not None and
+            base_prefix == prefix):
+        # Probably not running in a virtual environment
+        # NOTE(andreaf) we cannot distinguish this case from the case of
+        # a virtual environment created with virtualenv, and running python3.
+        return '/etc/tempest'
+    else:
+        return os.path.join(sys.prefix, 'etc/tempest')
+
+
+class TempestInit(command.Command):
+    """Setup a local working environment for running tempest"""
+
+    def get_parser(self, prog_name):
+        parser = super(TempestInit, self).get_parser(prog_name)
+        parser.add_argument('dir', nargs='?', default=os.getcwd())
+        parser.add_argument('--config-dir', '-c', default=None)
+        return parser
+
+    def generate_testr_conf(self, local_path):
+        testr_conf_path = os.path.join(local_path, '.testr.conf')
+        top_level_path = os.path.dirname(os.path.dirname(__file__))
+        discover_path = os.path.join(top_level_path, 'test_discover')
+        testr_conf = TESTR_CONF % (top_level_path, discover_path)
+        with open(testr_conf_path, 'w+') as testr_conf_file:
+            testr_conf_file.write(testr_conf)
+
+    def update_local_conf(self, conf_path, lock_dir, log_dir):
+        config_parse = moves.configparser.SafeConfigParser()
+        config_parse.optionxform = str
+        # Read the existing config first; opening with 'w+' would truncate
+        # it before it could be parsed.
+        with open(conf_path, 'r') as conf_file:
+            config_parse.readfp(conf_file)
+        # Set local lock_dir in tempest conf
+        if not config_parse.has_section('oslo_concurrency'):
+            config_parse.add_section('oslo_concurrency')
+        config_parse.set('oslo_concurrency', 'lock_path', lock_dir)
+        # Set local log_dir in tempest conf
+        config_parse.set('DEFAULT', 'log_dir', log_dir)
+        # Set default log filename to tempest.log
+        config_parse.set('DEFAULT', 'log_file', 'tempest.log')
+        # Write the updated options back to the local tempest.conf
+        with open(conf_path, 'w') as conf_file:
+            config_parse.write(conf_file)
+
+    def copy_config(self, etc_dir, config_dir):
+        shutil.copytree(config_dir, etc_dir)
+
+    def generate_sample_config(self, local_dir):
+        subprocess.call(['oslo-config-generator', '--config-file',
+                         'tools/config/config-generator.tempest.conf'],
+                        cwd=local_dir)
+
+    def create_working_dir(self, local_dir, config_dir):
+        # Create local dir if missing
+        if not os.path.isdir(local_dir):
+            LOG.debug('Creating local working dir: %s' % local_dir)
+            os.mkdir(local_dir)
+        lock_dir = os.path.join(local_dir, 'tempest_lock')
+        etc_dir = os.path.join(local_dir, 'etc')
+        config_path = os.path.join(etc_dir, 'tempest.conf')
+        log_dir = os.path.join(local_dir, 'logs')
+        testr_dir = os.path.join(local_dir, '.testrepository')
+        # Create lock dir
+        if not os.path.isdir(lock_dir):
+            LOG.debug('Creating lock dir: %s' % lock_dir)
+            os.mkdir(lock_dir)
+        # Create log dir
+        if not os.path.isdir(log_dir):
+            LOG.debug('Creating log dir: %s' % log_dir)
+            os.mkdir(log_dir)
+        # Create and copy local etc dir
+        self.copy_config(etc_dir, config_dir)
+        # Generate the sample config file
+        self.generate_sample_config(local_dir)
+        # Update local confs to reflect local paths
+        self.update_local_conf(config_path, lock_dir, log_dir)
+        # Generate a testr conf file
+        self.generate_testr_conf(local_dir)
+        # Set up the local testr working dir
+        if not os.path.isdir(testr_dir):
+            subprocess.call(['testr', 'init'], cwd=local_dir)
+
+    def take_action(self, parsed_args):
+        config_dir = parsed_args.config_dir or get_tempest_default_config_dir()
+        self.create_working_dir(parsed_args.dir, config_dir)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index f091cd3..7f896d1 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -119,9 +119,11 @@
 from tempest_lib import exceptions as lib_exc
 import yaml
 
+from tempest.common import waiters
 from tempest import config
 from tempest.services.compute.json import flavors_client
 from tempest.services.compute.json import floating_ips_client
+from tempest.services.compute.json import security_group_rules_client
 from tempest.services.compute.json import security_groups_client
 from tempest.services.compute.json import servers_client
 from tempest.services.identity.v2.json import identity_client
@@ -202,6 +204,8 @@
             _auth, **compute_params)
         self.secgroups = security_groups_client.SecurityGroupsClient(
             _auth, **compute_params)
+        self.secrules = security_group_rules_client.SecurityGroupRulesClient(
+            _auth, **compute_params)
         self.objects = object_client.ObjectClient(_auth,
                                                   **object_storage_params)
         self.containers = container_client.ContainerClient(
@@ -270,7 +274,7 @@
     Don't create the tenants if they already exist.
     """
     admin = keystone_admin()
-    body = admin.identity.list_tenants()
+    body = admin.identity.list_tenants()['tenants']
     existing = [x['name'] for x in body]
     for tenant in tenants:
         if tenant not in existing:
@@ -500,7 +504,7 @@
     def check_telemetry(self):
         """Check that ceilometer provides a sane sample.
 
-        Confirm that there are more than one sample and that they have the
+        Confirm that there is more than one sample and that they have the
         expected metadata.
 
         If in check mode confirm that the oldest sample available is from
@@ -677,7 +681,7 @@
 
         response = _get_image_by_name(client, image['name'])
         if not response:
-            LOG.info("Image '%s' does not exists" % image['name'])
+            LOG.info("Image '%s' does not exist" % image['name'])
             continue
         client.images.delete_image(response['id'])
 
@@ -726,7 +730,7 @@
         # only create a network if the name isn't here
         body = client.networks.list_networks()
         if any(item['name'] == network['name'] for item in body['networks']):
-            LOG.warning("Dupplicated network name: %s" % network['name'])
+            LOG.warning("Duplicated network name: %s" % network['name'])
             continue
 
         client.networks.create_network(name=network['name'])
@@ -778,7 +782,7 @@
         # only create a router if the name isn't here
         body = client.networks.list_routers()
         if any(item['name'] == router['name'] for item in body['routers']):
-            LOG.warning("Dupplicated router name: %s" % router['name'])
+            LOG.warning("Duplicated router name: %s" % router['name'])
             continue
 
         client.networks.create_router(router['name'])
@@ -810,7 +814,7 @@
             # connect routers to their subnets
             client.networks.add_router_interface_with_subnet_id(router_id,
                                                                 subnet_id)
-        # connect routers to exteral network if set to "gateway"
+        # connect routers to external network if set to "gateway"
         if router['gateway']:
             if CONF.network.public_network_id:
                 ext_net = CONF.network.public_network_id
@@ -836,7 +840,7 @@
 
 
 def _get_flavor_by_name(client, name):
-    body = client.flavors.list_flavors()
+    body = client.flavors.list_flavors()['flavors']
     for flavor in body:
         if name == flavor['name']:
             return flavor
@@ -868,13 +872,13 @@
             server['name'], image_id, flavor_id, **kwargs)
         server_id = body['id']
         client.servers.wait_for_server_status(server_id, 'ACTIVE')
-        # create to security group(s) after server spawning
+        # create security group(s) after server spawning
         for secgroup in server['secgroups']:
             client.servers.add_security_group(server_id, secgroup)
         if CONF.compute.use_floatingip_for_ssh:
             floating_ip_pool = server.get('floating_ip_pool')
             floating_ip = client.floating_ips.create_floating_ip(
-                pool_name=floating_ip_pool)
+                pool_name=floating_ip_pool)['floating_ip']
             client.floating_ips.associate_floating_ip_to_server(
                 floating_ip['ip'], server_id)
 
@@ -893,8 +897,8 @@
 
         # TODO(EmilienM): disassociate floating IP from server and release it.
         client.servers.delete_server(response['id'])
-        client.servers.wait_for_server_termination(response['id'],
-                                                   ignore_error=True)
+        waiters.wait_for_server_termination(client.servers, response['id'],
+                                            ignore_error=True)
 
 
 def create_secgroups(secgroups):
@@ -905,20 +909,22 @@
         # only create a security group if the name isn't here
         # i.e. a security group may be used by another server
         # only create a router if the name isn't here
-        body = client.secgroups.list_security_groups()
+        body = client.secgroups.list_security_groups()['security_groups']
         if any(item['name'] == secgroup['name'] for item in body):
             LOG.warning("Security group '%s' already exists" %
                         secgroup['name'])
             continue
 
         body = client.secgroups.create_security_group(
-            secgroup['name'], secgroup['description'])
+            name=secgroup['name'],
+            description=secgroup['description'])['security_group']
         secgroup_id = body['id']
         # for each security group, create the rules
         for rule in secgroup['rules']:
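+            # Each rule in the resource file is a whitespace-separated string
+            # of the form "<ip_proto> <from_port> <to_port> <cidr>".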
             ip_proto, from_port, to_port, cidr = rule.split()
-            client.secgroups.create_security_group_rule(
-                secgroup_id, ip_proto, from_port, to_port, cidr=cidr)
+            client.secrules.create_security_group_rule(
+                parent_group_id=secgroup_id, ip_protocol=ip_proto,
+                from_port=from_port, to_port=to_port, cidr=cidr)
 
 
 def destroy_secgroups(secgroups):
@@ -991,7 +997,7 @@
 def create_resources():
     LOG.info("Creating Resources")
     # first create keystone level resources, and we need to be admin
-    # for those.
+    # for this.
     create_tenants(RES['tenants'])
     create_users(RES['users'])
     collect_users(RES['users'])
@@ -1011,7 +1017,7 @@
     create_volumes(RES['volumes'])
 
     # Only attempt attaching the volumes if servers are defined in the
-    # resourcefile
+    # resource file
     if 'servers' in RES:
         create_servers(RES['servers'])
         attach_volumes(RES['volumes'])
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index 78e0e72..27b44f6 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -216,7 +216,7 @@
             if ('user_domain_name' in init_attributes and 'user_domain_name'
                     not in hash_attributes):
                 # Allow for the case of domain_name populated from config
-                domain_name = CONF.identity.admin_domain_name
+                domain_name = CONF.auth.default_credentials_domain_name
                 hash_attributes['user_domain_name'] = domain_name
             if all([getattr(creds, k) == hash_attributes[k] for
                    k in init_attributes]):
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 06e3493..05ea393 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -26,7 +26,7 @@
 LOG = logging.getLogger(__name__)
 
 
-def create_test_server(clients, validatable, validation_resources=None,
+def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, **kwargs):
     """Common wrapper utility returning a test server.
 
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index 2b7e0db..783a5fc 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -84,9 +84,9 @@
         domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
                             if 'domain' in x)
         if not domain_fields.intersection(kwargs.keys()):
-            # TODO(andreaf) It might be better here to use a dedicated config
-            # option such as CONF.auth.tenant_isolation_domain_name
-            params['user_domain_name'] = CONF.identity.admin_domain_name
+            domain_name = CONF.auth.default_credentials_domain_name
+            params['user_domain_name'] = domain_name
+
         auth_url = CONF.identity.uri_v3
     else:
         auth_url = CONF.identity.uri
diff --git a/tempest/common/fixed_network.py b/tempest/common/fixed_network.py
index 9ec0ec6..b81830a 100644
--- a/tempest/common/fixed_network.py
+++ b/tempest/common/fixed_network.py
@@ -40,7 +40,7 @@
     if not name:
         raise exceptions.InvalidConfiguration()
 
-    networks = compute_networks_client.list_networks()
+    networks = compute_networks_client.list_networks()['networks']
     networks = [n for n in networks if n['label'] == name]
 
     # Check that a network exists, else raise an InvalidConfigurationException
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index 4be3da1..868a3e9 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -330,7 +330,7 @@
             try:
                 self.context.load_verify_locations(self.ca_certs)
             except Exception as e:
-                msg = 'Unable to load CA from "%s"' % (self.ca_certs, e)
+                msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e)
                 raise exc.SSLConfigurationError(msg)
         else:
             self.context.set_default_verify_paths()
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index ff4eda9..6dca3a3 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -45,6 +45,8 @@
     def create_user(self, username, password, project, email):
         user = self.identity_client.create_user(
             username, password, project['id'], email)
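+        # The identity client may return the new user wrapped in a 'user'
+        # element; unwrap it so callers always get the user dict itself.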
+        if 'user' in user:
+            user = user['user']
         return user
 
     @abc.abstractmethod
@@ -113,7 +115,7 @@
             # Domain names must be unique, in any case a list is returned,
             # selecting the first (and only) element
             self.creds_domain = self.identity_client.list_domains(
-                params={'name': domain_name})[0]
+                params={'name': domain_name})['domains'][0]
         except lib_exc.NotFound:
             # TODO(andrea) we could probably create the domain on the fly
             msg = "Configured domain %s could not be found" % domain_name
@@ -122,7 +124,7 @@
     def create_project(self, name, description):
         project = self.identity_client.create_project(
             name=name, description=description,
-            domain_id=self.creds_domain['id'])
+            domain_id=self.creds_domain['id'])['project']
         return project
 
     def get_credentials(self, user, project, password):
@@ -136,6 +138,10 @@
     def delete_project(self, project_id):
         self.identity_client.delete_project(project_id)
 
+    def _list_roles(self):
+        roles = self.identity_client.list_roles()['roles']
+        return roles
+
 
 def get_creds_client(identity_client, project_domain_name=None):
     if isinstance(identity_client, v2_identity.IdentityClient):
@@ -163,8 +169,8 @@
         self.creds_domain_name = None
         if self.identity_version == 'v3':
             self.creds_domain_name = (
-                CONF.auth.tenant_isolation_domain_name or
-                self.default_admin_creds.project_domain_name)
+                self.default_admin_creds.project_domain_name or
+                CONF.auth.default_credentials_domain_name)
         self.creds_client = get_creds_client(
             self.identity_admin_client, self.creds_domain_name)
 
@@ -206,6 +212,8 @@
         email = data_utils.rand_name(root) + suffix + "@example.com"
         user = self.creds_client.create_user(
             username, user_password, project, email)
+        if 'user' in user:
+            user = user['user']
         role_assigned = False
         if admin:
             self.creds_client.assign_user_role(user, project,
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 93c2c10..a567c6a 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -106,7 +106,8 @@
 
     def get_nic_name(self, address):
         cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
-        return self.exec_command(cmd)
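+        # Strip the trailing newline and any trailing ':' from the command
+        # output and normalize the interface name to lower case.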
+        nic = self.exec_command(cmd)
+        return nic.strip().strip(":").lower()
 
     def get_ip_list(self):
         cmd = "ip address"
@@ -144,7 +145,6 @@
         """Renews DHCP lease via udhcpc client. """
         file_path = '/var/run/udhcpc.'
         nic_name = self.get_nic_name(fixed_ip)
-        nic_name = nic_name.strip().lower()
         pid = self.exec_command('cat {path}{nic}.pid'.
                                 format(path=file_path, nic=nic_name))
         pid = pid.strip()
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
index 18f0b1d..debc200 100644
--- a/tempest/common/validation_resources.py
+++ b/tempest/common/validation_resources.py
@@ -23,16 +23,19 @@
 
 
 def create_ssh_security_group(os, add_rule=False):
-    security_group_client = os.security_groups_client
+    security_groups_client = os.security_groups_client
+    security_group_rules_client = os.security_group_rules_client
     sg_name = data_utils.rand_name('securitygroup-')
     sg_description = data_utils.rand_name('description-')
-    security_group = \
-        security_group_client.create_security_group(sg_name, sg_description)
+    security_group = security_groups_client.create_security_group(
+        name=sg_name, description=sg_description)['security_group']
     if add_rule:
-        security_group_client.create_security_group_rule(security_group['id'],
-                                                         'tcp', 22, 22)
-        security_group_client.create_security_group_rule(security_group['id'],
-                                                         'icmp', -1, -1)
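+        # Allow SSH (tcp/22) and ping (icmp) through the validation
+        # security group.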
+        security_group_rules_client.create_security_group_rule(
+            parent_group_id=security_group['id'], ip_protocol='tcp',
+            from_port=22, to_port=22)
+        security_group_rules_client.create_security_group_rule(
+            parent_group_id=security_group['id'], ip_protocol='icmp',
+            from_port=-1, to_port=-1)
     LOG.debug("SSH Validation resource security group with tcp and icmp "
               "rules %s created"
               % sg_name)
@@ -45,8 +48,8 @@
     if validation_resources:
         if validation_resources['keypair']:
             keypair_name = data_utils.rand_name('keypair')
-            validation_data['keypair'] = \
-                os.keypairs_client.create_keypair(keypair_name)
+            validation_data.update(os.keypairs_client.create_keypair(
+                name=keypair_name))
             LOG.debug("Validation resource key %s created" % keypair_name)
         add_rule = False
         if validation_resources['security_group']:
@@ -56,8 +59,7 @@
                 create_ssh_security_group(os, add_rule)
         if validation_resources['floating_ip']:
             floating_client = os.floating_ips_client
-            validation_data['floating_ip'] = \
-                floating_client.create_floating_ip()
+            validation_data.update(floating_client.create_floating_ip())
     return validation_data
 
 
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 85a03cf..803ad6c 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -15,6 +15,7 @@
 
 from oslo_log import log as logging
 from tempest_lib.common.utils import misc as misc_utils
+from tempest_lib import exceptions as lib_exc
 
 from tempest import config
 from tempest import exceptions
@@ -50,7 +51,7 @@
                     return
                 # NOTE(afazekas): The instance is in "ready for action state"
                 # when no task in progress
-                # NOTE(afazekas): Converted to string bacuse of the XML
+                # NOTE(afazekas): Converted to string because of the XML
                 # responses
                 if str(task_state) == "None":
                     # without state api extension 3 sec usually enough
@@ -96,6 +97,25 @@
         old_task_state = task_state
 
 
+def wait_for_server_termination(client, server_id, ignore_error=False):
+    """Waits for server to reach termination."""
+    start_time = int(time.time())
+    while True:
+        try:
+            body = client.show_server(server_id)
+        except lib_exc.NotFound:
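+            # A NotFound response means the server is gone, which is the
+            # termination state we are waiting for.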
+            return
+
+        server_status = body['status']
+        if server_status == 'ERROR' and not ignore_error:
+            raise exceptions.BuildErrorException(server_id=server_id)
+
+        if int(time.time()) - start_time >= client.build_timeout:
+            raise exceptions.TimeoutException
+
+        time.sleep(client.build_interval)
+
+
 def wait_for_image_status(client, image_id, status):
     """Waits for an image to reach a given status.
 
@@ -103,11 +123,19 @@
     The client should also have build_interval and build_timeout attributes.
     """
     image = client.show_image(image_id)
+    # The compute image client returns the response wrapped in an 'image'
+    # element, which is not the case with the glance image client.
+    if 'image' in image:
+        image = image['image']
     start = int(time.time())
 
     while image['status'] != status:
         time.sleep(client.build_interval)
         image = client.show_image(image_id)
+        # The compute image client returns the response wrapped in an
+        # 'image' element, which is not the case with the glance image
+        # client.
+        if 'image' in image:
+            image = image['image']
         status_curr = image['status']
         if status_curr == 'ERROR':
             raise exceptions.AddImageException(image_id=image_id)
@@ -144,6 +172,8 @@
         volume_status = body['status']
         if volume_status == 'error':
             raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
+        if volume_status == 'error_restoring':
+            raise exceptions.VolumeRestoreErrorException(volume_id=volume_id)
 
         if int(time.time()) - start >= client.build_timeout:
             message = ('Volume %s failed to reach %s status (current %s) '
diff --git a/tempest/config.py b/tempest/config.py
index 7382088..e495695 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -22,6 +22,8 @@
 
 from oslo_log import log as logging
 
+from tempest.test_discover import plugins
+
 
 # TODO(marun) Replace use of oslo_config's global ConfigOpts
 # (cfg.CONF) instance with a local instance (cfg.ConfigOpts()) once
@@ -65,12 +67,13 @@
     cfg.ListOpt('tempest_roles',
                 help="Roles to assign to all users created by tempest",
                 default=[]),
-    cfg.StrOpt('tenant_isolation_domain_name',
-               default=None,
-               help="Only applicable when identity.auth_version is v3."
-                    "Domain within which isolated credentials are provisioned."
-                    "The default \"None\" means that the domain from the"
-                    "admin user is used instead."),
+    cfg.StrOpt('default_credentials_domain_name',
+               default='Default',
+               help="Default domain used when getting v3 credentials. "
+                    "This is the name keystone uses for v2 compatibility.",
+               deprecated_opts=[cfg.DeprecatedOpt(
+                                'tenant_isolation_domain_name',
+                                group='auth')]),
     cfg.BoolOpt('create_isolated_networks',
                 default=True,
                 help="If allow_tenant_isolation is set to True and Neutron is "
@@ -109,11 +112,30 @@
                     "services' region name unless they are set explicitly. "
                     "If no such region is found in the service catalog, the "
                     "first found one is used."),
-    cfg.StrOpt('endpoint_type',
+    cfg.StrOpt('v2_admin_endpoint_type',
+               default='adminURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help="The admin endpoint type to use for OpenStack Identity "
+                    "(Keystone) API v2",
+               deprecated_opts=[cfg.DeprecatedOpt('endpoint_type',
+                                                  group='identity')]),
+    cfg.StrOpt('v2_public_endpoint_type',
                default='publicURL',
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
-               help="The endpoint type to use for the identity service."),
+               help="The public endpoint type to use for OpenStack Identity "
+                    "(Keystone) API v2",
+               deprecated_opts=[cfg.DeprecatedOpt('endpoint_type',
+                                                  group='identity')]),
+    cfg.StrOpt('v3_endpoint_type',
+               default='adminURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help="The endpoint type to use for OpenStack Identity "
+                    "(Keystone) API v3",
+               deprecated_opts=[cfg.DeprecatedOpt('endpoint_type',
+                                                  group='identity')]),
     cfg.StrOpt('username',
                help="Username to use for Nova API requests."),
     cfg.StrOpt('tenant_name',
@@ -328,6 +350,10 @@
                 default=True,
                 help="Does the test environment support live migration "
                      "available?"),
+    cfg.BoolOpt('metadata_service',
+                default=True,
+                help="Does the test environment support metadata service? "
+                     "Ignored unless validation.run_validation=true."),
     cfg.BoolOpt('block_migration_for_live_migration',
                 default=False,
                 help="Does the test environment use block devices for live "
@@ -388,6 +414,14 @@
                      'encrypted volume to a running server instance? This may '
                      'depend on the combination of compute_driver in nova and '
                      'the volume_driver(s) in cinder.'),
+    # TODO(mriedem): Remove allow_duplicate_networks once kilo-eol happens
+    # since the option was removed from nova in Liberty and is the default
+    # behavior starting in Liberty.
+    cfg.BoolOpt('allow_duplicate_networks',
+                default=False,
+                help='Does the test environment support creating instances '
+                     'with multiple ports on the same network? This is only '
+                     'valid when using Neutron.'),
 ]
 
 
@@ -828,6 +862,16 @@
 ]
 
 
+telemetry_feature_group = cfg.OptGroup(name='telemetry-feature-enabled',
+                                       title='Enabled Ceilometer Features')
+
+TelemetryFeaturesGroup = [
+    cfg.BoolOpt('events',
+                default=False,
+                help="Runs Ceilometer event-related tests"),
+]
+
+
 dashboard_group = cfg.OptGroup(name="dashboard",
                                title="Dashboard options")
 
@@ -837,7 +881,8 @@
                help="Where the dashboard can be found"),
     cfg.StrOpt('login_url',
                default='http://localhost/auth/login/',
-               help="Login page for the dashboard"),
+               help="Login page for the dashboard",
+               deprecated_for_removal=True),
 ]
 
 
@@ -1168,6 +1213,7 @@
     (database_group, DatabaseGroup),
     (orchestration_group, OrchestrationGroup),
     (telemetry_group, TelemetryGroup),
+    (telemetry_feature_group, TelemetryFeaturesGroup),
     (dashboard_group, DashboardGroup),
     (data_processing_group, DataProcessingGroup),
     (data_processing_feature_group, DataProcessingFeaturesGroup),
@@ -1184,8 +1230,12 @@
 
 
 def register_opts():
+    ext_plugins = plugins.TempestTestPluginManager()
+    # Register in-tree tempest config options
     for g, o in _opts:
         register_opt_group(_CONF, g, o)
+    # Call external plugin config option registration
+    ext_plugins.register_plugin_opts(_CONF)
 
 
 def list_opts():
@@ -1194,7 +1244,10 @@
     The purpose of this is to allow tools like the Oslo sample config file
     generator to discover the options exposed to users.
     """
-    return [(getattr(g, 'name', None), o) for g, o in _opts]
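+    # Combine the in-tree option groups with any options registered by
+    # external test plugins so the config generator sees both.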
+    ext_plugins = plugins.TempestTestPluginManager()
+    opt_list = [(getattr(g, 'name', None), o) for g, o in _opts]
+    opt_list.extend(ext_plugins.get_plugin_options_list())
+    return opt_list
 
 
 # this should never be called outside of this class
@@ -1231,6 +1284,7 @@
         self.orchestration = _CONF.orchestration
         self.messaging = _CONF.messaging
         self.telemetry = _CONF.telemetry
+        self.telemetry_feature_enabled = _CONF['telemetry-feature-enabled']
         self.dashboard = _CONF.dashboard
         self.data_processing = _CONF.data_processing
         self.data_processing_feature_enabled = _CONF[
@@ -1243,9 +1297,11 @@
         self.baremetal = _CONF.baremetal
         self.input_scenario = _CONF['input-scenario']
         self.negative = _CONF.negative
-        _CONF.set_default('domain_name', self.identity.admin_domain_name,
+        _CONF.set_default('domain_name',
+                          self.auth.default_credentials_domain_name,
                           group='identity')
-        _CONF.set_default('alt_domain_name', self.identity.admin_domain_name,
+        _CONF.set_default('alt_domain_name',
+                          self.auth.default_credentials_domain_name,
                           group='identity')
 
     def __init__(self, parse_conf=True, config_path=None):
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 15617c6..15482ab 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -92,6 +92,10 @@
     message = "Volume %(volume_id)s failed to build and is in ERROR status"
 
 
+class VolumeRestoreErrorException(TempestException):
+    message = "Volume %(volume_id)s failed to restore and is in ERROR status"
+
+
 class SnapshotBuildErrorException(TempestException):
     message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
 
diff --git a/tempest/openstack/common/__init__.py b/tempest/openstack/common/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/openstack/common/__init__.py
+++ /dev/null
diff --git a/tempest/openstack/common/_i18n.py b/tempest/openstack/common/_i18n.py
deleted file mode 100644
index 5bbc77d..0000000
--- a/tempest/openstack/common/_i18n.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""oslo.i18n integration module.
-
-See http://docs.openstack.org/developer/oslo.i18n/usage.html
-
-"""
-
-try:
-    import oslo_i18n
-
-    # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
-    # application name when this module is synced into the separate
-    # repository. It is OK to have more than one translation function
-    # using the same domain, since there will still only be one message
-    # catalog.
-    _translators = oslo_i18n.TranslatorFactory(domain='tempest')
-
-    # The primary translation function using the well-known name "_"
-    _ = _translators.primary
-
-    # Translators for log levels.
-    #
-    # The abbreviated names are meant to reflect the usual use of a short
-    # name like '_'. The "L" is for "log" and the other letter comes from
-    # the level.
-    _LI = _translators.log_info
-    _LW = _translators.log_warning
-    _LE = _translators.log_error
-    _LC = _translators.log_critical
-except ImportError:
-    # NOTE(dims): Support for cases where a project wants to use
-    # code from oslo-incubator, but is not ready to be internationalized
-    # (like tempest)
-    _ = _LI = _LW = _LE = _LC = lambda x: x
diff --git a/tempest/openstack/common/versionutils.py b/tempest/openstack/common/versionutils.py
deleted file mode 100644
index 12d2e14..0000000
--- a/tempest/openstack/common/versionutils.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Helpers for comparing version strings.
-"""
-
-import copy
-import functools
-import inspect
-import logging
-
-from oslo_config import cfg
-import pkg_resources
-import six
-
-from tempest.openstack.common._i18n import _
-from oslo_log import log as logging
-
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-deprecated_opts = [
-    cfg.BoolOpt('fatal_deprecations',
-                default=False,
-                help='Enables or disables fatal status of deprecations.'),
-]
-
-
-def list_opts():
-    """Entry point for oslo.config-generator.
-    """
-    return [(None, copy.deepcopy(deprecated_opts))]
-
-
-class deprecated(object):
-    """A decorator to mark callables as deprecated.
-
-    This decorator logs a deprecation message when the callable it decorates is
-    used. The message will include the release where the callable was
-    deprecated, the release where it may be removed and possibly an optional
-    replacement.
-
-    Examples:
-
-    1. Specifying the required deprecated release
-
-    >>> @deprecated(as_of=deprecated.ICEHOUSE)
-    ... def a(): pass
-
-    2. Specifying a replacement:
-
-    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
-    ... def b(): pass
-
-    3. Specifying the release where the functionality may be removed:
-
-    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
-    ... def c(): pass
-
-    4. Specifying the deprecated functionality will not be removed:
-    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
-    ... def d(): pass
-
-    5. Specifying a replacement, deprecated functionality will not be removed:
-    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
-    ... def e(): pass
-
-    """
-
-    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
-    # expected we maintain a gap between Bexar and Folsom in this list.
-    BEXAR = 'B'
-    FOLSOM = 'F'
-    GRIZZLY = 'G'
-    HAVANA = 'H'
-    ICEHOUSE = 'I'
-    JUNO = 'J'
-    KILO = 'K'
-    LIBERTY = 'L'
-
-    _RELEASES = {
-        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
-        # expected we maintain a gap between Bexar and Folsom in this list.
-        'B': 'Bexar',
-        'F': 'Folsom',
-        'G': 'Grizzly',
-        'H': 'Havana',
-        'I': 'Icehouse',
-        'J': 'Juno',
-        'K': 'Kilo',
-        'L': 'Liberty',
-    }
-
-    _deprecated_msg_with_alternative = _(
-        '%(what)s is deprecated as of %(as_of)s in favor of '
-        '%(in_favor_of)s and may be removed in %(remove_in)s.')
-
-    _deprecated_msg_no_alternative = _(
-        '%(what)s is deprecated as of %(as_of)s and may be '
-        'removed in %(remove_in)s. It will not be superseded.')
-
-    _deprecated_msg_with_alternative_no_removal = _(
-        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
-
-    _deprecated_msg_with_no_alternative_no_removal = _(
-        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
-
-    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
-        """Initialize decorator
-
-        :param as_of: the release deprecating the callable. Constants
-            are define in this class for convenience.
-        :param in_favor_of: the replacement for the callable (optional)
-        :param remove_in: an integer specifying how many releases to wait
-            before removing (default: 2)
-        :param what: name of the thing being deprecated (default: the
-            callable's name)
-
-        """
-        self.as_of = as_of
-        self.in_favor_of = in_favor_of
-        self.remove_in = remove_in
-        self.what = what
-
-    def __call__(self, func_or_cls):
-        if not self.what:
-            self.what = func_or_cls.__name__ + '()'
-        msg, details = self._build_message()
-
-        if inspect.isfunction(func_or_cls):
-
-            @six.wraps(func_or_cls)
-            def wrapped(*args, **kwargs):
-                report_deprecated_feature(LOG, msg, details)
-                return func_or_cls(*args, **kwargs)
-            return wrapped
-        elif inspect.isclass(func_or_cls):
-            orig_init = func_or_cls.__init__
-
-            # TODO(tsufiev): change `functools` module to `six` as
-            # soon as six 1.7.4 (with fix for passing `assigned`
-            # argument to underlying `functools.wraps`) is released
-            # and added to the oslo-incubator requrements
-            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
-            def new_init(self, *args, **kwargs):
-                report_deprecated_feature(LOG, msg, details)
-                orig_init(self, *args, **kwargs)
-            func_or_cls.__init__ = new_init
-            return func_or_cls
-        else:
-            raise TypeError('deprecated can be used only with functions or '
-                            'classes')
-
-    def _get_safe_to_remove_release(self, release):
-        # TODO(dstanek): this method will have to be reimplemented once
-        #    when we get to the X release because once we get to the Y
-        #    release, what is Y+2?
-        new_release = chr(ord(release) + self.remove_in)
-        if new_release in self._RELEASES:
-            return self._RELEASES[new_release]
-        else:
-            return new_release
-
-    def _build_message(self):
-        details = dict(what=self.what,
-                       as_of=self._RELEASES[self.as_of],
-                       remove_in=self._get_safe_to_remove_release(self.as_of))
-
-        if self.in_favor_of:
-            details['in_favor_of'] = self.in_favor_of
-            if self.remove_in > 0:
-                msg = self._deprecated_msg_with_alternative
-            else:
-                # There are no plans to remove this function, but it is
-                # now deprecated.
-                msg = self._deprecated_msg_with_alternative_no_removal
-        else:
-            if self.remove_in > 0:
-                msg = self._deprecated_msg_no_alternative
-            else:
-                # There are no plans to remove this function, but it is
-                # now deprecated.
-                msg = self._deprecated_msg_with_no_alternative_no_removal
-        return msg, details
-
-
-def is_compatible(requested_version, current_version, same_major=True):
-    """Determine whether `requested_version` is satisfied by
-    `current_version`; in other words, `current_version` is >=
-    `requested_version`.
-
-    :param requested_version: version to check for compatibility
-    :param current_version: version to check against
-    :param same_major: if True, the major version must be identical between
-        `requested_version` and `current_version`. This is used when a
-        major-version difference indicates incompatibility between the two
-        versions. Since this is the common-case in practice, the default is
-        True.
-    :returns: True if compatible, False if not
-    """
-    requested_parts = pkg_resources.parse_version(requested_version)
-    current_parts = pkg_resources.parse_version(current_version)
-
-    if same_major and (requested_parts[0] != current_parts[0]):
-        return False
-
-    return current_parts >= requested_parts
-
-
-# Track the messages we have sent already. See
-# report_deprecated_feature().
-_deprecated_messages_sent = {}
-
-
-def report_deprecated_feature(logger, msg, *args, **kwargs):
-    """Call this function when a deprecated feature is used.
-
-    If the system is configured for fatal deprecations then the message
-    is logged at the 'critical' level and :class:`DeprecatedConfig` will
-    be raised.
-
-    Otherwise, the message will be logged (once) at the 'warn' level.
-
-    :raises: :class:`DeprecatedConfig` if the system is configured for
-             fatal deprecations.
-    """
-    stdmsg = _("Deprecated: %s") % msg
-    CONF.register_opts(deprecated_opts)
-    if CONF.fatal_deprecations:
-        logger.critical(stdmsg, *args, **kwargs)
-        raise DeprecatedConfig(msg=stdmsg)
-
-    # Using a list because a tuple with dict can't be stored in a set.
-    sent_args = _deprecated_messages_sent.setdefault(msg, list())
-
-    if args in sent_args:
-        # Already logged this message, so don't log it again.
-        return
-
-    sent_args.append(args)
-    logger.warn(stdmsg, *args, **kwargs)
-
-
-class DeprecatedConfig(Exception):
-    message = _("Fatal call to deprecated config: %(msg)s")
-
-    def __init__(self, msg):
-        super(Exception, self).__init__(self.message % dict(msg=msg))
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 03e572f..c216d9f 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -18,6 +18,7 @@
 
 import netaddr
 from oslo_log import log
+from oslo_serialization import jsonutils as json
 import six
 from tempest_lib.common.utils import misc as misc_utils
 from tempest_lib import exceptions as lib_exc
@@ -54,6 +55,8 @@
         cls.keypairs_client = cls.manager.keypairs_client
         # Nova security groups client
         cls.security_groups_client = cls.manager.security_groups_client
+        cls.security_group_rules_client = (
+            cls.manager.security_group_rules_client)
         cls.servers_client = cls.manager.servers_client
         cls.volumes_client = cls.manager.volumes_client
         cls.snapshots_client = cls.manager.snapshots_client
@@ -94,10 +97,11 @@
 
     def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
                              cleanup_callable, cleanup_args=None,
-                             cleanup_kwargs=None, ignore_error=True):
+                             cleanup_kwargs=None, waiter_client=None):
         """Adds wait for async resource deletion at the end of cleanups
 
         @param waiter_callable: callable to wait for the resource to delete
+            using the given waiter_client, if one is specified.
         @param thing_id: the id of the resource to be cleaned-up
         @param thing_id_param: the name of the id param in the waiter
         @param cleanup_callable: method to load pass to self.addCleanup with
@@ -113,6 +117,8 @@
             'waiter_callable': waiter_callable,
             thing_id_param: thing_id
         }
+        if waiter_client:
+            wait_dict['client'] = waiter_client
         self.cleanup_waits.append(wait_dict)
 
     def _wait_for_cleanups(self):
@@ -138,9 +144,9 @@
             client = self.keypairs_client
         name = data_utils.rand_name(self.__class__.__name__)
         # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name)
+        body = client.create_keypair(name=name)
         self.addCleanup(client.delete_keypair, name)
-        return body
+        return body['keypair']
 
     def create_server(self, name=None, image=None, flavor=None,
                       wait_on_boot=True, wait_on_delete=True,
@@ -170,13 +176,15 @@
         server = self.servers_client.create_server(name, image, flavor,
                                                    **create_kwargs)
         if wait_on_delete:
-            self.addCleanup(self.servers_client.wait_for_server_termination,
+            self.addCleanup(waiters.wait_for_server_termination,
+                            self.servers_client,
                             server['id'])
         self.addCleanup_with_wait(
-            waiter_callable=self.servers_client.wait_for_server_termination,
+            waiter_callable=waiters.wait_for_server_termination,
             thing_id=server['id'], thing_id_param='server_id',
             cleanup_callable=self.delete_wrapper,
-            cleanup_args=[self.servers_client.delete_server, server['id']])
+            cleanup_args=[self.servers_client.delete_server, server['id']],
+            waiter_client=self.servers_client)
         if wait_on_boot:
             waiters.wait_for_server_status(self.servers_client,
                                            server_id=server['id'],
@@ -217,8 +225,9 @@
 
     def _create_loginable_secgroup_rule(self, secgroup_id=None):
         _client = self.security_groups_client
+        _client_rules = self.security_group_rules_client
         if secgroup_id is None:
-            sgs = _client.list_security_groups()
+            sgs = _client.list_security_groups()['security_groups']
             for sg in sgs:
                 if sg['name'] == 'default':
                     secgroup_id = sg['id']
@@ -230,14 +239,14 @@
         rulesets = [
             {
                 # ssh
-                'ip_proto': 'tcp',
+                'ip_protocol': 'tcp',
                 'from_port': 22,
                 'to_port': 22,
                 'cidr': '0.0.0.0/0',
             },
             {
                 # ping
-                'ip_proto': 'icmp',
+                'ip_protocol': 'icmp',
                 'from_port': -1,
                 'to_port': -1,
                 'cidr': '0.0.0.0/0',
@@ -245,10 +254,10 @@
         ]
         rules = list()
         for ruleset in rulesets:
-            sg_rule = _client.create_security_group_rule(secgroup_id,
-                                                         **ruleset)
+            sg_rule = _client_rules.create_security_group_rule(
+                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
             self.addCleanup(self.delete_wrapper,
-                            _client.delete_security_group_rule,
+                            _client_rules.delete_security_group_rule,
                             sg_rule['id'])
             rules.append(sg_rule)
         return rules
@@ -258,7 +267,7 @@
         sg_name = data_utils.rand_name(self.__class__.__name__)
         sg_desc = sg_name + " description"
         secgroup = self.security_groups_client.create_security_group(
-            sg_name, sg_desc)
+            name=sg_name, description=sg_desc)['security_group']
         self.assertEqual(secgroup['name'], sg_name)
         self.assertEqual(secgroup['description'], sg_desc)
         self.addCleanup(self.delete_wrapper,
@@ -339,7 +348,7 @@
             'is_public': 'False',
         }
         params['properties'] = properties
-        image = self.image_client.create_image(**params)
+        image = self.image_client.create_image(**params)['image']
         self.addCleanup(self.image_client.delete_image, image['id'])
         self.assertEqual("queued", image['status'])
         self.image_client.update_image(image['id'], data=image_file)
@@ -399,7 +408,7 @@
         if name is None:
             name = data_utils.rand_name('scenario-snapshot')
         LOG.debug("Creating a snapshot image for server: %s", server['name'])
-        image = _images_client.create_image(server['id'], name)
+        image = _images_client.create_image(server['id'], name=name)
         image_id = image.response['location'].split('images/')[1]
         _image_client.wait_for_image_status(image_id, 'active')
         self.addCleanup_with_wait(
@@ -408,6 +417,19 @@
             cleanup_callable=self.delete_wrapper,
             cleanup_args=[_image_client.delete_image, image_id])
         snapshot_image = _image_client.get_image_meta(image_id)
+
+        bdm = snapshot_image.get('properties', {}).get('block_device_mapping')
+        if bdm:
+            bdm = json.loads(bdm)
+            if bdm and 'snapshot_id' in bdm[0]:
+                snapshot_id = bdm[0]['snapshot_id']
+                self.addCleanup(
+                    self.snapshots_client.wait_for_resource_deletion,
+                    snapshot_id)
+                self.addCleanup(
+                    self.delete_wrapper, self.snapshots_client.delete_snapshot,
+                    snapshot_id)
+
         image_name = snapshot_image['name']
         self.assertEqual(name, image_name)
         LOG.debug("Created snapshot image %s for server %s",
@@ -416,7 +438,7 @@
 
     def nova_volume_attach(self):
         volume = self.servers_client.attach_volume(
-            self.server['id'], self.volume['id'], '/dev/%s'
+            self.server['id'], volumeId=self.volume['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)
         self.assertEqual(self.volume['id'], volume['id'])
         self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
@@ -441,9 +463,10 @@
 
         LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
                   server_id, image, preserve_ephemeral)
-        self.servers_client.rebuild(server_id=server_id, image_ref=image,
-                                    preserve_ephemeral=preserve_ephemeral,
-                                    **rebuild_kwargs)
+        self.servers_client.rebuild_server(
+            server_id=server_id, image_ref=image,
+            preserve_ephemeral=preserve_ephemeral,
+            **rebuild_kwargs)
         if wait:
             waiters.wait_for_server_status(self.servers_client,
                                            server_id, 'ACTIVE')
@@ -513,7 +536,8 @@
         Nova clients
         """
 
-        floating_ip = self.floating_ips_client.create_floating_ip(pool_name)
+        floating_ip = (self.floating_ips_client.create_floating_ip(pool_name)
+                       ['floating_ip'])
         self.addCleanup(self.delete_wrapper,
                         self.floating_ips_client.delete_floating_ip,
                         floating_ip['id'])
@@ -660,14 +684,18 @@
     def _get_server_port_id_and_ip4(self, server, ip_addr=None):
         ports = self._list_ports(device_id=server['id'],
                                  fixed_ip=ip_addr)
-        self.assertEqual(len(ports), 1,
-                         "Unable to determine which port to target.")
         # it might happen here that this port has more than one ip address
         # as in the dual-stack case, when this port is created on 2 subnets
-        for ip46 in ports[0]['fixed_ips']:
-            ip = ip46['ip_address']
-            if netaddr.valid_ipv4(ip):
-                return ports[0]['id'], ip
+        port_map = [(p["id"], fxip["ip_address"])
+                    for p in ports
+                    for fxip in p["fixed_ips"]
+                    if netaddr.valid_ipv4(fxip["ip_address"])]
+
+        self.assertEqual(len(port_map), 1,
+                         "Found multiple IPv4 addresses: %s. "
+                         "Unable to determine which port to target."
+                         % port_map)
+        return port_map[0]
 
     def _get_network_by_name(self, network_name):
         net = self._list_networks(name=network_name)
@@ -1080,6 +1108,7 @@
                 ports.append({'port': port.id})
             if ports:
                 create_kwargs['networks'] = ports
+            self.ports = ports
 
         return super(NetworkScenarioTest, self).create_server(
             name=name, image=image, flavor=flavor,
@@ -1274,7 +1303,7 @@
         randomized_name = data_utils.rand_name('scenario-type-' + name)
         LOG.debug("Creating a volume type: %s", randomized_name)
         body = client.create_volume_type(
-            randomized_name)
+            randomized_name)['volume_type']
         self.assertIn('id', body)
         self.addCleanup(client.delete_volume_type, body['id'])
         return body
@@ -1290,7 +1319,7 @@
         LOG.debug("Creating an encryption type for volume type: %s", type_id)
         client.create_encryption_type(
             type_id, provider=provider, key_size=key_size, cipher=cipher,
-            control_location=control_location)
+            control_location=control_location)['encryption']
 
 
 class SwiftScenarioTest(ScenarioTest):
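
A recurring change in this file (and in the scenario tests below) is to call
the module-level waiter with an explicit client rather than a method on the
client, and to thread that client through ``addCleanup_with_wait`` via the
new ``waiter_client`` argument. A minimal sketch of the call shape, using
only names that appear in the hunks above::

    from tempest.common import waiters

    # old style: waiter as a method on the client
    #   self.servers_client.wait_for_server_termination(server['id'])
    # new style: module-level waiter that takes the client explicitly
    waiters.wait_for_server_termination(self.servers_client, server['id'])

    # addCleanup_with_wait now stores waiter_client under the 'client' key
    # of wait_dict, so the stored waiter callable can be invoked with it
    # during _wait_for_cleanups.
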
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 02d1171..22d2603 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -46,7 +46,8 @@
         cls.hosts_client = cls.manager.hosts_client
 
     def _create_aggregate(self, **kwargs):
-        aggregate = self.aggregates_client.create_aggregate(**kwargs)
+        aggregate = (self.aggregates_client.create_aggregate(**kwargs)
+                     ['aggregate'])
         self.addCleanup(self._delete_aggregate, aggregate)
         aggregate_name = kwargs['name']
         availability_zone = kwargs['availability_zone']
@@ -58,23 +59,25 @@
         self.aggregates_client.delete_aggregate(aggregate['id'])
 
     def _get_host_name(self):
-        hosts = self.hosts_client.list_hosts()
+        hosts = self.hosts_client.list_hosts()['hosts']
         self.assertTrue(len(hosts) >= 1)
         computes = [x for x in hosts if x['service'] == 'compute']
         return computes[0]['host_name']
 
     def _add_host(self, aggregate_id, host):
-        aggregate = self.aggregates_client.add_host(aggregate_id, host)
+        aggregate = (self.aggregates_client.add_host(aggregate_id, host=host)
+                     ['aggregate'])
         self.addCleanup(self._remove_host, aggregate['id'], host)
         self.assertIn(host, aggregate['hosts'])
 
     def _remove_host(self, aggregate_id, host):
-        aggregate = self.aggregates_client.remove_host(aggregate_id, host)
-        self.assertNotIn(host, aggregate['hosts'])
+        aggregate = self.aggregates_client.remove_host(aggregate_id, host=host)
+        self.assertNotIn(host, aggregate['aggregate']['hosts'])
 
     def _check_aggregate_details(self, aggregate, aggregate_name, azone,
                                  hosts, metadata):
-        aggregate = self.aggregates_client.show_aggregate(aggregate['id'])
+        aggregate = (self.aggregates_client.show_aggregate(aggregate['id'])
+                     ['aggregate'])
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertEqual(azone, aggregate['availability_zone'])
         self.assertEqual(hosts, aggregate['hosts'])
@@ -85,16 +88,17 @@
 
     def _set_aggregate_metadata(self, aggregate, meta):
         aggregate = self.aggregates_client.set_metadata(aggregate['id'],
-                                                        meta)
+                                                        metadata=meta)
 
         for key, value in meta.items():
-            self.assertEqual(meta[key], aggregate['metadata'][key])
+            self.assertEqual(meta[key],
+                             aggregate['aggregate']['metadata'][key])
 
     def _update_aggregate(self, aggregate, aggregate_name,
                           availability_zone):
         aggregate = self.aggregates_client.update_aggregate(
             aggregate['id'], name=aggregate_name,
-            availability_zone=availability_zone)
+            availability_zone=availability_zone)['aggregate']
         self.assertEqual(aggregate['name'], aggregate_name)
         self.assertEqual(aggregate['availability_zone'], availability_zone)
         return aggregate
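
Because the aggregates client now returns the whole response body (see the
``aggregates_client.py`` hunks near the end of this patch), the scenario
test unwraps the ``aggregate`` key itself. A minimal sketch of that access
pattern; the argument values are illustrative only::

    aggregate = self.aggregates_client.create_aggregate(
        name='test-aggregate', availability_zone='nova')['aggregate']
    details = self.aggregates_client.show_aggregate(
        aggregate['id'])['aggregate']
    # details['hosts'], details['metadata'], etc. now sit one level below
    # the top-level 'aggregate' key of the ResponseBody.
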
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index 346f56b..c0b5a44 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -101,14 +101,15 @@
     def get_flavor_ephemeral_size(self):
         """Returns size of the ephemeral partition in GiB."""
         f_id = self.instance['flavor']['id']
-        flavor = self.flavors_client.show_flavor(f_id)
+        flavor = self.flavors_client.show_flavor(f_id)['flavor']
         ephemeral = flavor.get('OS-FLV-EXT-DATA:ephemeral')
         if not ephemeral or ephemeral == 'N/A':
             return None
         return int(ephemeral)
 
     def add_floating_ip(self):
-        floating_ip = self.floating_ips_client.create_floating_ip()
+        floating_ip = (self.floating_ips_client.create_floating_ip()
+                       ['floating_ip'])
         self.floating_ips_client.associate_floating_ip_to_server(
             floating_ip['ip'], self.instance['id'])
         return floating_ip['ip']
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index eb018eb..8e91a6d 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -26,6 +26,7 @@
 class HorizonHTMLParser(HTMLParser.HTMLParser):
     csrf_token = None
     region = None
+    login = None
 
     def _find_name(self, attrs, name):
         for attrpair in attrs:
@@ -39,12 +40,20 @@
                 return attrpair[1]
         return None
 
+    def _find_attr_value(self, attrs, attr_name):
+        for attrpair in attrs:
+            if attrpair[0] == attr_name:
+                return attrpair[1]
+        return None
+
     def handle_starttag(self, tag, attrs):
         if tag == 'input':
             if self._find_name(attrs, 'csrfmiddlewaretoken'):
                 self.csrf_token = self._find_value(attrs)
             if self._find_name(attrs, 'region'):
                 self.region = self._find_value(attrs)
+        if tag == 'form':
+            self.login = self._find_attr_value(attrs, 'action')
 
 
 class TestDashboardBasicOps(manager.ScenarioTest):
@@ -79,8 +88,12 @@
         parser = HorizonHTMLParser()
         parser.feed(response)
 
+        # construct the login URL for the dashboard; discovery accommodates
+        # a non-/ web root for the dashboard
+        login_url = CONF.dashboard.dashboard_url + parser.login[1:]
+
         # Prepare login form request
-        req = request.Request(CONF.dashboard.login_url)
+        req = request.Request(login_url)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         req.add_header('Referer', CONF.dashboard.dashboard_url)
         params = {'username': username,
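
Instead of reading a fixed ``CONF.dashboard.login_url``, the parser now
records the ``action`` attribute of the login form, so the test also works
when Horizon is served from a non-root path. A minimal sketch of the
discovery step; the HTML snippet is illustrative::

    parser = HorizonHTMLParser()
    parser.feed('<form action="/auth/login/" method="post">')
    # parser.login is now '/auth/login/'; drop its leading '/' before
    # appending it to the configured dashboard_url.
    login_url = CONF.dashboard.dashboard_url + parser.login[1:]
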
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index fa70d3f..2ad5d2d 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -87,7 +87,8 @@
         # Since no traffic is tested, we don't need to actually add rules to
         # secgroup
         secgroup = self.security_groups_client.create_security_group(
-            'secgroup-%s' % name, 'secgroup-desc-%s' % name)
+            name='secgroup-%s' % name,
+            description='secgroup-desc-%s' % name)['security_group']
         self.addCleanupClass(self.security_groups_client.delete_security_group,
                              secgroup['id'])
         create_kwargs = {
@@ -109,8 +110,8 @@
         for server in self.servers:
             # after deleting all servers - wait for all servers to clear
             # before cleanup continues
-            self.addCleanupClass(self.servers_client.
-                                 wait_for_server_termination,
+            self.addCleanupClass(waiters.wait_for_server_termination,
+                                 self.servers_client,
                                  server['id'])
         for server in self.servers:
             self.addCleanupClass(self.servers_client.delete_server,
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index f868382..c38ce2d 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -79,7 +79,7 @@
         self.assertEqual(self.volume, volume)
 
     def nova_reboot(self):
-        self.servers_client.reboot(self.server['id'], 'SOFT')
+        self.servers_client.reboot_server(self.server['id'], 'SOFT')
         self._wait_for_server_status('ACTIVE')
 
     def check_partitions(self):
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 3d233d8..71c48fe 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -111,7 +111,8 @@
     @test.services('compute', 'network')
     def test_server_connectivity_reboot(self):
         self._setup_network_and_servers()
-        self.servers_client.reboot(self.server['id'], reboot_type='SOFT')
+        self.servers_client.reboot_server(self.server['id'],
+                                          reboot_type='SOFT')
         self._wait_server_status_and_check_network_connectivity()
 
     @test.idempotent_id('88a529c2-1daa-4c85-9aec-d541ba3eb699')
@@ -119,8 +120,8 @@
     def test_server_connectivity_rebuild(self):
         self._setup_network_and_servers()
         image_ref_alt = CONF.compute.image_ref_alt
-        self.servers_client.rebuild(self.server['id'],
-                                    image_ref=image_ref_alt)
+        self.servers_client.rebuild_server(self.server['id'],
+                                           image_ref=image_ref_alt)
         self._wait_server_status_and_check_network_connectivity()
 
     @test.idempotent_id('2b2642db-6568-4b35-b812-eceed3fa20ce')
@@ -159,7 +160,8 @@
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
             raise self.skipException(msg)
         self._setup_network_and_servers()
-        self.servers_client.resize(self.server['id'], flavor_ref=resize_flavor)
+        self.servers_client.resize_server(self.server['id'],
+                                          flavor_ref=resize_flavor)
         waiters.wait_for_server_status(self.servers_client, self.server['id'],
                                        'VERIFY_RESIZE')
         self.servers_client.confirm_resize(self.server['id'])
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index ff58eea..b262c17 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -20,6 +20,7 @@
 import testtools
 
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import exceptions
 from tempest.scenario import manager
@@ -107,10 +108,12 @@
         self.network, self.subnet, self.router = self.create_networks(**kwargs)
         self.check_networks()
 
+        self.ports = []
         self.port_id = None
         if boot_with_port:
             # create a port on the network and boot with that
             self.port_id = self._create_port(self.network['id']).id
+            self.ports.append({'port': self.port_id})
 
         name = data_utils.rand_name('server-smoke')
         server = self._create_server(name, self.network, self.port_id)
@@ -244,7 +247,7 @@
         old_port = port_list[0]
         interface = self.interface_client.create_interface(
             server_id=server['id'],
-            network_id=self.new_net.id)
+            net_id=self.new_net.id)['interfaceAttachment']
         self.addCleanup(self.network_client.wait_for_resource_deletion,
                         'port',
                         interface['port_id'])
@@ -338,8 +341,8 @@
 
         for remote_ip in address_list:
             if should_connect:
-                msg = "Timed out waiting for "
-                "%s to become reachable" % remote_ip
+                msg = ("Timed out waiting for %s to become "
+                       "reachable") % remote_ip
             else:
                 msg = "ip address %s is reachable" % remote_ip
             try:
@@ -631,7 +634,11 @@
         # Setup the network, create a port and boot the server from that port.
         self._setup_network_and_servers(boot_with_port=True)
         _, server = self.floating_ip_tuple
-        self.assertIsNotNone(self.port_id,
+        self.assertEqual(1, len(self.ports),
+                         'There should only be one port created for '
+                         'server %s.' % server['id'])
+        port_id = self.ports[0]['port']
+        self.assertIsNotNone(port_id,
                              'Server should have been created from a '
                              'pre-existing port.')
         # Assert the port is bound to the server.
@@ -640,13 +647,13 @@
         self.assertEqual(1, len(port_list),
                          'There should only be one port created for '
                          'server %s.' % server['id'])
-        self.assertEqual(self.port_id, port_list[0]['id'])
+        self.assertEqual(port_id, port_list[0]['id'])
         # Delete the server.
         self.servers_client.delete_server(server['id'])
-        self.servers_client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
         # Assert the port still exists on the network but is unbound from
         # the deleted server.
-        port = self.network_client.show_port(self.port_id)['port']
+        port = self.network_client.show_port(port_id)['port']
         self.assertEqual(self.network['id'], port['network_id'])
         self.assertEqual('', port['device_id'])
         self.assertEqual('', port['device_owner'])
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index fba839a..a9394cb 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -27,13 +27,17 @@
 
 
 class TestGettingAddress(manager.NetworkScenarioTest):
-    """Create network with subnets: one IPv4 and
-    one or few IPv6 in a given address mode
-    Boot 2 VMs on this network
-    Allocate and assign 2 FIP4
-    Check that vNICs of all VMs gets all addresses actually assigned
-    Ping4 to one VM from another one
-    If ping6 available in VM, do ping6 to all v6 addresses
+    """Test Summary:
+
+    1. Create network with subnets:
+        1.1. one IPv4 and
+        1.2. one or more IPv6 in a given address mode
+    2. Boot 2 VMs on this network
+    3. Allocate and assign 2 FIP4
+    4. Check that the vNICs of all VMs get all the assigned addresses
+    5. Each VM will ping the other's v4 private address
+    6. If ping6 is available in the VM, each VM will ping all the other's v6
+       addresses as well as the router's
     """
 
     @classmethod
@@ -65,23 +69,30 @@
             'key_name': self.keypair['name'],
             'security_groups': [{'name': self.sec_grp['name']}]}
 
-    def prepare_network(self, address6_mode, n_subnets6=1):
+    def prepare_network(self, address6_mode, n_subnets6=1, dualnet=False):
         """Creates network with
          given number of IPv6 subnets in the given mode and
          one IPv4 subnet
          Creates router with ports on all subnets
+         if dualnet is True, create the IPv6 subnets on a separate network
+         :return: list of created networks
         """
         self.network = self._create_network(tenant_id=self.tenant_id)
+        if dualnet:
+            self.network_v6 = self._create_network(tenant_id=self.tenant_id)
+
         sub4 = self._create_subnet(network=self.network,
                                    namestart='sub4',
-                                   ip_version=4,)
+                                   ip_version=4)
 
         router = self._get_router(tenant_id=self.tenant_id)
         sub4.add_to_router(router_id=router['id'])
         self.addCleanup(sub4.delete)
 
+        self.subnets_v6 = []
         for _ in range(n_subnets6):
-            sub6 = self._create_subnet(network=self.network,
+            net6 = self.network_v6 if dualnet else self.network
+            sub6 = self._create_subnet(network=net6,
                                        namestart='sub6',
                                        ip_version=6,
                                        ipv6_ra_mode=address6_mode,
@@ -89,6 +100,9 @@
 
             sub6.add_to_router(router_id=router['id'])
             self.addCleanup(sub6.delete)
+            self.subnets_v6.append(sub6)
+
+        return [self.network, self.network_v6] if dualnet else [self.network]
 
     @staticmethod
     def define_server_ips(srv):
@@ -101,11 +115,12 @@
                     ips['4'] = nic['addr']
         return ips
 
-    def prepare_server(self):
+    def prepare_server(self, networks=None):
         username = CONF.compute.image_ssh_user
 
         create_kwargs = self.srv_kwargs
-        create_kwargs['networks'] = [{'uuid': self.network.id}]
+        networks = networks or [self.network]
+        create_kwargs['networks'] = [{'uuid': n.id} for n in networks]
 
         srv = self.create_server(create_kwargs=create_kwargs)
         fip = self.create_floating_ip(thing=srv)
@@ -113,18 +128,42 @@
         ssh = self.get_remote_client(
             server_or_ip=fip.floating_ip_address,
             username=username)
-        return ssh, ips
+        return ssh, ips, srv["id"]
 
-    def _prepare_and_test(self, address6_mode, n_subnets6=1):
-        self.prepare_network(address6_mode=address6_mode,
-                             n_subnets6=n_subnets6)
+    def turn_nic6_on(self, ssh, sid):
+        """Turns the IPv6 vNIC on
 
-        sshv4_1, ips_from_api_1 = self.prepare_server()
-        sshv4_2, ips_from_api_2 = self.prepare_server()
+        Required because guest images usually set only the first vNIC on boot.
+        Searches for the IPv6 vNIC's MAC and brings it up.
+
+        @param ssh: RemoteClient ssh instance to server
+        @param sid: server uuid
+        """
+        ports = [p["mac_address"] for p in
+                 self._list_ports(device_id=sid,
+                                  network_id=self.network_v6.id)]
+        self.assertEqual(1, len(ports),
+                         message="Multiple IPv6 ports found on network %s"
+                         % self.network_v6)
+        mac6 = ports[0]
+        ssh.turn_nic_on(ssh.get_nic_name(mac6))
+
+    def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
+        net_list = self.prepare_network(address6_mode=address6_mode,
+                                        n_subnets6=n_subnets6,
+                                        dualnet=dualnet)
+
+        sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
+        sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)
 
         def guest_has_address(ssh, addr):
             return addr in ssh.get_ip_list()
 
+        # Turn on 2nd NIC for Cirros when dualnet
+        if dualnet:
+            self.turn_nic6_on(sshv4_1, sid1)
+            self.turn_nic6_on(sshv4_2, sid2)
+
         # get addresses assigned to vNIC as reported by 'ip address' utility
         ips_from_ip_1 = sshv4_1.get_ip_list()
         ips_from_ip_2 = sshv4_2.get_ip_list()
@@ -145,23 +184,32 @@
             self.assertTrue(test.call_until_true(srv2_v6_addr_assigned,
                                                  CONF.compute.ping_timeout, 1))
 
-        result = sshv4_1.ping_host(ips_from_api_2['4'])
-        self.assertIn('0% packet loss', result)
-        result = sshv4_2.ping_host(ips_from_api_1['4'])
-        self.assertIn('0% packet loss', result)
+        self._check_connectivity(sshv4_1, ips_from_api_2['4'])
+        self._check_connectivity(sshv4_2, ips_from_api_1['4'])
 
         # Some VM (like cirros) may not have ping6 utility
         result = sshv4_1.exec_command('whereis ping6')
         is_ping6 = False if result == 'ping6:\n' else True
         if is_ping6:
             for i in range(n_subnets6):
-                result = sshv4_1.ping_host(ips_from_api_2['6'][i])
-                self.assertIn('0% packet loss', result)
-                result = sshv4_2.ping_host(ips_from_api_1['6'][i])
-                self.assertIn('0% packet loss', result)
+                self._check_connectivity(sshv4_1,
+                                         ips_from_api_2['6'][i])
+                self._check_connectivity(sshv4_1,
+                                         self.subnets_v6[i].gateway_ip)
+                self._check_connectivity(sshv4_2,
+                                         ips_from_api_1['6'][i])
+                self._check_connectivity(sshv4_2,
+                                         self.subnets_v6[i].gateway_ip)
         else:
             LOG.warning('Ping6 is not available, skipping')
 
+    def _check_connectivity(self, source, dest):
+        self.assertTrue(
+            self._check_remote_connectivity(source, dest),
+            "Timed out waiting for %s to become reachable from %s" %
+            (dest, source.ssh_client.host)
+        )
+
     @test.idempotent_id('2c92df61-29f0-4eaa-bee3-7c65bef62a43')
     @test.services('compute', 'network')
     def test_slaac_from_os(self):
@@ -181,3 +229,25 @@
     @test.services('compute', 'network')
     def test_multi_prefix_slaac(self):
         self._prepare_and_test(address6_mode='slaac', n_subnets6=2)
+
+    @test.idempotent_id('b6399d76-4438-4658-bcf5-0d6c8584fde2')
+    @test.services('compute', 'network')
+    def test_dualnet_slaac_from_os(self):
+        self._prepare_and_test(address6_mode='slaac', dualnet=True)
+
+    @test.idempotent_id('76f26acd-9688-42b4-bc3e-cd134c4cb09e')
+    @test.services('compute', 'network')
+    def test_dualnet_dhcp6_stateless_from_os(self):
+        self._prepare_and_test(address6_mode='dhcpv6-stateless', dualnet=True)
+
+    @test.idempotent_id('cf1c4425-766b-45b8-be35-e2959728eb00')
+    @test.services('compute', 'network')
+    def test_dualnet_multi_prefix_dhcpv6_stateless(self):
+        self._prepare_and_test(address6_mode='dhcpv6-stateless', n_subnets6=2,
+                               dualnet=True)
+
+    @test.idempotent_id('9178ad42-10e4-47e9-8987-e02b170cc5cd')
+    @test.services('compute', 'network')
+    def test_dualnet_multi_prefix_slaac(self):
+        self._prepare_and_test(address6_mode='slaac', n_subnets6=2,
+                               dualnet=True)
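
With ``dualnet=True``, ``prepare_network`` places the IPv6 subnets on a
second network and returns both networks, and ``prepare_server`` boots each
guest with one NIC per returned network. A minimal sketch of how the flag
flows through the helpers above; values are illustrative::

    net_list = self.prepare_network(address6_mode='slaac', n_subnets6=2,
                                    dualnet=True)
    # net_list == [self.network, self.network_v6]
    create_kwargs['networks'] = [{'uuid': n.id} for n in net_list]
    # Guest images such as Cirros only bring up the first NIC, so
    # turn_nic6_on() is called on each server to enable the vNIC that is
    # attached to network_v6.
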
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 18fd09d..1db1ac2 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -161,9 +161,6 @@
 
         cls.floating_ip_access = not CONF.network.public_router_id
 
-    def cleanup_wrapper(self, resource):
-        self.cleanup_resource(resource, self.__class__.__name__)
-
     def setUp(self):
         super(TestSecurityGroupsBasicOps, self).setUp()
         self._deploy_tenant(self.primary_tenant)
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index e405cf5..8693fb3 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -58,7 +58,7 @@
         resize_flavor = CONF.compute.flavor_ref_alt
         LOG.debug("Resizing instance %s from flavor %s to flavor %s",
                   instance['id'], instance['flavor']['id'], resize_flavor)
-        self.servers_client.resize(instance_id, resize_flavor)
+        self.servers_client.resize_server(instance_id, resize_flavor)
         waiters.wait_for_server_status(self.servers_client, instance_id,
                                        'VERIFY_RESIZE')
 
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d9918f3..0f27cd1 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -16,6 +16,7 @@
 from oslo_log import log as logging
 
 from tempest import config
+from tempest import exceptions
 from tempest.scenario import manager
 from tempest.scenario import utils as test_utils
 from tempest import test
@@ -37,6 +38,7 @@
      * Add simple permissive rules to the security group
      * Launch an instance
      * Perform ssh to instance
+     * Verify metadata service
      * Terminate the instance
     """
 
@@ -81,19 +83,42 @@
     def verify_ssh(self):
         if self.run_ssh:
             # Obtain a floating IP
-            floating_ip = self.floating_ips_client.create_floating_ip()
+            self.floating_ip = (self.floating_ips_client.create_floating_ip()
+                                ['floating_ip'])
             self.addCleanup(self.delete_wrapper,
                             self.floating_ips_client.delete_floating_ip,
-                            floating_ip['id'])
+                            self.floating_ip['id'])
             # Attach a floating IP
             self.floating_ips_client.associate_floating_ip_to_server(
-                floating_ip['ip'], self.instance['id'])
+                self.floating_ip['ip'], self.instance['id'])
             # Check ssh
-            self.get_remote_client(
-                server_or_ip=floating_ip['ip'],
+            self.ssh_client = self.get_remote_client(
+                server_or_ip=self.floating_ip['ip'],
                 username=self.image_utils.ssh_user(self.image_ref),
                 private_key=self.keypair['private_key'])
 
+    def verify_metadata(self):
+        if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
+            # Verify metadata service
+            md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+
+            def exec_cmd_and_verify_output():
+                cmd = 'curl ' + md_url
+                floating_ip = self.floating_ip['ip']
+                result = self.ssh_client.exec_command(cmd)
+                if result:
+                    msg = ('Failed while verifying metadata on server. Result '
+                           'of command "%s" is NOT "%s".' % (cmd, floating_ip))
+                    self.assertEqual(floating_ip, result, msg)
+                    return 'Verification is successful!'
+
+            if not test.call_until_true(exec_cmd_and_verify_output,
+                                        CONF.compute.build_timeout,
+                                        CONF.compute.build_interval):
+                raise exceptions.TimeoutException('Timed out while waiting to '
+                                                  'verify metadata on server. '
+                                                  '%s is empty.' % md_url)
+
     @test.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
     @test.attr(type='smoke')
     @test.services('compute', 'network')
@@ -102,4 +127,5 @@
         self.security_group = self._create_security_group()
         self.boot_instance()
         self.verify_ssh()
+        self.verify_metadata()
         self.servers_client.delete_server(self.instance['id'])
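
The new ``verify_metadata`` step polls the metadata service from inside the
guest until it reports the instance's floating IP. A stripped-down sketch of
the same retry shape; the surrounding test wiring is omitted::

    md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

    def _metadata_matches():
        # Truthy only once the metadata service answers with the
        # expected address.
        result = self.ssh_client.exec_command('curl ' + md_url)
        return result == self.floating_ip['ip']

    if not test.call_until_true(_metadata_matches,
                                CONF.compute.build_timeout,
                                CONF.compute.build_interval):
        raise exceptions.TimeoutException('metadata service did not return '
                                          'the expected address')
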
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index 02ee7b9..022306e 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -82,7 +82,8 @@
                                     create_kwargs=create_kwargs)
 
         if CONF.compute.use_floatingip_for_ssh:
-            floating_ip = self.floating_ips_client.create_floating_ip()
+            floating_ip = (self.floating_ips_client.create_floating_ip()
+                           ['floating_ip'])
             self.addCleanup(self.delete_wrapper,
                             self.floating_ips_client.delete_floating_ip,
                             floating_ip['id'])
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c1d9a1b..a7bdba3 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -80,12 +80,13 @@
     def _create_volume_snapshot(self, volume):
         snapshot_name = data_utils.rand_name('scenario-snapshot')
         _, snapshot = self.snapshots_client.create_snapshot(
-            volume['id'], display_name=snapshot_name)
+            volume['id'], display_name=snapshot_name)['snapshot']
 
         def cleaner():
             self.snapshots_client.delete_snapshot(snapshot['id'])
             try:
-                while self.snapshots_client.show_snapshot(snapshot['id']):
+                while self.snapshots_client.show_snapshot(
+                    snapshot['id'])['snapshot']:
                     time.sleep(1)
             except lib_exc.NotFound:
                 pass
@@ -104,7 +105,7 @@
 
     def _attach_volume(self, server, volume):
         attached_volume = self.servers_client.attach_volume(
-            server['id'], volume['id'], device='/dev/%s'
+            server['id'], volumeId=volume['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)
         self.assertEqual(volume['id'], attached_volume['id'])
         self._wait_for_volume_status(attached_volume, 'in-use')
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 45b7b74..4912033 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -48,7 +48,7 @@
         vol_name = data_utils.rand_name('volume-origin')
         return self.create_volume(name=vol_name, imageRef=img_uuid)
 
-    def _boot_instance_from_volume(self, vol_id, keypair, security_group):
+    def _get_bdm(self, vol_id, delete_on_termination=False):
         # NOTE(gfidente): the syntax for block_device_mapping is
         # dev_name=id:type:size:delete_on_terminate
         # where type needs to be "snap" if the server is booted
@@ -56,12 +56,20 @@
         bd_map = [{
             'device_name': 'vda',
             'volume_id': vol_id,
-            'delete_on_termination': '0'}]
-        create_kwargs = {
-            'block_device_mapping': bd_map,
-            'key_name': keypair['name'],
-            'security_groups': [{'name': security_group['name']}]
-        }
+            'delete_on_termination': str(int(delete_on_termination))}]
+        return {'block_device_mapping': bd_map}
+
+    def _boot_instance_from_volume(self, vol_id, keypair=None,
+                                   security_group=None,
+                                   delete_on_termination=False):
+        create_kwargs = dict()
+        if keypair:
+            create_kwargs['key_name'] = keypair['name']
+        if security_group:
+            create_kwargs['security_groups'] = [
+                {'name': security_group['name']}]
+        create_kwargs.update(self._get_bdm(
+            vol_id, delete_on_termination=delete_on_termination))
         return self.create_server(image='', create_kwargs=create_kwargs)
 
     def _create_snapshot_from_volume(self, vol_id):
@@ -69,7 +77,7 @@
         snap = self.snapshots_client.create_snapshot(
             volume_id=vol_id,
             force=True,
-            display_name=snap_name)
+            display_name=snap_name)['snapshot']
         self.addCleanup(
             self.snapshots_client.wait_for_resource_deletion, snap['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
@@ -89,16 +97,10 @@
             waiters.wait_for_server_status(self.servers_client,
                                            i['id'], 'SHUTOFF')
 
-    def _detach_volumes(self, volumes):
-        # NOTE(gfidente): two loops so we do not wait for the status twice
-        for v in volumes:
-            self.volumes_client.detach_volume(v['id'])
-        for v in volumes:
-            self.volumes_client.wait_for_volume_status(v['id'], 'available')
-
     def _ssh_to_server(self, server, keypair):
         if CONF.compute.use_floatingip_for_ssh:
-            floating_ip = self.floating_ips_client.create_floating_ip()
+            floating_ip = (self.floating_ips_client.create_floating_ip()
+                           ['floating_ip'])
             self.addCleanup(self.delete_wrapper,
                             self.floating_ips_client.delete_floating_ip,
                             floating_ip['id'])
@@ -106,8 +108,7 @@
                 floating_ip['ip'], server['id'])
             ip = floating_ip['ip']
         else:
-            network_name_for_ssh = CONF.compute.network_for_ssh
-            ip = server.networks[network_name_for_ssh][0]
+            ip = server
 
         return self.get_remote_client(ip, private_key=keypair['private_key'],
                                       log_console_of_servers=[server])
@@ -123,7 +124,7 @@
 
     def _delete_server(self, server):
         self.servers_client.delete_server(server['id'])
-        self.servers_client.wait_for_server_termination(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
 
     def _check_content_of_written_file(self, ssh_client, expected):
         actual = self._get_content(ssh_client)
@@ -175,18 +176,34 @@
         # deletion operations to succeed
         self._stop_instances([instance_2nd, instance_from_snapshot])
 
+    @test.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
+    @test.services('compute', 'volume', 'image')
+    def test_create_ebs_image_and_check_boot(self):
+        # create an instance from volume
+        volume_origin = self._create_volume_from_image()
+        instance = self._boot_instance_from_volume(volume_origin['id'],
+                                                   delete_on_termination=True)
+        # create EBS image
+        name = data_utils.rand_name('image')
+        image = self.create_server_snapshot(instance, name=name)
+
+        # delete instance
+        self._delete_server(instance)
+
+        # boot instance from EBS image
+        instance = self.create_server(image=image['id'])
+        # just ensure that instance booted
+
+        # delete instance
+        self._delete_server(instance)
+
 
 class TestVolumeBootPatternV2(TestVolumeBootPattern):
-    def _boot_instance_from_volume(self, vol_id, keypair, security_group):
+    def _get_bdm(self, vol_id, delete_on_termination=False):
         bd_map_v2 = [{
             'uuid': vol_id,
             'source_type': 'volume',
             'destination_type': 'volume',
             'boot_index': 0,
-            'delete_on_termination': False}]
-        create_kwargs = {
-            'block_device_mapping_v2': bd_map_v2,
-            'key_name': keypair['name'],
-            'security_groups': [{'name': security_group['name']}]
-        }
-        return self.create_server(image='', create_kwargs=create_kwargs)
+            'delete_on_termination': delete_on_termination}]
+        return {'block_device_mapping_v2': bd_map_v2}
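
Concentrating the differences in ``_get_bdm`` makes the two
block-device-mapping payloads easy to compare. A minimal sketch of what each
class now produces for the same volume; the uuid is illustrative and the
flag values correspond to ``delete_on_termination=True``::

    vol_id = '11111111-2222-3333-4444-555555555555'  # illustrative uuid

    # TestVolumeBootPattern (v1): device name plus a stringified flag
    bdm_v1 = {'block_device_mapping': [
        {'device_name': 'vda',
         'volume_id': vol_id,
         'delete_on_termination': '1'}]}

    # TestVolumeBootPatternV2 (v2): source/destination types and a boolean
    bdm_v2 = {'block_device_mapping_v2': [
        {'uuid': vol_id,
         'source_type': 'volume',
         'destination_type': 'volume',
         'boot_index': 0,
         'delete_on_termination': True}]}
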
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index b1246d2..0a7d492 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -44,7 +44,7 @@
         self.flavors_client = os.flavors_client
 
     def ssh_user(self, image_id):
-        _image = self.images_client.show_image(image_id)
+        _image = self.images_client.show_image(image_id)['image']
         for regex, user in self.ssh_users:
             # First match wins
             if re.match(regex, _image['name']) is not None:
@@ -57,15 +57,15 @@
                              string=str(image['name']))
 
     def is_sshable_image(self, image_id):
-        _image = self.images_client.show_image(image_id)
+        _image = self.images_client.show_image(image_id)['image']
         return self._is_sshable_image(_image)
 
     def _is_flavor_enough(self, flavor, image):
         return image['minDisk'] <= flavor['disk']
 
     def is_flavor_enough(self, flavor_id, image_id):
-        _image = self.images_client.show_image(image_id)
-        _flavor = self.flavors_client.show_flavor(flavor_id)
+        _image = self.images_client.show_image(image_id)['image']
+        _flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
         return self._is_flavor_enough(_flavor, _image)
 
 
@@ -131,7 +131,7 @@
             return []
         if not hasattr(self, '_scenario_images'):
             try:
-                images = self.images_client.list_images()
+                images = self.images_client.list_images()['images']
                 self._scenario_images = [
                     (self._normalize_name(i['name']), dict(image_ref=i['id']))
                     for i in images if re.search(self.image_pattern,
@@ -148,7 +148,7 @@
         """
         if not hasattr(self, '_scenario_flavors'):
             try:
-                flavors = self.flavors_client.list_flavors()
+                flavors = self.flavors_client.list_flavors()['flavors']
                 self._scenario_flavors = [
                     (self._normalize_name(f['name']), dict(flavor_ref=f['id']))
                     for f in flavors if re.search(self.flavor_pattern,
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
index 1269991..d38c8cd 100644
--- a/tempest/services/compute/json/agents_client.py
+++ b/tempest/services/compute/json/agents_client.py
@@ -32,7 +32,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_agents, resp, body)
-        return service_client.ResponseBodyList(resp, body['agents'])
+        return service_client.ResponseBody(resp, body)
 
     def create_agent(self, **kwargs):
         """Create an agent build."""
@@ -40,7 +40,7 @@
         resp, body = self.post('os-agents', post_body)
         body = json.loads(body)
         self.validate_response(schema.create_agent, resp, body)
-        return service_client.ResponseBody(resp, body['agent'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_agent(self, agent_id):
         """Delete an existing agent build."""
@@ -52,4 +52,5 @@
         """Update an agent build."""
         put_body = json.dumps({'para': kwargs})
         resp, body = self.put('os-agents/%s' % agent_id, put_body)
-        return service_client.ResponseBody(resp, self._parse_resp(body))
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 28d4ff5..c9895db 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -27,14 +27,14 @@
         resp, body = self.get("os-aggregates")
         body = json.loads(body)
         self.validate_response(schema.list_aggregates, resp, body)
-        return service_client.ResponseBodyList(resp, body['aggregates'])
+        return service_client.ResponseBody(resp, body)
 
     def show_aggregate(self, aggregate_id):
         """Get details of the given aggregate."""
         resp, body = self.get("os-aggregates/%s" % aggregate_id)
         body = json.loads(body)
         self.validate_response(schema.get_aggregate, resp, body)
-        return service_client.ResponseBody(resp, body['aggregate'])
+        return service_client.ResponseBody(resp, body)
 
     def create_aggregate(self, **kwargs):
         """Creates a new aggregate."""
@@ -43,20 +43,16 @@
 
         body = json.loads(body)
         self.validate_response(schema.create_aggregate, resp, body)
-        return service_client.ResponseBody(resp, body['aggregate'])
+        return service_client.ResponseBody(resp, body)
 
-    def update_aggregate(self, aggregate_id, name, availability_zone=None):
+    def update_aggregate(self, aggregate_id, **kwargs):
         """Update a aggregate."""
-        put_body = {
-            'name': name,
-            'availability_zone': availability_zone
-        }
-        put_body = json.dumps({'aggregate': put_body})
+        put_body = json.dumps({'aggregate': kwargs})
         resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
 
         body = json.loads(body)
         self.validate_response(schema.update_aggregate, resp, body)
-        return service_client.ResponseBody(resp, body['aggregate'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
@@ -76,38 +72,29 @@
         """Returns the primary type of resource this client works with."""
         return 'aggregate'
 
-    def add_host(self, aggregate_id, host):
+    def add_host(self, aggregate_id, **kwargs):
         """Adds a host to the given aggregate."""
-        post_body = {
-            'host': host,
-        }
-        post_body = json.dumps({'add_host': post_body})
+        post_body = json.dumps({'add_host': kwargs})
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
         self.validate_response(schema.aggregate_add_remove_host, resp, body)
-        return service_client.ResponseBody(resp, body['aggregate'])
+        return service_client.ResponseBody(resp, body)
 
-    def remove_host(self, aggregate_id, host):
+    def remove_host(self, aggregate_id, **kwargs):
         """Removes a host from the given aggregate."""
-        post_body = {
-            'host': host,
-        }
-        post_body = json.dumps({'remove_host': post_body})
+        post_body = json.dumps({'remove_host': kwargs})
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
         self.validate_response(schema.aggregate_add_remove_host, resp, body)
-        return service_client.ResponseBody(resp, body['aggregate'])
+        return service_client.ResponseBody(resp, body)
 
-    def set_metadata(self, aggregate_id, meta):
+    def set_metadata(self, aggregate_id, **kwargs):
         """Replaces the aggregate's existing metadata with new metadata."""
-        post_body = {
-            'metadata': meta,
-        }
-        post_body = json.dumps({'set_metadata': post_body})
+        post_body = json.dumps({'set_metadata': kwargs})
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
         self.validate_response(schema.aggregate_set_metadata, resp, body)
-        return service_client.ResponseBody(resp, body['aggregate'])
+        return service_client.ResponseBody(resp, body)
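
With the aggregates client taking raw keyword arguments and returning the
whole decoded body, callers build the request fields themselves and unwrap
the top-level key. A minimal usage sketch; the client handle, aggregate id,
names and host below are placeholders, not values from this patch::

    aggregate = aggregates_client.update_aggregate(
        aggregate_id, name='renamed-aggregate',
        availability_zone='nova')['aggregate']
    aggregates_client.add_host(aggregate_id, host='compute-1')
    aggregates_client.set_metadata(aggregate_id, metadata={'purpose': 'test'})
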
diff --git a/tempest/services/compute/json/availability_zone_client.py b/tempest/services/compute/json/availability_zone_client.py
index c74fd10..0012637 100644
--- a/tempest/services/compute/json/availability_zone_client.py
+++ b/tempest/services/compute/json/availability_zone_client.py
@@ -32,5 +32,4 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema_list, resp, body)
-        return service_client.ResponseBodyList(resp,
-                                               body['availabilityZoneInfo'])
+        return service_client.ResponseBody(resp, body)
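
These client changes replace ResponseBodyList with a ResponseBody that keeps
the full decoded body, so test code indexes the documented top-level key
itself. A sketch for the availability-zone client; the method name
list_availability_zones and the client handle are assumptions for
illustration, only the availabilityZoneInfo key comes from this diff::

    body = availability_zone_client.list_availability_zones()
    zones = body['availabilityZoneInfo']
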
diff --git a/tempest/services/compute/json/baremetal_nodes_client.py b/tempest/services/compute/json/baremetal_nodes_client.py
index 8165292..15f883a 100644
--- a/tempest/services/compute/json/baremetal_nodes_client.py
+++ b/tempest/services/compute/json/baremetal_nodes_client.py
@@ -33,7 +33,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_baremetal_nodes, resp, body)
-        return service_client.ResponseBodyList(resp, body['nodes'])
+        return service_client.ResponseBody(resp, body)
 
     def show_baremetal_node(self, baremetal_node_id):
         """Returns the details of a single baremetal node."""
@@ -41,4 +41,4 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.get_baremetal_node, resp, body)
-        return service_client.ResponseBody(resp, body['node'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c25b273..d6c72f4 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -26,7 +26,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.get_certificate, resp, body)
-        return service_client.ResponseBody(resp, body['certificate'])
+        return service_client.ResponseBody(resp, body)
 
     def create_certificate(self):
         """create certificates."""
@@ -34,4 +34,4 @@
         resp, body = self.post(url, None)
         body = json.loads(body)
         self.validate_response(schema.create_certificate, resp, body)
-        return service_client.ResponseBody(resp, body['certificate'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/extensions_client.py b/tempest/services/compute/json/extensions_client.py
index da342a8..4741812 100644
--- a/tempest/services/compute/json/extensions_client.py
+++ b/tempest/services/compute/json/extensions_client.py
@@ -26,9 +26,9 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_extensions, resp, body)
-        return service_client.ResponseBodyList(resp, body['extensions'])
+        return service_client.ResponseBody(resp, body)
 
     def show_extension(self, extension_alias):
         resp, body = self.get('extensions/%s' % extension_alias)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['extension'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
index 69de79f..23401c3 100644
--- a/tempest/services/compute/json/fixed_ips_client.py
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -26,11 +26,11 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.get_fixed_ip, resp, body)
-        return service_client.ResponseBody(resp, body['fixed_ip'])
+        return service_client.ResponseBody(resp, body)
 
-    def reserve_fixed_ip(self, fixed_ip, body):
+    def reserve_fixed_ip(self, fixed_ip, **kwargs):
         """This reserves and unreserves fixed ips."""
         url = "os-fixed-ips/%s/action" % fixed_ip
-        resp, body = self.post(url, json.dumps(body))
+        resp, body = self.post(url, json.dumps(kwargs))
         self.validate_response(schema.reserve_fixed_ip, resp, body)
         return service_client.ResponseBody(resp)
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index b928f9f..2c32d30 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -39,37 +39,32 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(_schema, resp, body)
-        return service_client.ResponseBodyList(resp, body['flavors'])
+        return service_client.ResponseBody(resp, body)
 
     def show_flavor(self, flavor_id):
         resp, body = self.get("flavors/%s" % flavor_id)
         body = json.loads(body)
         self.validate_response(schema.create_get_flavor_details, resp, body)
-        return service_client.ResponseBody(resp, body['flavor'])
+        return service_client.ResponseBody(resp, body)
 
-    def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
-        """Creates a new flavor or instance type."""
-        post_body = {
-            'name': name,
-            'ram': ram,
-            'vcpus': vcpus,
-            'disk': disk,
-            'id': flavor_id,
-        }
+    def create_flavor(self, **kwargs):
+        """Creates a new flavor or instance type.
+        All parameters except the following are passed to the API without
+        any changes.
+        :param ephemeral: The name is changed to OS-FLV-EXT-DATA:ephemeral
+        :param is_public: The name is changed to os-flavor-access:is_public
+        """
         if kwargs.get('ephemeral'):
-            post_body['OS-FLV-EXT-DATA:ephemeral'] = kwargs.get('ephemeral')
-        if kwargs.get('swap'):
-            post_body['swap'] = kwargs.get('swap')
-        if kwargs.get('rxtx'):
-            post_body['rxtx_factor'] = kwargs.get('rxtx')
+            kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
         if kwargs.get('is_public'):
-            post_body['os-flavor-access:is_public'] = kwargs.get('is_public')
-        post_body = json.dumps({'flavor': post_body})
+            kwargs['os-flavor-access:is_public'] = kwargs.pop('is_public')
+
+        post_body = json.dumps({'flavor': kwargs})
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
         self.validate_response(schema.create_get_flavor_details, resp, body)
-        return service_client.ResponseBody(resp, body['flavor'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
@@ -81,7 +76,7 @@
         # Did not use show_flavor(id) for verification as it gives
         # 200 ok even for deleted id. LP #981263
         # we can remove the loop here and use get by ID when bug gets sorted out
-        flavors = self.list_flavors(detail=True)
+        flavors = self.list_flavors(detail=True)['flavors']
         for flavor in flavors:
             if flavor['id'] == id:
                 return False
@@ -92,15 +87,15 @@
         """Returns the primary type of resource this client works with."""
         return 'flavor'
 
-    def set_flavor_extra_spec(self, flavor_id, specs):
+    def set_flavor_extra_spec(self, flavor_id, **kwargs):
         """Sets extra Specs to the mentioned flavor."""
-        post_body = json.dumps({'extra_specs': specs})
+        post_body = json.dumps({'extra_specs': kwargs})
         resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                                post_body)
         body = json.loads(body)
         self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                                resp, body)
-        return service_client.ResponseBody(resp, body['extra_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def list_flavor_extra_specs(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
@@ -108,7 +103,7 @@
         body = json.loads(body)
         self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                                resp, body)
-        return service_client.ResponseBody(resp, body['extra_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def show_flavor_extra_spec(self, flavor_id, key):
         """Gets extra Specs key-value of the mentioned flavor and key."""
@@ -143,7 +138,7 @@
         body = json.loads(body)
         self.validate_response(schema_access.add_remove_list_flavor_access,
                                resp, body)
-        return service_client.ResponseBodyList(resp, body['flavor_access'])
+        return service_client.ResponseBody(resp, body)
 
     def add_flavor_access(self, flavor_id, tenant_id):
         """Add flavor access for the specified tenant."""
@@ -157,7 +152,7 @@
         body = json.loads(body)
         self.validate_response(schema_access.add_remove_list_flavor_access,
                                resp, body)
-        return service_client.ResponseBodyList(resp, body['flavor_access'])
+        return service_client.ResponseBody(resp, body)
 
     def remove_flavor_access(self, flavor_id, tenant_id):
         """Remove flavor access from the specified tenant."""
@@ -171,4 +166,4 @@
         body = json.loads(body)
         self.validate_response(schema_access.add_remove_list_flavor_access,
                                resp, body)
-        return service_client.ResponseBody(resp, body['flavor_access'])
+        return service_client.ResponseBody(resp, body)
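
create_flavor now forwards keyword arguments directly, so callers use the API
field names (including id) while the client still remaps ephemeral and
is_public. A hedged sketch with illustrative values::

    flavor = flavors_client.create_flavor(name='scenario-flavor', ram=512,
                                          vcpus=1, disk=1, id='42',
                                          ephemeral=1,
                                          is_public=True)['flavor']
    flavors_client.set_flavor_extra_spec(flavor['id'], key1='value1')

Extra-spec keys that are not valid Python identifiers (for example keys
containing a colon) would need to be passed with dict unpacking instead of
literal keyword arguments.
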
diff --git a/tempest/services/compute/json/floating_ip_pools_client.py b/tempest/services/compute/json/floating_ip_pools_client.py
index 1cc411b..7a4434f 100644
--- a/tempest/services/compute/json/floating_ip_pools_client.py
+++ b/tempest/services/compute/json/floating_ip_pools_client.py
@@ -21,10 +21,10 @@
 from tempest.common import service_client
 
 
-class FloatingIpPoolsClient(service_client.ServiceClient):
+class FloatingIPPoolsClient(service_client.ServiceClient):
 
     def list_floating_ip_pools(self, params=None):
-        """Returns a list of all floating IP Pools."""
+        """Gets all floating IP Pools list."""
         url = 'os-floating-ip-pools'
         if params:
             url += '?%s' % urllib.urlencode(params)
@@ -32,4 +32,4 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_floating_ip_pools, resp, body)
-        return service_client.ResponseBodyList(resp, body['floating_ip_pools'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/floating_ips_bulk_client.py b/tempest/services/compute/json/floating_ips_bulk_client.py
index c8e7350..c51f77e 100644
--- a/tempest/services/compute/json/floating_ips_bulk_client.py
+++ b/tempest/services/compute/json/floating_ips_bulk_client.py
@@ -19,7 +19,7 @@
 from tempest.common import service_client
 
 
-class FloatingIpsBulkClient(service_client.ServiceClient):
+class FloatingIPsBulkClient(service_client.ServiceClient):
 
     def create_floating_ips_bulk(self, ip_range, pool, interface):
         """Allocate floating IPs in bulk."""
@@ -32,18 +32,17 @@
         resp, body = self.post('os-floating-ips-bulk', post_body)
         body = json.loads(body)
         self.validate_response(schema.create_floating_ips_bulk, resp, body)
-        return service_client.ResponseBody(resp,
-                                           body['floating_ips_bulk_create'])
+        return service_client.ResponseBody(resp, body)
 
     def list_floating_ips_bulk(self):
-        """Returns a list of all floating IPs bulk."""
+        """Gets all floating IPs in bulk."""
         resp, body = self.get('os-floating-ips-bulk')
         body = json.loads(body)
         self.validate_response(schema.list_floating_ips_bulk, resp, body)
-        return service_client.ResponseBodyList(resp, body['floating_ip_info'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_floating_ips_bulk(self, ip_range):
-        """Deletes the provided floating IPs bulk."""
+        """Deletes the provided floating IPs in bulk."""
         post_body = json.dumps({'ip_range': ip_range})
         resp, body = self.put('os-floating-ips-bulk/delete', post_body)
         body = json.loads(body)
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 2193949..69d06a3 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -32,7 +32,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_floating_ips, resp, body)
-        return service_client.ResponseBodyList(resp, body['floating_ips'])
+        return service_client.ResponseBody(resp, body)
 
     def show_floating_ip(self, floating_ip_id):
         """Get the details of a floating IP."""
@@ -40,7 +40,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.create_get_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body['floating_ip'])
+        return service_client.ResponseBody(resp, body)
 
     def create_floating_ip(self, pool_name=None):
         """Allocate a floating IP to the project."""
@@ -50,7 +50,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.validate_response(schema.create_get_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body['floating_ip'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_floating_ip(self, floating_ip_id):
         """Deletes the provided floating IP from the project."""
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index 752af68..3d3cb18 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -31,7 +31,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_hosts, resp, body)
-        return service_client.ResponseBodyList(resp, body['hosts'])
+        return service_client.ResponseBody(resp, body)
 
     def show_host(self, hostname):
         """Show detail information for the host."""
@@ -39,7 +39,7 @@
         resp, body = self.get("os-hosts/%s" % hostname)
         body = json.loads(body)
         self.validate_response(schema.get_host_detail, resp, body)
-        return service_client.ResponseBodyList(resp, body['host'])
+        return service_client.ResponseBody(resp, body)
 
     def update_host(self, hostname, **kwargs):
         """Update a host."""
@@ -62,7 +62,7 @@
         resp, body = self.get("os-hosts/%s/startup" % hostname)
         body = json.loads(body)
         self.validate_response(schema.startup_host, resp, body)
-        return service_client.ResponseBody(resp, body['host'])
+        return service_client.ResponseBody(resp, body)
 
     def shutdown_host(self, hostname):
         """Shutdown a host."""
@@ -70,7 +70,7 @@
         resp, body = self.get("os-hosts/%s/shutdown" % hostname)
         body = json.loads(body)
         self.validate_response(schema.shutdown_host, resp, body)
-        return service_client.ResponseBody(resp, body['host'])
+        return service_client.ResponseBody(resp, body)
 
     def reboot_host(self, hostname):
         """reboot a host."""
@@ -78,4 +78,4 @@
         resp, body = self.get("os-hosts/%s/reboot" % hostname)
         body = json.loads(body)
         self.validate_response(schema.reboot_host, resp, body)
-        return service_client.ResponseBody(resp, body['host'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/hypervisor_client.py b/tempest/services/compute/json/hypervisor_client.py
index e894a5c..ba06f23 100644
--- a/tempest/services/compute/json/hypervisor_client.py
+++ b/tempest/services/compute/json/hypervisor_client.py
@@ -32,39 +32,39 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(_schema, resp, body)
-        return service_client.ResponseBodyList(resp, body['hypervisors'])
+        return service_client.ResponseBody(resp, body)
 
     def show_hypervisor(self, hypervisor_id):
         """Display the details of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s' % hypervisor_id)
         body = json.loads(body)
         self.validate_response(schema.get_hypervisor, resp, body)
-        return service_client.ResponseBody(resp, body['hypervisor'])
+        return service_client.ResponseBody(resp, body)
 
     def list_servers_on_hypervisor(self, hypervisor_name):
         """List instances belonging to the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/servers' % hypervisor_name)
         body = json.loads(body)
         self.validate_response(schema.get_hypervisors_servers, resp, body)
-        return service_client.ResponseBodyList(resp, body['hypervisors'])
+        return service_client.ResponseBody(resp, body)
 
     def show_hypervisor_statistics(self):
         """Get hypervisor statistics over all compute nodes."""
         resp, body = self.get('os-hypervisors/statistics')
         body = json.loads(body)
         self.validate_response(schema.get_hypervisor_statistics, resp, body)
-        return service_client.ResponseBody(resp, body['hypervisor_statistics'])
+        return service_client.ResponseBody(resp, body)
 
     def show_hypervisor_uptime(self, hypervisor_id):
         """Display the uptime of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/uptime' % hypervisor_id)
         body = json.loads(body)
         self.validate_response(schema.get_hypervisor_uptime, resp, body)
-        return service_client.ResponseBody(resp, body['hypervisor'])
+        return service_client.ResponseBody(resp, body)
 
     def search_hypervisor(self, hypervisor_name):
         """Search specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/search' % hypervisor_name)
         body = json.loads(body)
         self.validate_response(schema.list_search_hypervisors, resp, body)
-        return service_client.ResponseBodyList(resp, body['hypervisors'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index b0ce2dc..99fdfe6 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -23,18 +23,10 @@
 
 class ImagesClient(service_client.ServiceClient):
 
-    def create_image(self, server_id, name, meta=None):
+    def create_image(self, server_id, **kwargs):
         """Creates an image of the original server."""
 
-        post_body = {
-            'createImage': {
-                'name': name,
-            }
-        }
-
-        if meta is not None:
-            post_body['createImage']['metadata'] = meta
-
+        post_body = {'createImage': kwargs}
         post_body = json.dumps(post_body)
         resp, body = self.post('servers/%s/action' % server_id,
                                post_body)
@@ -55,7 +47,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(_schema, resp, body)
-        return service_client.ResponseBodyList(resp, body['images'])
+        return service_client.ResponseBody(resp, body)
 
     def show_image(self, image_id):
         """Returns the details of a single image."""
@@ -63,7 +55,7 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         self.validate_response(schema.get_image, resp, body)
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_image(self, image_id):
         """Deletes the provided image."""
@@ -76,7 +68,7 @@
         resp, body = self.get("images/%s/metadata" % image_id)
         body = json.loads(body)
         self.validate_response(schema.image_metadata, resp, body)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def set_image_metadata(self, image_id, meta):
         """Sets the metadata for an image."""
@@ -84,7 +76,7 @@
         resp, body = self.put('images/%s/metadata' % image_id, post_body)
         body = json.loads(body)
         self.validate_response(schema.image_metadata, resp, body)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def update_image_metadata(self, image_id, meta):
         """Updates the metadata for an image."""
@@ -92,14 +84,14 @@
         resp, body = self.post('images/%s/metadata' % image_id, post_body)
         body = json.loads(body)
         self.validate_response(schema.image_metadata, resp, body)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def show_image_metadata_item(self, image_id, key):
         """Returns the value for a specific image metadata key."""
         resp, body = self.get("images/%s/metadata/%s" % (image_id, key))
         body = json.loads(body)
         self.validate_response(schema.image_meta_item, resp, body)
-        return service_client.ResponseBody(resp, body['meta'])
+        return service_client.ResponseBody(resp, body)
 
     def set_image_metadata_item(self, image_id, key, meta):
         """Sets the value for a specific image metadata key."""
@@ -108,7 +100,7 @@
                               post_body)
         body = json.loads(body)
         self.validate_response(schema.image_meta_item, resp, body)
-        return service_client.ResponseBody(resp, body['meta'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_image_metadata_item(self, image_id, key):
         """Deletes a single image metadata key/value pair."""
diff --git a/tempest/services/compute/json/instance_usage_audit_log_client.py b/tempest/services/compute/json/instance_usage_audit_log_client.py
index f06a675..4d9625e 100644
--- a/tempest/services/compute/json/instance_usage_audit_log_client.py
+++ b/tempest/services/compute/json/instance_usage_audit_log_client.py
@@ -28,13 +28,11 @@
         body = json.loads(body)
         self.validate_response(schema.list_instance_usage_audit_log,
                                resp, body)
-        return service_client.ResponseBody(resp,
-                                           body["instance_usage_audit_logs"])
+        return service_client.ResponseBody(resp, body)
 
     def show_instance_usage_audit_log(self, time_before):
         url = 'os-instance_usage_audit_log/%s' % time_before
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.get_instance_usage_audit_log, resp, body)
-        return service_client.ResponseBody(resp,
-                                           body["instance_usage_audit_log"])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index e8b2b64..2e66082 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -26,32 +26,23 @@
         resp, body = self.get('servers/%s/os-interface' % server_id)
         body = json.loads(body)
         self.validate_response(schema.list_interfaces, resp, body)
-        return service_client.ResponseBodyList(resp,
-                                               body['interfaceAttachments'])
+        return service_client.ResponseBody(resp, body)
 
-    def create_interface(self, server_id, port_id=None, network_id=None,
-                         fixed_ip=None):
-        post_body = dict(interfaceAttachment=dict())
-        if port_id:
-            post_body['interfaceAttachment']['port_id'] = port_id
-        if network_id:
-            post_body['interfaceAttachment']['net_id'] = network_id
-        if fixed_ip:
-            fip = dict(ip_address=fixed_ip)
-            post_body['interfaceAttachment']['fixed_ips'] = [fip]
+    def create_interface(self, server_id, **kwargs):
+        post_body = {'interfaceAttachment': kwargs}
         post_body = json.dumps(post_body)
         resp, body = self.post('servers/%s/os-interface' % server_id,
                                body=post_body)
         body = json.loads(body)
         self.validate_response(schema.get_create_interfaces, resp, body)
-        return service_client.ResponseBody(resp, body['interfaceAttachment'])
+        return service_client.ResponseBody(resp, body)
 
     def show_interface(self, server_id, port_id):
         resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
                                                               port_id))
         body = json.loads(body)
         self.validate_response(schema.get_create_interfaces, resp, body)
-        return service_client.ResponseBody(resp, body['interfaceAttachment'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_interface(self, server_id, port_id):
         resp, body = self.delete('servers/%s/os-interface/%s' % (server_id,
@@ -59,26 +50,18 @@
         self.validate_response(schema.delete_interface, resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def add_fixed_ip(self, server_id, network_id):
+    def add_fixed_ip(self, server_id, **kwargs):
         """Add a fixed IP to input server instance."""
-        post_body = json.dumps({
-            'addFixedIp': {
-                'networkId': network_id
-            }
-        })
+        post_body = json.dumps({'addFixedIp': kwargs})
         resp, body = self.post('servers/%s/action' % server_id,
                                post_body)
         self.validate_response(servers_schema.server_actions_common_schema,
                                resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def remove_fixed_ip(self, server_id, ip_address):
+    def remove_fixed_ip(self, server_id, **kwargs):
         """Remove input fixed IP from input server instance."""
-        post_body = json.dumps({
-            'removeFixedIp': {
-                'address': ip_address
-            }
-        })
+        post_body = json.dumps({'removeFixedIp': kwargs})
         resp, body = self.post('servers/%s/action' % server_id,
                                post_body)
         self.validate_response(servers_schema.server_actions_common_schema,
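
The interface and fixed-IP helpers likewise pass keyword arguments straight
into the request body, so callers now supply the field names the removed code
used to assemble (net_id, port_id, fixed_ips, networkId, address). An
illustrative sketch with placeholder identifiers::

    iface = interfaces_client.create_interface(
        server_id, net_id=network_id)['interfaceAttachment']
    interfaces_client.add_fixed_ip(server_id, networkId=network_id)
    interfaces_client.remove_fixed_ip(server_id, address=fixed_ip)
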
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 6f819ae..2e22bc6 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -24,29 +24,21 @@
     def list_keypairs(self):
         resp, body = self.get("os-keypairs")
         body = json.loads(body)
-        # Each returned keypair is embedded within an unnecessary 'keypair'
-        # element which is a deviation from other resources like floating-ips,
-        # servers, etc. A bug?
-        # For now we shall adhere to the spec, but the spec for keypairs
-        # is yet to be found
         self.validate_response(schema.list_keypairs, resp, body)
-        return service_client.ResponseBodyList(resp, body['keypairs'])
+        return service_client.ResponseBody(resp, body)
 
     def show_keypair(self, keypair_name):
         resp, body = self.get("os-keypairs/%s" % keypair_name)
         body = json.loads(body)
         self.validate_response(schema.get_keypair, resp, body)
-        return service_client.ResponseBody(resp, body['keypair'])
+        return service_client.ResponseBody(resp, body)
 
-    def create_keypair(self, name, pub_key=None):
-        post_body = {'keypair': {'name': name}}
-        if pub_key:
-            post_body['keypair']['public_key'] = pub_key
-        post_body = json.dumps(post_body)
+    def create_keypair(self, **kwargs):
+        post_body = json.dumps({'keypair': kwargs})
         resp, body = self.post("os-keypairs", body=post_body)
         body = json.loads(body)
         self.validate_response(schema.create_keypair, resp, body)
-        return service_client.ResponseBody(resp, body['keypair'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_keypair(self, keypair_name):
         resp, body = self.delete("os-keypairs/%s" % keypair_name)
diff --git a/tempest/services/compute/json/networks_client.py b/tempest/services/compute/json/networks_client.py
index 6373f01..dd20ee5 100644
--- a/tempest/services/compute/json/networks_client.py
+++ b/tempest/services/compute/json/networks_client.py
@@ -24,10 +24,10 @@
         resp, body = self.get("os-networks")
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['networks'])
+        return service_client.ResponseBody(resp, body)
 
     def show_network(self, network_id):
         resp, body = self.get("os-networks/%s" % network_id)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['network'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 4ea47ed..88d0567 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -41,59 +41,11 @@
         self.validate_response(schema.get_quota_set, resp, body)
         return service_client.ResponseBody(resp, body['quota_set'])
 
-    def update_quota_set(self, tenant_id, user_id=None,
-                         force=None, injected_file_content_bytes=None,
-                         metadata_items=None, ram=None, floating_ips=None,
-                         fixed_ips=None, key_pairs=None, instances=None,
-                         security_group_rules=None, injected_files=None,
-                         cores=None, injected_file_path_bytes=None,
-                         security_groups=None):
+    def update_quota_set(self, tenant_id, user_id=None, **kwargs):
         """
         Updates the tenant's quota limits for one or more resources
         """
-        post_body = {}
-
-        if force is not None:
-            post_body['force'] = force
-
-        if injected_file_content_bytes is not None:
-            post_body['injected_file_content_bytes'] = \
-                injected_file_content_bytes
-
-        if metadata_items is not None:
-            post_body['metadata_items'] = metadata_items
-
-        if ram is not None:
-            post_body['ram'] = ram
-
-        if floating_ips is not None:
-            post_body['floating_ips'] = floating_ips
-
-        if fixed_ips is not None:
-            post_body['fixed_ips'] = fixed_ips
-
-        if key_pairs is not None:
-            post_body['key_pairs'] = key_pairs
-
-        if instances is not None:
-            post_body['instances'] = instances
-
-        if security_group_rules is not None:
-            post_body['security_group_rules'] = security_group_rules
-
-        if injected_files is not None:
-            post_body['injected_files'] = injected_files
-
-        if cores is not None:
-            post_body['cores'] = cores
-
-        if injected_file_path_bytes is not None:
-            post_body['injected_file_path_bytes'] = injected_file_path_bytes
-
-        if security_groups is not None:
-            post_body['security_groups'] = security_groups
-
-        post_body = json.dumps({'quota_set': post_body})
+        post_body = json.dumps({'quota_set': kwargs})
 
         if user_id:
             resp, body = self.put('os-quota-sets/%s?user_id=%s' %
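
For update_quota_set the former optional parameters simply become keyword
arguments with the same names. A hedged sketch with illustrative limits::

    quotas_client.update_quota_set(tenant_id, ram=1024, cores=10,
                                   instances=20, force=True)
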
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
index fcc715a..6e4d1e4 100644
--- a/tempest/services/compute/json/security_group_default_rules_client.py
+++ b/tempest/services/compute/json/security_group_default_rules_client.py
@@ -22,8 +22,7 @@
 
 class SecurityGroupDefaultRulesClient(service_client.ServiceClient):
 
-    def create_security_default_group_rule(self, ip_protocol, from_port,
-                                           to_port, **kwargs):
+    def create_security_default_group_rule(self, **kwargs):
         """
         Creates security group default rules.
         ip_protocol : ip_protocol (icmp, tcp, udp).
@@ -31,20 +30,13 @@
         to_port  : Port at end of range.
         cidr     : CIDR for address range.
         """
-        post_body = {
-            'ip_protocol': ip_protocol,
-            'from_port': from_port,
-            'to_port': to_port,
-            'cidr': kwargs.get('cidr'),
-        }
-        post_body = json.dumps({'security_group_default_rule': post_body})
+        post_body = json.dumps({'security_group_default_rule': kwargs})
         url = 'os-security-group-default-rules'
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.validate_response(schema.create_get_security_group_default_rule,
                                resp, body)
-        rule = body['security_group_default_rule']
-        return service_client.ResponseBody(resp, rule)
+        return service_client.ResponseBody(resp, body)
 
     def delete_security_group_default_rule(self,
                                            security_group_default_rule_id):
@@ -61,8 +53,7 @@
         body = json.loads(body)
         self.validate_response(schema.list_security_group_default_rules,
                                resp, body)
-        rules = body['security_group_default_rules']
-        return service_client.ResponseBodyList(resp, rules)
+        return service_client.ResponseBody(resp, body)
 
     def show_security_group_default_rule(self, security_group_default_rule_id):
         """Return the details of provided Security Group default rule."""
@@ -71,5 +62,4 @@
         body = json.loads(body)
         self.validate_response(schema.create_get_security_group_default_rule,
                                resp, body)
-        rule = body['security_group_default_rule']
-        return service_client.ResponseBody(resp, rule)
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/security_group_rules_client.py b/tempest/services/compute/json/security_group_rules_client.py
new file mode 100644
index 0000000..c9096d0
--- /dev/null
+++ b/tempest/services/compute/json/security_group_rules_client.py
@@ -0,0 +1,59 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from tempest_lib import exceptions as lib_exc
+
+from tempest.api_schema.response.compute.v2_1 import security_groups as schema
+from tempest.common import service_client
+
+
+class SecurityGroupRulesClient(service_client.ServiceClient):
+
+    def create_security_group_rule(self, **kwargs):
+        """
+        Creates a new security group rule.
+        parent_group_id : ID of the security group
+        ip_protocol : IP protocol (icmp, tcp, udp).
+        from_port: Port at start of range.
+        to_port  : Port at end of range.
+        The following optional keyword arguments are accepted:
+        cidr     : CIDR for address range.
+        group_id : ID of the source group
+        """
+        post_body = json.dumps({'security_group_rule': kwargs})
+        url = 'os-security-group-rules'
+        resp, body = self.post(url, post_body)
+        body = json.loads(body)
+        self.validate_response(schema.create_security_group_rule, resp, body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_security_group_rule(self, group_rule_id):
+        """Deletes the provided Security Group rule."""
+        resp, body = self.delete('os-security-group-rules/%s' %
+                                 group_rule_id)
+        self.validate_response(schema.delete_security_group_rule, resp, body)
+        return service_client.ResponseBody(resp, body)
+
+    def list_security_group_rules(self, security_group_id):
+        """List all rules for a security group."""
+        resp, body = self.get('os-security-groups')
+        body = json.loads(body)
+        self.validate_response(schema.list_security_groups, resp, body)
+        for sg in body['security_groups']:
+            if sg['id'] == security_group_id:
+                return service_client.ResponseBody(resp, sg)
+        raise lib_exc.NotFound('No such Security Group')
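
A usage sketch for the new client; the group id, ports and CIDR are
placeholders, and the security_group_rule key mirrors the response body the
old security_groups_client method used to unwrap::

    rule = security_group_rules_client.create_security_group_rule(
        parent_group_id=sg_id, ip_protocol='tcp', from_port=22,
        to_port=22, cidr='0.0.0.0/0')['security_group_rule']
    security_group_rules_client.delete_security_group_rule(rule['id'])
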
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 5a3d771..083d03a 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -33,7 +33,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_security_groups, resp, body)
-        return service_client.ResponseBodyList(resp, body['security_groups'])
+        return service_client.ResponseBody(resp, body)
 
     def show_security_group(self, security_group_id):
         """Get the details of a Security Group."""
@@ -41,43 +41,33 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.get_security_group, resp, body)
-        return service_client.ResponseBody(resp, body['security_group'])
+        return service_client.ResponseBody(resp, body)
 
-    def create_security_group(self, name, description):
+    def create_security_group(self, **kwargs):
         """
         Creates a new security group.
         name (Required): Name of security group.
         description (Required): Description of security group.
         """
-        post_body = {
-            'name': name,
-            'description': description,
-        }
-        post_body = json.dumps({'security_group': post_body})
+        post_body = json.dumps({'security_group': kwargs})
         resp, body = self.post('os-security-groups', post_body)
         body = json.loads(body)
         self.validate_response(schema.get_security_group, resp, body)
-        return service_client.ResponseBody(resp, body['security_group'])
+        return service_client.ResponseBody(resp, body)
 
-    def update_security_group(self, security_group_id, name=None,
-                              description=None):
+    def update_security_group(self, security_group_id, **kwargs):
         """
         Update a security group.
         security_group_id: a security_group to update
         name: new name of security group
         description: new description of security group
         """
-        post_body = {}
-        if name:
-            post_body['name'] = name
-        if description:
-            post_body['description'] = description
-        post_body = json.dumps({'security_group': post_body})
+        post_body = json.dumps({'security_group': kwargs})
         resp, body = self.put('os-security-groups/%s' % security_group_id,
                               post_body)
         body = json.loads(body)
         self.validate_response(schema.update_security_group, resp, body)
-        return service_client.ResponseBody(resp, body['security_group'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_security_group(self, security_group_id):
         """Deletes the provided Security Group."""
@@ -86,50 +76,6 @@
         self.validate_response(schema.delete_security_group, resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
-                                   to_port, **kwargs):
-        """
-        Creating a new security group rules.
-        parent_group_id :ID of Security group
-        ip_protocol : ip_proto (icmp, tcp, udp).
-        from_port: Port at start of range.
-        to_port  : Port at end of range.
-        Following optional keyword arguments are accepted:
-        cidr     : CIDR for address range.
-        group_id : ID of the Source group
-        """
-        post_body = {
-            'parent_group_id': parent_group_id,
-            'ip_protocol': ip_proto,
-            'from_port': from_port,
-            'to_port': to_port,
-            'cidr': kwargs.get('cidr'),
-            'group_id': kwargs.get('group_id'),
-        }
-        post_body = json.dumps({'security_group_rule': post_body})
-        url = 'os-security-group-rules'
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_security_group_rule, resp, body)
-        return service_client.ResponseBody(resp, body['security_group_rule'])
-
-    def delete_security_group_rule(self, group_rule_id):
-        """Deletes the provided Security Group rule."""
-        resp, body = self.delete('os-security-group-rules/%s' %
-                                 group_rule_id)
-        self.validate_response(schema.delete_security_group_rule, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_security_group_rules(self, security_group_id):
-        """List all rules for a security group."""
-        resp, body = self.get('os-security-groups')
-        body = json.loads(body)
-        self.validate_response(schema.list_security_groups, resp, body)
-        for sg in body['security_groups']:
-            if sg['id'] == security_group_id:
-                return service_client.ResponseBodyList(resp, sg['rules'])
-        raise lib_exc.NotFound('No such Security Group')
-
     def is_resource_deleted(self, id):
         try:
             self.show_security_group(id)
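
Security group creation and update now take keyword arguments and return the
full body. A short sketch with illustrative names::

    sg = security_groups_client.create_security_group(
        name='scenario-sg', description='created by a test')['security_group']
    security_groups_client.update_security_group(sg['id'],
                                                 name='scenario-sg-renamed')
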
diff --git a/tempest/services/compute/json/server_groups_client.py b/tempest/services/compute/json/server_groups_client.py
index 9fec930..30e9e5b 100644
--- a/tempest/services/compute/json/server_groups_client.py
+++ b/tempest/services/compute/json/server_groups_client.py
@@ -38,7 +38,7 @@
 
         body = json.loads(body)
         self.validate_response(schema.create_get_server_group, resp, body)
-        return service_client.ResponseBody(resp, body['server_group'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_server_group(self, server_group_id):
         """Delete the given server-group."""
@@ -51,11 +51,11 @@
         resp, body = self.get("os-server-groups")
         body = json.loads(body)
         self.validate_response(schema.list_server_groups, resp, body)
-        return service_client.ResponseBodyList(resp, body['server_groups'])
+        return service_client.ResponseBody(resp, body)
 
     def get_server_group(self, server_group_id):
         """Get the details of given server_group."""
         resp, body = self.get("os-server-groups/%s" % server_group_id)
         body = json.loads(body)
         self.validate_response(schema.create_get_server_group, resp, body)
-        return service_client.ResponseBody(resp, body['server_group'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 1159a58..671450a 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -14,15 +14,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import time
-
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
 
 from tempest.api_schema.response.compute.v2_1 import servers as schema
 from tempest.common import service_client
-from tempest import exceptions
 
 
 class ServersClient(service_client.ServiceClient):
@@ -165,24 +161,6 @@
         self.validate_response(_schema, resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def wait_for_server_termination(self, server_id, ignore_error=False):
-        """Waits for server to reach termination."""
-        start_time = int(time.time())
-        while True:
-            try:
-                body = self.show_server(server_id)
-            except lib_exc.NotFound:
-                return
-
-            server_status = body['status']
-            if server_status == 'ERROR' and not ignore_error:
-                raise exceptions.BuildErrorException(server_id=server_id)
-
-            if int(time.time()) - start_time >= self.build_timeout:
-                raise exceptions.TimeoutException
-
-            time.sleep(self.build_interval)
-
     def list_addresses(self, server_id):
         """Lists all addresses for a server."""
         resp, body = self.get("servers/%s/ips" % server_id)
@@ -206,15 +184,7 @@
                                post_body)
         if response_key is not None:
             body = json.loads(body)
-            # Check for Schema as 'None' because if we do not have any server
-            # action schema implemented yet then they can pass 'None' to skip
-            # the validation.Once all server action has their schema
-            # implemented then, this check can be removed if every actions are
-            # supposed to validate their response.
-            # TODO(GMann): Remove the below 'if' check once all server actions
-            # schema are implemented.
-            if schema is not None:
-                self.validate_response(schema, resp, body)
+            self.validate_response(schema, resp, body)
             body = body[response_key]
         else:
             self.validate_response(schema, resp, body)
@@ -251,16 +221,19 @@
                                resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def reboot(self, server_id, reboot_type):
+    def reboot_server(self, server_id, reboot_type):
         """Reboots a server."""
         return self.action(server_id, 'reboot', None, type=reboot_type)
 
-    def rebuild(self, server_id, image_ref, **kwargs):
-        """Rebuilds a server with a new image."""
+    def rebuild_server(self, server_id, image_ref, **kwargs):
+        """Rebuilds a server with a new image.
+        All parameters except the following are passed to the API without
+        any changes.
+        :param disk_config: The name is changed to OS-DCF:diskConfig
+        """
         kwargs['imageRef'] = image_ref
         if 'disk_config' in kwargs:
-            kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
-            del kwargs['disk_config']
+            kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
         if self.enable_instance_password:
             rebuild_schema = schema.rebuild_server_with_admin_pass
         else:
@@ -268,12 +241,15 @@
         return self.action(server_id, 'rebuild', 'server',
                            rebuild_schema, **kwargs)
 
-    def resize(self, server_id, flavor_ref, **kwargs):
-        """Changes the flavor of a server."""
+    def resize_server(self, server_id, flavor_ref, **kwargs):
+        """Changes the flavor of a server.
+        All parameters except the following are passed to the API without
+        any changes.
+        :param disk_config: The name is changed to OS-DCF:diskConfig
+        """
         kwargs['flavorRef'] = flavor_ref
         if 'disk_config' in kwargs:
-            kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
-            del kwargs['disk_config']
+            kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
         return self.action(server_id, 'resize', None, **kwargs)
 
     def confirm_resize(self, server_id, **kwargs):
@@ -341,14 +317,9 @@
     def start(self, server_id, **kwargs):
         return self.action(server_id, 'os-start', None, **kwargs)
 
-    def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
+    def attach_volume(self, server_id, **kwargs):
         """Attaches a volume to a server instance."""
-        post_body = json.dumps({
-            'volumeAttachment': {
-                'volumeId': volume_id,
-                'device': device,
-            }
-        })
+        post_body = json.dumps({'volumeAttachment': kwargs})
         resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
                                post_body)
         body = json.loads(body)
@@ -386,16 +357,10 @@
         """Removes a security group from the server."""
         return self.action(server_id, 'removeSecurityGroup', None, name=name)
 
-    def live_migrate_server(self, server_id, dest_host, use_block_migration):
+    def live_migrate_server(self, server_id, **kwargs):
         """This should be called with administrator privileges ."""
 
-        migrate_params = {
-            "disk_over_commit": False,
-            "block_migration": use_block_migration,
-            "host": dest_host
-        }
-
-        req_body = json.dumps({'os-migrateLive': migrate_params})
+        req_body = json.dumps({'os-migrateLive': kwargs})
 
         resp, body = self.post("servers/%s/action" % server_id, req_body)
         self.validate_response(schema.server_actions_common_schema,
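
The renamed server actions keep their behaviour but take keyword arguments
matching the API fields the removed code hard-coded. A hedged sketch; the
reboot type, device and disk_config values are illustrative::

    servers_client.reboot_server(server_id, 'SOFT')
    servers_client.rebuild_server(server_id, image_ref, disk_config='AUTO')
    servers_client.resize_server(server_id, flavor_ref)
    servers_client.attach_volume(server_id, volumeId=volume_id,
                                 device='/dev/vdb')
    servers_client.live_migrate_server(server_id, host=target_host,
                                       block_migration=False,
                                       disk_over_commit=False)
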
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index 699d3e7..232b301 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -31,7 +31,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_services, resp, body)
-        return service_client.ResponseBodyList(resp, body['services'])
+        return service_client.ResponseBody(resp, body)
 
     def enable_service(self, host_name, binary):
         """
@@ -43,7 +43,7 @@
         resp, body = self.put('os-services/enable', post_body)
         body = json.loads(body)
         self.validate_response(schema.enable_service, resp, body)
-        return service_client.ResponseBody(resp, body['service'])
+        return service_client.ResponseBody(resp, body)
 
     def disable_service(self, host_name, binary):
         """
@@ -54,4 +54,4 @@
         post_body = json.dumps({'binary': binary, 'host': host_name})
         resp, body = self.put('os-services/disable', post_body)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['service'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/tenant_usages_client.py b/tempest/services/compute/json/tenant_usages_client.py
index 72fcde2..73b4706 100644
--- a/tempest/services/compute/json/tenant_usages_client.py
+++ b/tempest/services/compute/json/tenant_usages_client.py
@@ -30,7 +30,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.list_tenant_usage, resp, body)
-        return service_client.ResponseBodyList(resp, body['tenant_usages'][0])
+        return service_client.ResponseBody(resp, body)
 
     def show_tenant_usage(self, tenant_id, **params):
         url = 'os-simple-tenant-usage/%s' % tenant_id
@@ -40,4 +40,4 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.get_tenant_usage, resp, body)
-        return service_client.ResponseBodyList(resp, body['tenant_usage'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index ac55049..c86aaff 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -23,12 +23,6 @@
 
 class VolumesExtensionsClient(service_client.ServiceClient):
 
-    def __init__(self, auth_provider, service, region,
-                 default_volume_size=1, **kwargs):
-        super(VolumesExtensionsClient, self).__init__(
-            auth_provider, service, region, **kwargs)
-        self.default_volume_size = default_volume_size
-
     def list_volumes(self, detail=False, **params):
         """List all the volumes created."""
         url = 'os-volumes'
@@ -51,7 +45,7 @@
         self.validate_response(schema.create_get_volume, resp, body)
         return service_client.ResponseBody(resp, body['volume'])
 
-    def create_volume(self, size=None, **kwargs):
+    def create_volume(self, **kwargs):
         """
         Creates a new Volume.
         size(Required): Size of volume in GB.
@@ -59,14 +53,7 @@
         display_name: Optional Volume Name.
         metadata: A dictionary of values to be used as metadata.
         """
-        if size is None:
-            size = self.default_volume_size
-        post_body = {
-            'size': size
-        }
-        post_body.update(kwargs)
-
-        post_body = json.dumps({'volume': post_body})
+        post_body = json.dumps({'volume': kwargs})
         resp, body = self.post('os-volumes', post_body)
         body = json.loads(body)
         self.validate_response(schema.create_get_volume, resp, body)
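
Because the per-client default size and the constructor override are gone,
callers of ``create_volume`` now have to pass the size themselves.
Illustrative usage with example values, assuming a configured
``volumes_extensions_client``::

    volume = self.volumes_extensions_client.create_volume(
        size=1,                    # required: the client no longer supplies a default
        display_name='scratch-volume')
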
diff --git a/tempest/services/data_processing/v1_1/data_processing_client.py b/tempest/services/data_processing/v1_1/data_processing_client.py
index bbc0f2a..cba4c42 100644
--- a/tempest/services/data_processing/v1_1/data_processing_client.py
+++ b/tempest/services/data_processing/v1_1/data_processing_client.py
@@ -39,8 +39,8 @@
         self.expected_success(resp_status, resp.status)
         return resp, body
 
-    def _request_check_and_parse_resp(self, request_func, uri, resp_status,
-                                      resource_name, *args, **kwargs):
+    def _request_check_and_parse_resp(self, request_func, uri,
+                                      resp_status, *args, **kwargs):
         """Make a request using specified request_func, check response status
         code and parse response body.
 
@@ -50,36 +50,19 @@
         resp, body = request_func(uri, headers=headers, *args, **kwargs)
         self.expected_success(resp_status, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body[resource_name])
-
-    def _request_check_and_parse_resp_list(self, request_func, uri,
-                                           resp_status, resource_name,
-                                           *args, **kwargs):
-        """Make a request using specified request_func, check response status
-        code and parse response body.
-
-        It returns a ResponseBodyList.
-        """
-        headers = {'Content-Type': 'application/json'}
-        resp, body = request_func(uri, headers=headers, *args, **kwargs)
-        self.expected_success(resp_status, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body[resource_name])
+        return service_client.ResponseBody(resp, body)
 
     def list_node_group_templates(self):
         """List all node group templates for a user."""
 
         uri = 'node-group-templates'
-        return self._request_check_and_parse_resp_list(self.get, uri,
-                                                       200,
-                                                       'node_group_templates')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_node_group_template(self, tmpl_id):
         """Returns the details of a single node group template."""
 
         uri = 'node-group-templates/%s' % tmpl_id
-        return self._request_check_and_parse_resp(self.get, uri,
-                                                  200, 'node_group_template')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_node_group_template(self, name, plugin_name, hadoop_version,
                                    node_processes, flavor_id,
@@ -100,7 +83,6 @@
             'node_configs': node_configs or dict(),
         })
         return self._request_check_and_parse_resp(self.post, uri, 202,
-                                                  'node_group_template',
                                                   body=json.dumps(body))
 
     def delete_node_group_template(self, tmpl_id):
@@ -113,8 +95,7 @@
         """List all enabled plugins."""
 
         uri = 'plugins'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'plugins')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_plugin(self, plugin_name, plugin_version=None):
         """Returns the details of a single plugin."""
@@ -122,22 +103,19 @@
         uri = 'plugins/%s' % plugin_name
         if plugin_version:
             uri += '/%s' % plugin_version
-        return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def list_cluster_templates(self):
         """List all cluster templates for a user."""
 
         uri = 'cluster-templates'
-        return self._request_check_and_parse_resp_list(self.get, uri,
-                                                       200,
-                                                       'cluster_templates')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_cluster_template(self, tmpl_id):
         """Returns the details of a single cluster template."""
 
         uri = 'cluster-templates/%s' % tmpl_id
-        return self._request_check_and_parse_resp(self.get,
-                                                  uri, 200, 'cluster_template')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_cluster_template(self, name, plugin_name, hadoop_version,
                                 node_groups, cluster_configs=None,
@@ -157,7 +135,6 @@
             'cluster_configs': cluster_configs or dict(),
         })
         return self._request_check_and_parse_resp(self.post, uri, 202,
-                                                  'cluster_template',
                                                   body=json.dumps(body))
 
     def delete_cluster_template(self, tmpl_id):
@@ -170,16 +147,13 @@
         """List all data sources for a user."""
 
         uri = 'data-sources'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200,
-                                                       'data_sources')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_data_source(self, source_id):
         """Returns the details of a single data source."""
 
         uri = 'data-sources/%s' % source_id
-        return self._request_check_and_parse_resp(self.get,
-                                                  uri, 200, 'data_source')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_data_source(self, name, data_source_type, url, **kwargs):
         """Creates data source with specified params.
@@ -195,8 +169,7 @@
             'url': url
         })
         return self._request_check_and_parse_resp(self.post, uri,
-                                                  202, 'data_source',
-                                                  body=json.dumps(body))
+                                                  202, body=json.dumps(body))
 
     def delete_data_source(self, source_id):
         """Deletes the specified data source by id."""
@@ -208,22 +181,19 @@
         """List all job binary internals for a user."""
 
         uri = 'job-binary-internals'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'binaries')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_job_binary_internal(self, job_binary_id):
         """Returns the details of a single job binary internal."""
 
         uri = 'job-binary-internals/%s' % job_binary_id
-        return self._request_check_and_parse_resp(self.get, uri,
-                                                  200, 'job_binary_internal')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_job_binary_internal(self, name, data):
         """Creates job binary internal with specified params."""
 
         uri = 'job-binary-internals/%s' % name
-        return self._request_check_and_parse_resp(self.put, uri, 202,
-                                                  'job_binary_internal', data)
+        return self._request_check_and_parse_resp(self.put, uri, 202, data)
 
     def delete_job_binary_internal(self, job_binary_id):
         """Deletes the specified job binary internal by id."""
@@ -241,15 +211,13 @@
         """List all job binaries for a user."""
 
         uri = 'job-binaries'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'binaries')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_job_binary(self, job_binary_id):
         """Returns the details of a single job binary."""
 
         uri = 'job-binaries/%s' % job_binary_id
-        return self._request_check_and_parse_resp(self.get,
-                                                  uri, 200, 'job_binary')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_job_binary(self, name, url, extra=None, **kwargs):
         """Creates job binary with specified params.
@@ -265,8 +233,7 @@
             'extra': extra or dict(),
         })
         return self._request_check_and_parse_resp(self.post, uri,
-                                                  202, 'job_binary',
-                                                  body=json.dumps(body))
+                                                  202, body=json.dumps(body))
 
     def delete_job_binary(self, job_binary_id):
         """Deletes the specified job binary by id."""
@@ -284,14 +251,13 @@
         """List all jobs for a user."""
 
         uri = 'jobs'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'jobs')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_job(self, job_id):
         """Returns the details of a single job."""
 
         uri = 'jobs/%s' % job_id
-        return self._request_check_and_parse_resp(self.get, uri, 200, 'job')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_job(self, name, job_type, mains, libs=None, **kwargs):
         """Creates job with specified params.
@@ -307,8 +273,8 @@
             'mains': mains,
             'libs': libs or list(),
         })
-        return self._request_check_and_parse_resp(self.post, uri, 202,
-                                                  'job', body=json.dumps(body))
+        return self._request_check_and_parse_resp(self.post, uri,
+                                                  202, body=json.dumps(body))
 
     def delete_job(self, job_id):
         """Deletes the specified job by id."""
diff --git a/tempest/services/database/json/limits_client.py b/tempest/services/database/json/limits_client.py
index 830b67f..9358a33 100644
--- a/tempest/services/database/json/limits_client.py
+++ b/tempest/services/database/json/limits_client.py
@@ -27,4 +27,4 @@
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
+        return service_client.ResponseBody(resp, self._parse_resp(body))
diff --git a/tempest/services/database/json/versions_client.py b/tempest/services/database/json/versions_client.py
index ed1ba81..43d253b 100644
--- a/tempest/services/database/json/versions_client.py
+++ b/tempest/services/database/json/versions_client.py
@@ -43,4 +43,4 @@
 
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, self._parse_resp(body))
+        return service_client.ResponseBody(resp, self._parse_resp(body))
diff --git a/tempest/services/identity/v2/json/identity_client.py b/tempest/services/identity/v2/json/identity_client.py
index 1076fca..e6416d6 100644
--- a/tempest/services/identity/v2/json/identity_client.py
+++ b/tempest/services/identity/v2/json/identity_client.py
@@ -56,7 +56,7 @@
         resp, body = self.get('OS-KSADM/roles/%s' % role_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
+        return service_client.ResponseBody(resp, body)
 
     def create_tenant(self, name, **kwargs):
         """
@@ -125,10 +125,10 @@
         resp, body = self.get('tenants')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['tenants'])
+        return service_client.ResponseBody(resp, body)
 
     def get_tenant_by_name(self, tenant_name):
-        tenants = self.list_tenants()
+        tenants = self.list_tenants()['tenants']
         for tenant in tenants:
             if tenant['name'] == tenant_name:
                 return tenant
@@ -259,6 +259,33 @@
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
+    def create_endpoint(self, service_id, region_id, **kwargs):
+        """Create an endpoint for service."""
+        post_body = {
+            'service_id': service_id,
+            'region': region_id,
+            'publicurl': kwargs.get('publicurl'),
+            'adminurl': kwargs.get('adminurl'),
+            'internalurl': kwargs.get('internalurl')
+        }
+        post_body = json.dumps({'endpoint': post_body})
+        resp, body = self.post('/endpoints', post_body)
+        self.expected_success(200, resp.status)
+        return service_client.ResponseBody(resp, self._parse_resp(body))
+
+    def list_endpoints(self):
+        """List Endpoints - Returns Endpoints."""
+        resp, body = self.get('/endpoints')
+        self.expected_success(200, resp.status)
+        return service_client.ResponseBodyList(resp, self._parse_resp(body))
+
+    def delete_endpoint(self, endpoint_id):
+        """Delete an endpoint."""
+        url = '/endpoints/%s' % endpoint_id
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
     def update_user_password(self, user_id, new_pass):
         """Update User Password."""
         put_body = {
diff --git a/tempest/services/identity/v3/json/credentials_client.py b/tempest/services/identity/v3/json/credentials_client.py
index e27f960..decf3a8 100644
--- a/tempest/services/identity/v3/json/credentials_client.py
+++ b/tempest/services/identity/v3/json/credentials_client.py
@@ -36,11 +36,11 @@
         self.expected_success(201, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
-        return service_client.ResponseBody(resp, body['credential'])
+        return service_client.ResponseBody(resp, body)
 
     def update_credential(self, credential_id, **kwargs):
         """Updates a credential."""
-        body = self.get_credential(credential_id)
+        body = self.get_credential(credential_id)['credential']
         cred_type = kwargs.get('type', body['type'])
         access_key = kwargs.get('access_key', body['blob']['access'])
         secret_key = kwargs.get('secret_key', body['blob']['secret'])
@@ -59,7 +59,7 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
-        return service_client.ResponseBody(resp, body['credential'])
+        return service_client.ResponseBody(resp, body)
 
     def get_credential(self, credential_id):
         """To GET Details of a credential."""
@@ -67,14 +67,14 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
-        return service_client.ResponseBody(resp, body['credential'])
+        return service_client.ResponseBody(resp, body)
 
     def list_credentials(self):
         """Lists out all the available credentials."""
         resp, body = self.get('credentials')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['credentials'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_credential(self, credential_id):
         """Deletes a credential."""
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
index f93fb74..6bdf8b3 100644
--- a/tempest/services/identity/v3/json/endpoints_client.py
+++ b/tempest/services/identity/v3/json/endpoints_client.py
@@ -26,7 +26,7 @@
         resp, body = self.get('endpoints')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['endpoints'])
+        return service_client.ResponseBody(resp, body)
 
     def create_endpoint(self, service_id, interface, url, **kwargs):
         """Create endpoint.
@@ -51,7 +51,7 @@
         resp, body = self.post('endpoints', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['endpoint'])
+        return service_client.ResponseBody(resp, body)
 
     def update_endpoint(self, endpoint_id, service_id=None, interface=None,
                         url=None, region=None, enabled=None, **kwargs):
@@ -78,7 +78,7 @@
         resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['endpoint'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_endpoint(self, endpoint_id):
         """Delete endpoint."""
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 87d4b79..9a60a24 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -49,11 +49,11 @@
         resp, body = self.post('users', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['user'])
+        return service_client.ResponseBody(resp, body)
 
     def update_user(self, user_id, name, **kwargs):
         """Updates a user."""
-        body = self.get_user(user_id)
+        body = self.get_user(user_id)['user']
         email = kwargs.get('email', body['email'])
         en = kwargs.get('enabled', body['enabled'])
         project_id = kwargs.get('project_id', body['project_id'])
@@ -78,7 +78,7 @@
         resp, body = self.patch('users/%s' % user_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['user'])
+        return service_client.ResponseBody(resp, body)
 
     def update_user_password(self, user_id, password, original_password):
         """Updates a user password."""
@@ -96,7 +96,7 @@
         resp, body = self.get('users/%s/projects' % user_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['projects'])
+        return service_client.ResponseBody(resp, body)
 
     def get_users(self, params=None):
         """Get the list of users."""
@@ -106,14 +106,14 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['users'])
+        return service_client.ResponseBody(resp, body)
 
     def get_user(self, user_id):
         """GET a user."""
         resp, body = self.get("users/%s" % user_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['user'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_user(self, user_id):
         """Deletes a User."""
@@ -136,7 +136,7 @@
         resp, body = self.post('projects', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['project'])
+        return service_client.ResponseBody(resp, body)
 
     def list_projects(self, params=None):
         url = "projects"
@@ -145,10 +145,10 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['projects'])
+        return service_client.ResponseBody(resp, body)
 
     def update_project(self, project_id, **kwargs):
-        body = self.get_project(project_id)
+        body = self.get_project(project_id)['project']
         name = kwargs.get('name', body['name'])
         desc = kwargs.get('description', body['description'])
         en = kwargs.get('enabled', body['enabled'])
@@ -164,14 +164,14 @@
         resp, body = self.patch('projects/%s' % project_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['project'])
+        return service_client.ResponseBody(resp, body)
 
     def get_project(self, project_id):
         """GET a Project."""
         resp, body = self.get("projects/%s" % project_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['project'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_project(self, project_id):
         """Delete a project."""
@@ -188,21 +188,21 @@
         resp, body = self.post('roles', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
+        return service_client.ResponseBody(resp, body)
 
     def get_role(self, role_id):
         """GET a Role."""
         resp, body = self.get('roles/%s' % str(role_id))
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
+        return service_client.ResponseBody(resp, body)
 
     def list_roles(self):
         """Get the list of Roles."""
         resp, body = self.get("roles")
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
+        return service_client.ResponseBody(resp, body)
 
     def update_role(self, name, role_id):
         """Create a Role."""
@@ -213,7 +213,7 @@
         resp, body = self.patch('roles/%s' % str(role_id), post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_role(self, role_id):
         """Delete a role."""
@@ -241,7 +241,7 @@
         resp, body = self.post('domains', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['domain'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_domain(self, domain_id):
         """Delete a domain."""
@@ -257,11 +257,11 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['domains'])
+        return service_client.ResponseBody(resp, body)
 
     def update_domain(self, domain_id, **kwargs):
         """Updates a domain."""
-        body = self.get_domain(domain_id)
+        body = self.get_domain(domain_id)['domain']
         description = kwargs.get('description', body['description'])
         en = kwargs.get('enabled', body['enabled'])
         name = kwargs.get('name', body['name'])
@@ -274,14 +274,14 @@
         resp, body = self.patch('domains/%s' % domain_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['domain'])
+        return service_client.ResponseBody(resp, body)
 
     def get_domain(self, domain_id):
         """Get Domain details."""
         resp, body = self.get('domains/%s' % domain_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['domain'])
+        return service_client.ResponseBody(resp, body)
 
     def get_token(self, resp_token):
         """Get token details."""
@@ -289,7 +289,7 @@
         resp, body = self.get("auth/tokens", headers=headers)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['token'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_token(self, resp_token):
         """Deletes token."""
@@ -313,25 +313,25 @@
         resp, body = self.post('groups', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['group'])
+        return service_client.ResponseBody(resp, body)
 
     def get_group(self, group_id):
         """Get group details."""
         resp, body = self.get('groups/%s' % group_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['group'])
+        return service_client.ResponseBody(resp, body)
 
     def list_groups(self):
         """Lists the groups."""
         resp, body = self.get('groups')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['groups'])
+        return service_client.ResponseBody(resp, body)
 
     def update_group(self, group_id, **kwargs):
         """Updates a group."""
-        body = self.get_group(group_id)
+        body = self.get_group(group_id)['group']
         name = kwargs.get('name', body['name'])
         description = kwargs.get('description', body['description'])
         post_body = {
@@ -342,7 +342,7 @@
         resp, body = self.patch('groups/%s' % group_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['group'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_group(self, group_id):
         """Delete a group."""
@@ -362,14 +362,14 @@
         resp, body = self.get('groups/%s/users' % group_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['users'])
+        return service_client.ResponseBody(resp, body)
 
     def list_user_groups(self, user_id):
         """Lists groups which a user belongs to."""
         resp, body = self.get('users/%s/groups' % user_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['groups'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_group_user(self, group_id, user_id):
         """Delete user in group."""
@@ -397,7 +397,7 @@
                               (project_id, user_id))
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
+        return service_client.ResponseBody(resp, body)
 
     def list_user_roles_on_domain(self, domain_id, user_id):
         """list roles of a user on a domain."""
@@ -405,7 +405,7 @@
                               (domain_id, user_id))
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
+        return service_client.ResponseBody(resp, body)
 
     def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
         """Delete role of a user on a project."""
@@ -441,7 +441,7 @@
                               (project_id, group_id))
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
+        return service_client.ResponseBody(resp, body)
 
     def list_group_roles_on_domain(self, domain_id, group_id):
         """list roles of a user on a domain."""
@@ -449,7 +449,7 @@
                               (domain_id, group_id))
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
+        return service_client.ResponseBody(resp, body)
 
     def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
         """Delete role of a user on a project."""
@@ -481,7 +481,7 @@
         resp, body = self.post('OS-TRUST/trusts', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['trust'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_trust(self, trust_id):
         """Deletes a trust."""
@@ -501,21 +501,21 @@
             resp, body = self.get("OS-TRUST/trusts")
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['trusts'])
+        return service_client.ResponseBody(resp, body)
 
     def get_trust(self, trust_id):
         """GET trust."""
         resp, body = self.get("OS-TRUST/trusts/%s" % trust_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['trust'])
+        return service_client.ResponseBody(resp, body)
 
     def get_trust_roles(self, trust_id):
         """GET roles delegated by a trust."""
         resp, body = self.get("OS-TRUST/trusts/%s/roles" % trust_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['roles'])
+        return service_client.ResponseBody(resp, body)
 
     def get_trust_role(self, trust_id, role_id):
         """GET role delegated by a trust."""
@@ -523,7 +523,7 @@
                               % (trust_id, role_id))
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['role'])
+        return service_client.ResponseBody(resp, body)
 
     def check_trust_role(self, trust_id, role_id):
         """HEAD Check if role is delegated by a trust."""
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policy_client.py
index f820598..3231bb0 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policy_client.py
@@ -31,14 +31,14 @@
         resp, body = self.post('policies', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['policy'])
+        return service_client.ResponseBody(resp, body)
 
     def list_policies(self):
         """Lists the policies."""
         resp, body = self.get('policies')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['policies'])
+        return service_client.ResponseBody(resp, body)
 
     def get_policy(self, policy_id):
         """Lists out the given policy."""
@@ -46,7 +46,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['policy'])
+        return service_client.ResponseBody(resp, body)
 
     def update_policy(self, policy_id, **kwargs):
         """Updates a policy."""
@@ -59,7 +59,7 @@
         resp, body = self.patch(url, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['policy'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_policy(self, policy_id):
         """Deletes the policy."""
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/region_client.py
index 43226be..24c6f33 100644
--- a/tempest/services/identity/v3/json/region_client.py
+++ b/tempest/services/identity/v3/json/region_client.py
@@ -37,7 +37,7 @@
             resp, body = self.post('regions', req_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['region'])
+        return service_client.ResponseBody(resp, body)
 
     def update_region(self, region_id, **kwargs):
         """Updates a region."""
@@ -50,7 +50,7 @@
         resp, body = self.patch('regions/%s' % region_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['region'])
+        return service_client.ResponseBody(resp, body)
 
     def get_region(self, region_id):
         """Get region."""
@@ -58,7 +58,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['region'])
+        return service_client.ResponseBody(resp, body)
 
     def list_regions(self, params=None):
         """List regions."""
@@ -68,7 +68,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['regions'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_region(self, region_id):
         """Delete region."""
diff --git a/tempest/services/identity/v3/json/service_client.py b/tempest/services/identity/v3/json/service_client.py
index 52ff479..2acc3a8 100644
--- a/tempest/services/identity/v3/json/service_client.py
+++ b/tempest/services/identity/v3/json/service_client.py
@@ -23,7 +23,7 @@
 
     def update_service(self, service_id, **kwargs):
         """Updates a service."""
-        body = self.get_service(service_id)
+        body = self.get_service(service_id)['service']
         name = kwargs.get('name', body['name'])
         type = kwargs.get('type', body['type'])
         desc = kwargs.get('description', body['description'])
@@ -36,7 +36,7 @@
         resp, body = self.patch('services/%s' % service_id, patch_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['service'])
+        return service_client.ResponseBody(resp, body)
 
     def get_service(self, service_id):
         """Get Service."""
@@ -44,7 +44,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['service'])
+        return service_client.ResponseBody(resp, body)
 
     def create_service(self, serv_type, name=None, description=None,
                        enabled=True):
@@ -58,7 +58,7 @@
         resp, body = self.post("services", body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body["service"])
+        return service_client.ResponseBody(resp, body)
 
     def delete_service(self, serv_id):
         url = "services/" + serv_id
@@ -70,4 +70,4 @@
         resp, body = self.get('services')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['services'])
+        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index a07612a..d97da36 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -130,7 +130,7 @@
         self._error_checker('POST', '/v1/images', headers, data, resp,
                             body_iter)
         body = json.loads(''.join([c for c in body_iter]))
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def _update_with_data(self, image_id, headers, data):
         url = '/v1/images/%s' % image_id
@@ -139,7 +139,7 @@
         self._error_checker('PUT', url, headers, data,
                             resp, body_iter)
         body = json.loads(''.join([c for c in body_iter]))
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     @property
     def http(self):
@@ -169,7 +169,7 @@
         resp, body = self.post('v1/images', None, headers)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def update_image(self, image_id, name=None, container_format=None,
                      data=None, properties=None):
@@ -193,7 +193,7 @@
         resp, body = self.put(url, data, headers)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_image(self, image_id):
         url = 'v1/images/%s' % image_id
@@ -223,7 +223,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['images'])
+        return service_client.ResponseBody(resp, body)
 
     def get_image_meta(self, image_id):
         url = 'v1/images/%s' % image_id
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index 67f7708..c5aa41a 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -123,7 +123,7 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         self._validate_schema(body, type='images')
-        return service_client.ResponseBodyList(resp, body['images'])
+        return service_client.ResponseBody(resp, body)
 
     def show_image(self, image_id):
         url = 'v2/images/%s' % image_id
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 65f3aa7..ce200d2 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -39,46 +39,12 @@
     version = '2.0'
     uri_prefix = "v2.0"
 
-    def get_uri(self, plural_name):
-        # get service prefix from resource name
-
-        # the following map is used to construct proper URI
-        # for the given neutron resource
-        service_resource_prefix_map = {
-            'networks': '',
-            'subnets': '',
-            'ports': '',
-            'metering_labels': 'metering',
-            'metering_label_rules': 'metering',
-        }
-        service_prefix = service_resource_prefix_map.get(
-            plural_name)
-        plural_name = plural_name.replace("_", "-")
-        if service_prefix:
-            uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
-                                plural_name)
-        else:
-            uri = '%s/%s' % (self.uri_prefix, plural_name)
-        return uri
-
-    def pluralize(self, resource_name):
-        # get plural from map or just add 's'
-
-        # map from resource name to a plural name
-        # needed only for those which can't be constructed as name + 's'
-        resource_plural_map = {
-            'security_groups': 'security_groups',
-            'security_group_rules': 'security_group_rules',
-            'quotas': 'quotas',
-        }
-        return resource_plural_map.get(resource_name, resource_name + 's')
-
     def _list_resources(self, uri, **filters):
         req_uri = self.uri_prefix + uri
         if filters:
             req_uri += '?' + urllib.urlencode(filters, doseq=1)
         resp, body = self.get(req_uri)
-        body = self.deserialize_list(body)
+        body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
@@ -96,23 +62,23 @@
         if fields:
             req_uri += '?' + urllib.urlencode(fields, doseq=1)
         resp, body = self.get(req_uri)
-        body = self.deserialize_single(body)
+        body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
     def _create_resource(self, uri, post_data):
         req_uri = self.uri_prefix + uri
-        req_post_data = self.serialize(post_data)
+        req_post_data = json.dumps(post_data)
         resp, body = self.post(req_uri, req_post_data)
-        body = self.deserialize_single(body)
+        body = json.loads(body)
         self.expected_success(201, resp.status)
         return service_client.ResponseBody(resp, body)
 
     def _update_resource(self, uri, post_data):
         req_uri = self.uri_prefix + uri
-        req_post_data = self.serialize(post_data)
+        req_post_data = json.dumps(post_data)
         resp, body = self.put(req_uri, req_post_data)
-        body = self.deserialize_single(body)
+        body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
@@ -285,34 +251,21 @@
         uri = '/extensions'
         return self._list_resources(uri, **filters)
 
-    # Common methods that are hard to automate
     def create_bulk_network(self, names):
         network_list = [{'name': name} for name in names]
         post_data = {'networks': network_list}
-        body = self.serialize_list(post_data, "networks", "network")
-        uri = self.get_uri("networks")
-        resp, body = self.post(uri, body)
-        body = self.deserialize_list(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/networks'
+        return self._create_resource(uri, post_data)
 
     def create_bulk_subnet(self, subnet_list):
         post_data = {'subnets': subnet_list}
-        body = self.serialize_list(post_data, 'subnets', 'subnet')
-        uri = self.get_uri('subnets')
-        resp, body = self.post(uri, body)
-        body = self.deserialize_list(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/subnets'
+        return self._create_resource(uri, post_data)
 
     def create_bulk_port(self, port_list):
         post_data = {'ports': port_list}
-        body = self.serialize_list(post_data, 'ports', 'port')
-        uri = self.get_uri('ports')
-        resp, body = self.post(uri, body)
-        body = self.deserialize_list(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/ports'
+        return self._create_resource(uri, post_data)
 
     def wait_for_resource_deletion(self, resource_type, id):
         """Waits for a resource to be deleted."""
@@ -371,32 +324,14 @@
             message = '(%s) %s' % (caller, message)
         raise exceptions.TimeoutException(message)
 
-    def deserialize_single(self, body):
-        return json.loads(body)
-
-    def deserialize_list(self, body):
-        return json.loads(body)
-
-    def serialize(self, data):
-        return json.dumps(data)
-
-    def serialize_list(self, data, root=None, item=None):
-        return self.serialize(data)
-
     def update_quotas(self, tenant_id, **kwargs):
         put_body = {'quota': kwargs}
-        body = json.dumps(put_body)
-        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['quota'])
+        uri = '/quotas/%s' % tenant_id
+        return self._update_resource(uri, put_body)
 
     def reset_quotas(self, tenant_id):
-        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/quotas/%s' % tenant_id
+        return self._delete_resource(uri)
 
     def show_quotas(self, tenant_id, **fields):
         uri = '/quotas/%s' % tenant_id
@@ -410,18 +345,12 @@
         post_body = {'router': kwargs}
         post_body['router']['name'] = name
         post_body['router']['admin_state_up'] = admin_state_up
-        body = json.dumps(post_body)
-        uri = '%s/routers' % (self.uri_prefix)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/routers'
+        return self._create_resource(uri, post_body)
 
     def _update_router(self, router_id, set_enable_snat, **kwargs):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
+        uri = '/routers/%s' % router_id
+        body = self._show_resource(uri)
         update_body = {}
         update_body['name'] = kwargs.get('name', body['router']['name'])
         update_body['admin_state_up'] = kwargs.get(
@@ -440,11 +369,7 @@
         if 'distributed' in kwargs:
             update_body['distributed'] = kwargs['distributed']
         update_body = dict(router=update_body)
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def update_router(self, router_id, **kwargs):
         """Update a router leaving enable_snat to its default value."""
@@ -476,64 +401,37 @@
         return self._update_router(router_id, set_enable_snat=True, **kwargs)
 
     def add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
-                                                      router_id)
+        uri = '/routers/%s/add_router_interface' % router_id
         update_body = {"subnet_id": subnet_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def add_router_interface_with_port_id(self, router_id, port_id):
-        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
-                                                      router_id)
+        uri = '/routers/%s/add_router_interface' % router_id
         update_body = {"port_id": port_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
-        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
-                                                         router_id)
+        uri = '/routers/%s/remove_router_interface' % router_id
         update_body = {"subnet_id": subnet_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def remove_router_interface_with_port_id(self, router_id, port_id):
-        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
-                                                         router_id)
+        uri = '/routers/%s/remove_router_interface' % router_id
         update_body = {"port_id": port_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def list_router_interfaces(self, uuid):
-        uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/ports?device_id=%s' % uuid
+        return self._list_resources(uri)
 
     def update_agent(self, agent_id, agent_info):
         """
         :param agent_info: Agent update information.
         E.g {"admin_state_up": True}
         """
-        uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
+        uri = '/agents/%s' % agent_id
         agent = {"agent": agent_info}
-        body = json.dumps(agent)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, agent)
 
     def show_agent(self, agent_id, **fields):
         uri = '/agents/%s' % agent_id
@@ -544,88 +442,54 @@
         return self._list_resources(uri, **filters)
 
     def list_routers_on_l3_agent(self, agent_id):
-        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/l3-routers' % agent_id
+        return self._list_resources(uri)
 
     def list_l3_agents_hosting_router(self, router_id):
-        uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/routers/%s/l3-agents' % router_id
+        return self._list_resources(uri)
 
     def add_router_to_l3_agent(self, agent_id, router_id):
-        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
+        uri = '/agents/%s/l3-routers' % agent_id
         post_body = {"router_id": router_id}
-        body = json.dumps(post_body)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._create_resource(uri, post_body)
 
     def remove_router_from_l3_agent(self, agent_id, router_id):
-        uri = '%s/agents/%s/l3-routers/%s' % (
-            self.uri_prefix, agent_id, router_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
+        return self._delete_resource(uri)
 
     def list_dhcp_agent_hosting_network(self, network_id):
-        uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/networks/%s/dhcp-agents' % network_id
+        return self._list_resources(uri)
 
     def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
-        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/dhcp-networks' % agent_id
+        return self._list_resources(uri)
 
     def remove_network_from_dhcp_agent(self, agent_id, network_id):
-        uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
-                                                 network_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
+                                               network_id)
+        return self._delete_resource(uri)
 
     def update_extra_routes(self, router_id, routes):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+        uri = '/routers/%s' % router_id
         put_body = {
             'router': {
                 'routes': routes
             }
         }
-        body = json.dumps(put_body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, put_body)
 
     def delete_extra_routes(self, router_id):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        null_routes = None
+        uri = '/routers/%s' % router_id
         put_body = {
             'router': {
-                'routes': null_routes
+                'routes': None
             }
         }
-        body = json.dumps(put_body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, put_body)
 
     def add_dhcp_agent_to_network(self, agent_id, network_id):
         post_body = {'network_id': network_id}
-        body = json.dumps(post_body)
-        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/dhcp-networks' % agent_id
+        return self._create_resource(uri, post_body)
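The agent client methods above now delegate to shared helpers on the base network client, which are expected to prepend ``self.uri_prefix``, serialize the request body, check the response status, and wrap the result in a ``ResponseBody``. The base class itself is not part of this diff, so the following is only a rough sketch of what the create helper presumably looks like::

    def _create_resource(self, uri, post_data):
        req_uri = self.uri_prefix + uri
        req_post_data = json.dumps(post_data)
        resp, body = self.post(req_uri, req_post_data)
        body = json.loads(body)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)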
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index b31fe1b..e8ee20b 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -119,24 +119,6 @@
             params={'limit': limit, 'format': 'json'})
         self.expected_success(200, resp.status)
         return objlist
-        """tmp = []
-        for obj in objlist:
-            tmp.append(obj['name'])
-        objlist = tmp
-
-        if len(objlist) >= limit:
-
-            # Increment marker
-            marker = objlist[len(objlist) - 1]
-
-            # Get the next chunk of the list
-            objlist.extend(_list_all_container_objects(container,
-                                                      params={'marker': marker,
-                                                              'limit': limit}))
-            return objlist
-        else:
-            # Return final, complete list
-            return objlist"""
 
     def list_container_contents(self, container, params=None):
         """
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 84a9ed9..22e53f5 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -36,7 +36,7 @@
         resp, body = self.get(uri)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['stacks'])
+        return service_client.ResponseBody(resp, body)
 
     def create_stack(self, name, disable_rollback=True, parameters=None,
                      timeout_mins=60, template=None, template_url=None,
@@ -111,7 +111,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['stack'])
+        return service_client.ResponseBody(resp, body)
 
     def suspend_stack(self, stack_identifier):
         """Suspend a stack."""
@@ -135,7 +135,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['resources'])
+        return service_client.ResponseBody(resp, body)
 
     def show_resource(self, stack_identifier, resource_name):
         """Returns the details of a single resource."""
@@ -143,7 +143,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['resource'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_stack(self, stack_identifier):
         """Deletes the specified Stack."""
@@ -160,7 +160,7 @@
         while True:
             try:
                 body = self.show_resource(
-                    stack_identifier, resource_name)
+                    stack_identifier, resource_name)['resource']
             except lib_exc.NotFound:
                 # ignore this, as the resource may not have
                 # been created yet
@@ -195,7 +195,7 @@
 
         while True:
             try:
-                body = self.show_stack(stack_identifier)
+                body = self.show_stack(stack_identifier)['stack']
             except lib_exc.NotFound:
                 if status == 'DELETE_COMPLETE':
                     return
@@ -224,7 +224,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def list_events(self, stack_identifier):
         """Returns list of all events for a stack."""
@@ -232,7 +232,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['events'])
+        return service_client.ResponseBody(resp, body)
 
     def list_resource_events(self, stack_identifier, resource_name):
         """Returns list of all events for a resource from stack."""
@@ -241,7 +241,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['events'])
+        return service_client.ResponseBody(resp, body)
 
     def show_event(self, stack_identifier, resource_name, event_id):
         """Returns the details of a single stack's event."""
@@ -250,7 +250,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['event'])
+        return service_client.ResponseBody(resp, body)
 
     def show_template(self, stack_identifier):
         """Returns the template for the stack."""
@@ -293,7 +293,7 @@
         resp, body = self.get('resource_types')
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['resource_types'])
+        return service_client.ResponseBody(resp, body)
 
     def show_resource_type(self, resource_type_name):
         """Return the schema of a resource type."""
diff --git a/tempest/services/telemetry/json/telemetry_client.py b/tempest/services/telemetry/json/telemetry_client.py
index 2b1cdc0..1f181e3 100644
--- a/tempest/services/telemetry/json/telemetry_client.py
+++ b/tempest/services/telemetry/json/telemetry_client.py
@@ -84,6 +84,10 @@
         uri = '%s/meters/%s' % (self.uri_prefix, meter_id)
         return self._helper_list(uri, query)
 
+    def list_events(self, query=None):
+        uri = '%s/events' % self.uri_prefix
+        return self._helper_list(uri, query)
+
     def show_resource(self, resource_id):
         uri = '%s/resources/%s' % (self.uri_prefix, resource_id)
         resp, body = self.get(uri)
diff --git a/tempest/services/volume/json/admin/volume_hosts_client.py b/tempest/services/volume/json/admin/volume_hosts_client.py
index 6801453..ab9cd5a 100644
--- a/tempest/services/volume/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/json/admin/volume_hosts_client.py
@@ -34,7 +34,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['hosts'])
+        return service_client.ResponseBody(resp, body)
 
 
 class VolumeHostsClient(BaseVolumeHostsClient):
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
index c8607c1..798a642 100644
--- a/tempest/services/volume/json/admin/volume_services_client.py
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -29,7 +29,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['services'])
+        return service_client.ResponseBody(resp, body)
 
 
 class VolumesServicesClient(BaseVolumesServicesClient):
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index 84c7bc5..cd61859 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -58,7 +58,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['volume_types'])
+        return service_client.ResponseBody(resp, body)
 
     def show_volume_type(self, volume_id):
         """Returns the details of a single volume_type."""
@@ -66,7 +66,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['volume_type'])
+        return service_client.ResponseBody(resp, body)
 
     def create_volume_type(self, name, **kwargs):
         """
@@ -84,7 +84,7 @@
         resp, body = self.post('types', post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['volume_type'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_volume_type(self, volume_id):
         """Deletes the Specified Volume_type."""
@@ -101,7 +101,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['extra_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def show_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
         """Returns the details of a single volume_type extra spec."""
@@ -123,7 +123,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['extra_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_volume_type_extra_specs(self, vol_id, extra_spec_name):
         """Deletes the Specified Volume_type extra spec."""
@@ -177,7 +177,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['encryption'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_encryption_type(self, vol_type_id):
         """Delete the encryption type for the specified volume-type."""
diff --git a/tempest/services/volume/json/availability_zone_client.py b/tempest/services/volume/json/availability_zone_client.py
index 13d5d55..4d24ede 100644
--- a/tempest/services/volume/json/availability_zone_client.py
+++ b/tempest/services/volume/json/availability_zone_client.py
@@ -24,7 +24,7 @@
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['availabilityZoneInfo'])
+        return service_client.ResponseBody(resp, body)
 
 
 class VolumeAvailabilityZoneClient(BaseVolumeAvailabilityZoneClient):
diff --git a/tempest/services/volume/json/backups_client.py b/tempest/services/volume/json/backups_client.py
index 0f83b8d..6827c93 100644
--- a/tempest/services/volume/json/backups_client.py
+++ b/tempest/services/volume/json/backups_client.py
@@ -17,6 +17,8 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest_lib import exceptions as lib_exc
+
 from tempest.common import service_client
 from tempest import exceptions
 
@@ -40,7 +42,7 @@
         resp, body = self.post('backups', post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
-        return service_client.ResponseBody(resp, body['backup'])
+        return service_client.ResponseBody(resp, body)
 
     def restore_backup(self, backup_id, volume_id=None):
         """Restore volume from backup."""
@@ -49,7 +51,7 @@
         resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
-        return service_client.ResponseBody(resp, body['restore'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_backup(self, backup_id):
         """Delete a backup of volume."""
@@ -63,7 +65,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['backup'])
+        return service_client.ResponseBody(resp, body)
 
     def list_backups(self, detail=False):
         """Information for all the tenant's backups."""
@@ -73,17 +75,35 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['backups'])
+        return service_client.ResponseBody(resp, body)
+
+    def export_backup(self, backup_id):
+        """Export backup metadata record."""
+        url = "backups/%s/export_record" % backup_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def import_backup(self, backup_service, backup_url):
+        """Import backup metadata record."""
+        post_body = {'backup_service': backup_service,
+                     'backup_url': backup_url}
+        post_body = json.dumps({'backup-record': post_body})
+        resp, body = self.post("backups/import_record", post_body)
+        body = json.loads(body)
+        self.expected_success(201, resp.status)
+        return service_client.ResponseBody(resp, body)
 
     def wait_for_backup_status(self, backup_id, status):
         """Waits for a Backup to reach a given status."""
-        body = self.show_backup(backup_id)
+        body = self.show_backup(backup_id)['backup']
         backup_status = body['status']
         start = int(time.time())
 
         while backup_status != status:
             time.sleep(self.build_interval)
-            body = self.show_backup(backup_id)
+            body = self.show_backup(backup_id)['backup']
             backup_status = body['status']
             if backup_status == 'error':
                 raise exceptions.VolumeBackupException(backup_id=backup_id)
@@ -95,6 +115,18 @@
                             self.build_timeout))
                 raise exceptions.TimeoutException(message)
 
+    def wait_for_backup_deletion(self, backup_id):
+        """Waits for backup deletion"""
+        start_time = int(time.time())
+        while True:
+            try:
+                self.show_backup(backup_id)
+            except lib_exc.NotFound:
+                return
+            if int(time.time()) - start_time >= self.build_timeout:
+                raise exceptions.TimeoutException
+            time.sleep(self.build_interval)
+
 
 class BackupsClient(BaseBackupsClient):
     """Volume V1 Backups client"""
diff --git a/tempest/services/volume/json/extensions_client.py b/tempest/services/volume/json/extensions_client.py
index 1098e1e..5744d4a 100644
--- a/tempest/services/volume/json/extensions_client.py
+++ b/tempest/services/volume/json/extensions_client.py
@@ -25,7 +25,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['extensions'])
+        return service_client.ResponseBody(resp, body)
 
 
 class ExtensionsClient(BaseExtensionsClient):
diff --git a/tempest/services/volume/json/qos_client.py b/tempest/services/volume/json/qos_client.py
index e3d6a29..c79168c 100644
--- a/tempest/services/volume/json/qos_client.py
+++ b/tempest/services/volume/json/qos_client.py
@@ -48,15 +48,15 @@
         start_time = int(time.time())
         while True:
             if operation == 'qos-key-unset':
-                body = self.show_qos(qos_id)
+                body = self.show_qos(qos_id)['qos_specs']
                 if not any(key in body['specs'] for key in args):
                     return
             elif operation == 'disassociate':
-                body = self.show_association_qos(qos_id)
+                body = self.show_association_qos(qos_id)['qos_associations']
                 if not any(args in body[i]['id'] for i in range(0, len(body))):
                     return
             elif operation == 'disassociate-all':
-                body = self.show_association_qos(qos_id)
+                body = self.show_association_qos(qos_id)['qos_associations']
                 if not body:
                     return
             else:
@@ -79,7 +79,7 @@
         resp, body = self.post('qos-specs', post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['qos_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_qos(self, qos_id, force=False):
         """Delete the specified QoS specification."""
@@ -94,7 +94,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['qos_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def show_qos(self, qos_id):
         """Get the specified QoS specification."""
@@ -102,7 +102,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['qos_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def set_qos_key(self, qos_id, **kwargs):
         """Set the specified keys/values of QoS specification.
@@ -113,7 +113,7 @@
         resp, body = self.put('qos-specs/%s' % qos_id, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['qos_specs'])
+        return service_client.ResponseBody(resp, body)
 
     def unset_qos_key(self, qos_id, keys):
         """Unset the specified keys of QoS specification.
@@ -139,7 +139,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['qos_associations'])
+        return service_client.ResponseBody(resp, body)
 
     def disassociate_qos(self, qos_id, vol_type_id):
         """Disassociate the specified QoS with specified volume-type."""
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index fa1f9dd..3fcf18c 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -40,7 +40,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['snapshots'])
+        return service_client.ResponseBody(resp, body)
 
     def show_snapshot(self, snapshot_id):
         """Returns the details of a single snapshot."""
@@ -48,7 +48,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['snapshot'])
+        return service_client.ResponseBody(resp, body)
 
     def create_snapshot(self, volume_id, **kwargs):
         """
@@ -64,7 +64,7 @@
         resp, body = self.post('snapshots', post_body)
         body = json.loads(body)
         self.expected_success(self.create_resp, resp.status)
-        return service_client.ResponseBody(resp, body['snapshot'])
+        return service_client.ResponseBody(resp, body)
 
     def update_snapshot(self, snapshot_id, **kwargs):
         """Updates a snapshot."""
@@ -72,11 +72,11 @@
         resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['snapshot'])
+        return service_client.ResponseBody(resp, body)
 
     # NOTE(afazekas): just for the wait function
     def _get_snapshot_status(self, snapshot_id):
-        body = self.show_snapshot(snapshot_id)
+        body = self.show_snapshot(snapshot_id)['snapshot']
         status = body['status']
         # NOTE(afazekas): snapshot can reach an "error"
         # state in a "normal" lifecycle
@@ -155,7 +155,7 @@
         resp, body = self.post(url, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def show_snapshot_metadata(self, snapshot_id):
         """Get metadata of the snapshot."""
@@ -163,7 +163,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def update_snapshot_metadata(self, snapshot_id, metadata):
         """Update metadata for the snapshot."""
@@ -172,7 +172,7 @@
         resp, body = self.put(url, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['metadata'])
+        return service_client.ResponseBody(resp, body)
 
     def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
         """Update metadata item for the snapshot."""
@@ -181,7 +181,7 @@
         resp, body = self.put(url, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body['meta'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_snapshot_metadata_item(self, snapshot_id, id):
         """Delete metadata item for the snapshot."""
diff --git a/tempest/stress/actions/server_create_destroy.py b/tempest/stress/actions/server_create_destroy.py
index 9f41526..17f4bc9 100644
--- a/tempest/stress/actions/server_create_destroy.py
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -37,5 +37,6 @@
         self.logger.info("created %s" % server_id)
         self.logger.info("deleting %s" % name)
         self.manager.servers_client.delete_server(server_id)
-        self.manager.servers_client.wait_for_server_termination(server_id)
+        waiters.wait_for_server_termination(self.manager.servers_client,
+                                            server_id)
         self.logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index 4a27466..2505a77 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -86,18 +86,21 @@
     def _destroy_vm(self):
         self.logger.info("deleting %s" % self.server_id)
         self.manager.servers_client.delete_server(self.server_id)
-        self.manager.servers_client.wait_for_server_termination(self.server_id)
+        waiters.wait_for_server_termination(self.manager.servers_client,
+                                            self.server_id)
         self.logger.info("deleted %s" % self.server_id)
 
     def _create_sec_group(self):
         sec_grp_cli = self.manager.security_groups_client
         s_name = data_utils.rand_name('sec_grp')
         s_description = data_utils.rand_name('desc')
-        self.sec_grp = sec_grp_cli.create_security_group(s_name,
-                                                         s_description)
+        self.sec_grp = sec_grp_cli.create_security_group(
+            name=s_name, description=s_description)['security_group']
         create_rule = sec_grp_cli.create_security_group_rule
-        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
-        create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+        create_rule(parent_group_id=self.sec_grp['id'], ip_protocol='tcp',
+                    from_port=22, to_port=22)
+        create_rule(parent_group_id=self.sec_grp['id'], ip_protocol='icmp',
+                    from_port=-1, to_port=-1)
 
     def _destroy_sec_grp(self):
         sec_grp_cli = self.manager.security_groups_client
@@ -105,7 +108,8 @@
 
     def _create_floating_ip(self):
         floating_cli = self.manager.floating_ips_client
-        self.floating = floating_cli.create_floating_ip(self.floating_pool)
+        self.floating = (floating_cli.create_floating_ip(self.floating_pool)
+                         ['floating_ip'])
 
     def _destroy_floating_ip(self):
         cli = self.manager.floating_ips_client
@@ -145,7 +149,8 @@
         cli = self.manager.floating_ips_client
 
         def func():
-            floating = cli.show_floating_ip(self.floating['id'])
+            floating = (cli.show_floating_ip(self.floating['id'])
+                        ['floating_ip'])
             return floating['instance_id'] is None
 
         if not tempest.test.call_until_true(func, self.check_timeout,
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
index d6965c7..68e2989 100644
--- a/tempest/stress/actions/volume_attach_delete.py
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -49,8 +49,8 @@
         self.logger.info("attach volume (%s) to vm %s" %
                          (volume['id'], server_id))
         self.manager.servers_client.attach_volume(server_id,
-                                                  volume['id'],
-                                                  '/dev/vdc')
+                                                  volumeId=volume['id'],
+                                                  device='/dev/vdc')
         self.manager.volumes_client.wait_for_volume_status(volume['id'],
                                                            'in-use')
         self.logger.info("volume (%s) attached to vm %s" %
@@ -59,7 +59,8 @@
         # Step 4: delete vm
         self.logger.info("deleting vm: %s" % vm_name)
         self.manager.servers_client.delete_server(server_id)
-        self.manager.servers_client.wait_for_server_termination(server_id)
+        waiters.wait_for_server_termination(self.manager.servers_client,
+                                            server_id)
         self.logger.info("deleted vm: %s" % server_id)
 
         # Step 5: delete volume
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index 3ba2a91..dc7d217 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -26,7 +26,8 @@
 
     def _create_keypair(self):
         keyname = data_utils.rand_name("key")
-        self.key = self.manager.keypairs_client.create_keypair(keyname)
+        self.key = (self.manager.keypairs_client.create_keypair(name=keyname)
+                    ['keypair'])
 
     def _delete_keypair(self):
         self.manager.keypairs_client.delete_keypair(self.key['name'])
@@ -48,18 +49,21 @@
     def _destroy_vm(self):
         self.logger.info("deleting server: %s" % self.server_id)
         self.manager.servers_client.delete_server(self.server_id)
-        self.manager.servers_client.wait_for_server_termination(self.server_id)
+        waiters.wait_for_server_termination(self.manager.servers_client,
+                                            self.server_id)
         self.logger.info("deleted server: %s" % self.server_id)
 
     def _create_sec_group(self):
         sec_grp_cli = self.manager.security_groups_client
         s_name = data_utils.rand_name('sec_grp')
         s_description = data_utils.rand_name('desc')
-        self.sec_grp = sec_grp_cli.create_security_group(s_name,
-                                                         s_description)
+        self.sec_grp = sec_grp_cli.create_security_group(
+            name=s_name, description=s_description)['security_group']
         create_rule = sec_grp_cli.create_security_group_rule
-        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
-        create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+        create_rule(parent_group_id=self.sec_grp['id'], ip_protocol='tcp',
+                    from_port=22, to_port=22)
+        create_rule(parent_group_id=self.sec_grp['id'], ip_protocol='icmp',
+                    from_port=-1, to_port=-1)
 
     def _destroy_sec_grp(self):
         sec_grp_cli = self.manager.security_groups_client
@@ -67,7 +71,8 @@
 
     def _create_floating_ip(self):
         floating_cli = self.manager.floating_ips_client
-        self.floating = floating_cli.create_floating_ip(self.floating_pool)
+        self.floating = (floating_cli.create_floating_ip(self.floating_pool)
+                         ['floating_ip'])
 
     def _destroy_floating_ip(self):
         cli = self.manager.floating_ips_client
@@ -96,7 +101,8 @@
         cli = self.manager.floating_ips_client
 
         def func():
-            floating = cli.show_floating_ip(self.floating['id'])
+            floating = (cli.show_floating_ip(self.floating['id'])
+                        ['floating_ip'])
             return floating['instance_id'] is None
 
         if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
@@ -163,7 +169,7 @@
         if not self.new_server:
             self.new_server_ops()
 
-    # now we just test is number of partition increased or decrised
+    # now we just test that the number of partitions has increased or decreased
     def part_wait(self, num_match):
         def _part_state():
             self.partitions = self.remote_client.get_partitions().split('\n')
@@ -189,8 +195,8 @@
         self.logger.info("attach volume (%s) to vm %s" %
                          (self.volume['id'], self.server_id))
         servers_client.attach_volume(self.server_id,
-                                     self.volume['id'],
-                                     self.part_name)
+                                     volumeId=self.volume['id'],
+                                     device=self.part_name)
         self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                            'in-use')
         if self.enable_ssh_verify:
@@ -203,7 +209,7 @@
         self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                            'available')
         if self.enable_ssh_verify:
-            self.logger.info("Scanning for block device disapperance on %s"
+            self.logger.info("Scanning for block device disappearance on %s"
                              % self.server_id)
             self.part_wait(self.detach_match_count)
         if self.new_volume:
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index b785156..9456590 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -17,6 +17,7 @@
 from oslo_log import log as logging
 
 from tempest import clients
+from tempest.common import waiters
 
 LOG = logging.getLogger(__name__)
 
@@ -34,11 +35,12 @@
 
     for s in body['servers']:
         try:
-            admin_manager.servers_client.wait_for_server_termination(s['id'])
+            waiters.wait_for_server_termination(admin_manager.servers_client,
+                                                s['id'])
         except Exception:
             pass
 
-    keypairs = admin_manager.keypairs_client.list_keypairs()
+    keypairs = admin_manager.keypairs_client.list_keypairs()['keypairs']
     LOG.info("Cleanup::remove %s keypairs" % len(keypairs))
     for k in keypairs:
         try:
@@ -47,7 +49,8 @@
             pass
 
     secgrp_client = admin_manager.security_groups_client
-    secgrp = secgrp_client.list_security_groups(all_tenants=True)
+    secgrp = (secgrp_client.list_security_groups(all_tenants=True)
+              ['security_groups'])
     secgrp_del = [grp for grp in secgrp if grp['name'] != 'default']
     LOG.info("Cleanup::remove %s Security Group" % len(secgrp_del))
     for g in secgrp_del:
@@ -56,7 +59,8 @@
         except Exception:
             pass
 
-    floating_ips = admin_manager.floating_ips_client.list_floating_ips()
+    floating_ips = (admin_manager.floating_ips_client.list_floating_ips()
+                    ['floating_ips'])
     LOG.info("Cleanup::remove %s floating ips" % len(floating_ips))
     for f in floating_ips:
         try:
@@ -70,7 +74,7 @@
         if user['name'].startswith("stress_user"):
             admin_manager.identity_client.delete_user(user['id'])
 
-    tenants = admin_manager.identity_client.list_tenants()
+    tenants = admin_manager.identity_client.list_tenants()['tenants']
     LOG.info("Cleanup::remove %s tenants" % len(tenants))
     for tenant in tenants:
         if tenant['name'].startswith("stress_tenant"):
@@ -79,8 +83,8 @@
     # We have to delete snapshots first or
     # volume deletion may block
 
-    _, snaps = admin_manager.snapshots_client.\
-        list_snapshots(params={"all_tenants": True})
+    snaps = admin_manager.snapshots_client.list_snapshots(
+        params={"all_tenants": True})['snapshots']
     LOG.info("Cleanup::remove %s snapshots" % len(snaps))
     for v in snaps:
         try:
diff --git a/tempest/test.py b/tempest/test.py
index 0e60041..df6b30d 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -44,7 +44,7 @@
 
 
 def attr(**kwargs):
-    """A decorator which applies the  testtools attr decorator
+    """A decorator which applies the testtools attr decorator
 
     This decorator applies the testtools.testcase.attr if it is in the list of
     attributes to testtools we want to apply.
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 2701f02..640b004 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -40,6 +40,27 @@
         """
         return
 
+    @abc.abstractmethod
+    def register_opts(self, conf):
+        """Method to add additional configuration options to tempest. This
+        method will be run for the plugin during the register_opts() function
+        in tempest.config
+
+        :param ConfigOpts conf: The conf object on which additional options
+            can be registered.
+        """
+        return
+
+    @abc.abstractmethod
+    def get_opt_lists(self):
+        """Method to get a list of options for sample config generation
+
+        :return option_list: A list of tuples with the group name and options
+                             in that group.
+        :rtype: list
+        """
+        return []
+
 
 @misc.singleton
 class TempestTestPluginManager(object):
@@ -64,3 +85,15 @@
         for plug in self.ext_plugins:
             load_tests_dict[plug.name] = plug.obj.load_tests()
         return load_tests_dict
+
+    def register_plugin_opts(self, conf):
+        for plug in self.ext_plugins:
+            plug.obj.register_opts(conf)
+
+    def get_plugin_options_list(self):
+        plugin_options = []
+        for plug in self.ext_plugins:
+            opt_list = plug.obj.get_opt_lists()
+            if opt_list:
+                plugin_options.extend(opt_list)
+        return plugin_options
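The two new abstract methods extend the plugin interface so that out-of-tree plugins can contribute configuration options both at runtime and for sample config generation. A minimal sketch of a plugin implementing them, assuming the abstract base is ``tempest.test_discover.plugins.TempestPlugin`` and using a made-up option group::

    from oslo_config import cfg

    from tempest.test_discover import plugins


    class MyServicePlugin(plugins.TempestPlugin):

        my_group = cfg.OptGroup(name='my_service', title='My service options')
        my_opts = [cfg.BoolOpt('enabled', default=True,
                               help='Whether my_service tests should run')]

        def load_tests(self):
            # Illustrative only; a real plugin points at its own test
            # directory and top level directory.
            return ('my_plugin/tests', 'my_plugin')

        def register_opts(self, conf):
            # Run from register_opts() in tempest.config for every plugin.
            conf.register_group(self.my_group)
            conf.register_opts(self.my_opts, group=self.my_group)

        def get_opt_lists(self):
            # Consumed when generating the sample configuration file.
            return [(self.my_group.name, self.my_opts)]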
diff --git a/tempest/tests/cmd/test_javelin.py b/tempest/tests/cmd/test_javelin.py
index f0921d5..a95d400 100644
--- a/tempest/tests/cmd/test_javelin.py
+++ b/tempest/tests/cmd/test_javelin.py
@@ -85,7 +85,7 @@
 class TestCreateResources(JavelinUnitTest):
     def test_create_tenants(self):
 
-        self.fake_client.identity.list_tenants.return_value = []
+        self.fake_client.identity.list_tenants.return_value = {'tenants': []}
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=self.fake_client))
 
@@ -95,8 +95,8 @@
         mocked_function.assert_called_once_with(self.fake_object['name'])
 
     def test_create_duplicate_tenant(self):
-        self.fake_client.identity.list_tenants.return_value = [
-            {'name': self.fake_object['name']}]
+        self.fake_client.identity.list_tenants.return_value = {'tenants': [
+            {'name': self.fake_object['name']}]}
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=self.fake_client))
 
@@ -270,16 +270,17 @@
     def test_create_secgroup(self):
         self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                               return_value=self.fake_client))
-        self.fake_client.secgroups.list_security_groups.return_value = []
+        self.fake_client.secgroups.list_security_groups.return_value = (
+            {'security_groups': []})
         self.fake_client.secgroups.create_security_group.return_value = \
-            {'id': self.fake_object['secgroup_id']}
+            {'security_group': {'id': self.fake_object['secgroup_id']}}
 
         javelin.create_secgroups([self.fake_object])
 
         mocked_function = self.fake_client.secgroups.create_security_group
         mocked_function.assert_called_once_with(
-            self.fake_object['name'],
-            self.fake_object['description'])
+            name=self.fake_object['name'],
+            description=self.fake_object['description'])
 
 
 class TestDestroyResources(JavelinUnitTest):
diff --git a/tempest/tests/cmd/test_tempest_init.py b/tempest/tests/cmd/test_tempest_init.py
new file mode 100644
index 0000000..6b5af7e
--- /dev/null
+++ b/tempest/tests/cmd/test_tempest_init.py
@@ -0,0 +1,66 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+
+from tempest.cmd import init
+from tempest.tests import base
+
+
+class TestTempestInit(base.TestCase):
+
+    def test_generate_testr_conf(self):
+        # Create fake conf dir
+        conf_dir = self.useFixture(fixtures.TempDir())
+
+        init_cmd = init.TempestInit(None, None)
+        init_cmd.generate_testr_conf(conf_dir.path)
+
+        # Generate expected file contents
+        top_level_path = os.path.dirname(os.path.dirname(init.__file__))
+        discover_path = os.path.join(top_level_path, 'test_discover')
+        testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
+
+        conf_path = conf_dir.join('.testr.conf')
+        conf_file = open(conf_path, 'r')
+        self.addCleanup(conf_file.close)
+        self.assertEqual(conf_file.read(), testr_conf_file)
+
+    def test_create_working_dir(self):
+        fake_local_dir = self.useFixture(fixtures.TempDir())
+        fake_local_conf_dir = self.useFixture(fixtures.TempDir())
+        # Create a fake conf file
+        fake_file = fake_local_conf_dir.join('conf_file.conf')
+        open(fake_file, 'w').close()
+        init_cmd = init.TempestInit(None, None)
+        init_cmd.create_working_dir(fake_local_dir.path,
+                                    fake_local_conf_dir.path)
+        # Assert directories are created
+        lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
+        etc_dir = os.path.join(fake_local_dir.path, 'etc')
+        log_dir = os.path.join(fake_local_dir.path, 'logs')
+        testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
+        self.assertTrue(os.path.isdir(lock_path))
+        self.assertTrue(os.path.isdir(etc_dir))
+        self.assertTrue(os.path.isdir(log_dir))
+        self.assertTrue(os.path.isdir(testr_dir))
+        # Assert file creation
+        fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
+        local_conf_file = os.path.join(etc_dir, 'tempest.conf')
+        local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
+        self.assertTrue(os.path.isfile(fake_file_moved))
+        self.assertTrue(os.path.isfile(local_conf_file))
+        self.assertTrue(os.path.isfile(local_testr_conf))
diff --git a/tempest/tests/common/test_accounts.py b/tempest/tests/common/test_accounts.py
index 8fc3745..1acef8a 100644
--- a/tempest/tests/common/test_accounts.py
+++ b/tempest/tests/common/test_accounts.py
@@ -297,8 +297,9 @@
         test_accounts_class = accounts.Accounts('v2', 'test_name')
         with mock.patch('tempest.services.compute.json.networks_client.'
                         'NetworksClient.list_networks',
-                        return_value=[{'name': 'network-2', 'id': 'fake-id',
-                                       'label': 'network-2'}]):
+                        return_value={'networks': [{'name': 'network-2',
+                                                    'id': 'fake-id',
+                                                    'label': 'network-2'}]}):
             creds = test_accounts_class.get_creds_by_roles(['role-7'])
         self.assertTrue(isinstance(creds, cred_provider.TestResources))
         network = creds.network
diff --git a/tempest/tests/common/test_service_clients.py b/tempest/tests/common/test_service_clients.py
index 695d4a4..00b8470 100644
--- a/tempest/tests/common/test_service_clients.py
+++ b/tempest/tests/common/test_service_clients.py
@@ -40,6 +40,7 @@
 from tempest.services.compute.json import quotas_client
 from tempest.services.compute.json import security_group_default_rules_client \
     as nova_secgrop_default_client
+from tempest.services.compute.json import security_group_rules_client
 from tempest.services.compute.json import security_groups_client
 from tempest.services.compute.json import server_groups_client
 from tempest.services.compute.json import servers_client
@@ -115,8 +116,8 @@
             extensions_client.ExtensionsClient,
             fixed_ips_client.FixedIPsClient,
             flavors_client.FlavorsClient,
-            floating_ip_pools_client.FloatingIpPoolsClient,
-            floating_ips_bulk_client.FloatingIpsBulkClient,
+            floating_ip_pools_client.FloatingIPPoolsClient,
+            floating_ips_bulk_client.FloatingIPsBulkClient,
             floating_ips_client.FloatingIPsClient,
             hosts_client.HostsClient,
             hypervisor_client.HypervisorClient,
@@ -130,6 +131,7 @@
             quotas_client.QuotasClient,
             quota_classes_client.QuotaClassesClient,
             nova_secgrop_default_client.SecurityGroupDefaultRulesClient,
+            security_group_rules_client.SecurityGroupRulesClient,
             security_groups_client.SecurityGroupsClient,
             server_groups_client.ServerGroupsClient,
             servers_client.ServersClient,
diff --git a/tempest/tests/test_waiters.py b/tempest/tests/common/test_waiters.py
similarity index 65%
rename from tempest/tests/test_waiters.py
rename to tempest/tests/common/test_waiters.py
index 329d610..7aa6595 100644
--- a/tempest/tests/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -18,6 +18,7 @@
 
 from tempest.common import waiters
 from tempest import exceptions
+from tempest.services.volume.json import volumes_client
 from tempest.tests import base
 
 
@@ -47,3 +48,21 @@
         self.assertRaises(exceptions.AddImageException,
                           waiters.wait_for_image_status,
                           self.client, 'fake_image_id', 'active')
+
+    @mock.patch.object(time, 'sleep')
+    def test_wait_for_volume_status_error_restoring(self, mock_sleep):
+        # Tests that the wait method raises VolumeRestoreErrorException if
+        # the volume status is 'error_restoring'.
+        client = mock.Mock(spec=volumes_client.BaseVolumesClient,
+                           build_interval=1)
+        volume1 = {'status': 'restoring-backup'}
+        volume2 = {'status': 'error_restoring'}
+        mock_show = mock.Mock(side_effect=(volume1, volume2))
+        client.show_volume = mock_show
+        volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+        self.assertRaises(exceptions.VolumeRestoreErrorException,
+                          waiters.wait_for_volume_status,
+                          client, volume_id, 'available')
+        mock_show.assert_has_calls([mock.call(volume_id),
+                                    mock.call(volume_id)])
+        mock_sleep.assert_called_once_with(1)
diff --git a/tempest/tests/services/compute/test_agents_client.py b/tempest/tests/services/compute/test_agents_client.py
index e8ea525..d14d8bf 100644
--- a/tempest/tests/services/compute/test_agents_client.py
+++ b/tempest/tests/services/compute/test_agents_client.py
@@ -14,37 +14,92 @@
 
 import httplib2
 
+from oslo_serialization import jsonutils as json
 from oslotest import mockpatch
 
 from tempest.services.compute.json import agents_client
 from tempest.tests import base
 from tempest.tests import fake_auth_provider
-from tempest.tests import fake_config
 
 
 class TestAgentsClient(base.TestCase):
 
     def setUp(self):
         super(TestAgentsClient, self).setUp()
-        self.useFixture(fake_config.ConfigFixture())
         fake_auth = fake_auth_provider.FakeAuthProvider()
         self.client = agents_client.AgentsClient(fake_auth,
                                                  'compute', 'regionOne')
 
     def _test_list_agents(self, bytes_body=False):
+        body = '{"agents": []}'
         if bytes_body:
-            body = bytes(b'{"agents": []}')
-        else:
-            body = '{"agents": []}'
-        expected = []
+            body = body.encode('utf-8')
+        expected = {"agents": []}
         response = (httplib2.Response({'status': 200}), body)
         self.useFixture(mockpatch.Patch(
             'tempest.common.service_client.ServiceClient.get',
             return_value=response))
         self.assertEqual(expected, self.client.list_agents())
 
+    def _test_create_agent(self, bytes_body=False):
+        expected = {"agent": {"url": "http://foo.com", "hypervisor": "kvm",
+                              "md5hash": "md5", "version": "2",
+                              "architecture": "x86_64",
+                              "os": "linux", "agent_id": 1}}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.post',
+            return_value=mocked_resp))
+        resp = self.client.create_agent(
+            url="http://foo.com", hypervisor="kvm", md5hash="md5",
+            version="2", architecture="x86_64", os="linux"
+        )
+        self.assertEqual(expected, resp)
+
+    def _test_delete_agent(self):
+        mocked_resp = (httplib2.Response({'status': 200}), None)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.delete',
+            return_value=mocked_resp))
+        self.client.delete_agent("1")
+
+    def _test_update_agent(self, bytes_body=False):
+        expected = {"agent": {"url": "http://foo.com", "md5hash": "md5",
+                              "version": "2", "agent_id": 1}}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.update_agent(
+            "1", url="http://foo.com", md5hash="md5", version="2"
+        )
+        self.assertEqual(expected, resp)
+
     def test_list_agents_with_str_body(self):
         self._test_list_agents()
 
     def test_list_agents_with_bytes_body(self):
         self._test_list_agents(bytes_body=True)
+
+    def test_create_agent_with_str_body(self):
+        self._test_create_agent()
+
+    def test_create_agent_with_bytes_body(self):
+        self._test_create_agent(bytes_body=True)
+
+    def test_delete_agent(self):
+        self._test_delete_agent()
+
+    def test_update_agent_with_str_body(self):
+        self._test_update_agent()
+
+    def test_update_agent_with_bytes_body(self):
+        self._test_update_agent(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_aggregates_client.py b/tempest/tests/services/compute/test_aggregates_client.py
new file mode 100644
index 0000000..14930a7
--- /dev/null
+++ b/tempest/tests/services/compute/test_aggregates_client.py
@@ -0,0 +1,137 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import aggregates_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestAggregatesClient(base.TestCase):
+
+    def setUp(self):
+        super(TestAggregatesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = aggregates_client.AggregatesClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_aggregates(self, bytes_body=False):
+        body = '{"aggregates": []}'
+        if bytes_body:
+            body = body.encode('utf-8')
+        expected = {"aggregates": []}
+        response = (httplib2.Response({'status': 200}), body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=response))
+        self.assertEqual(expected, self.client.list_aggregates())
+
+    def test_list_aggregates_with_str_body(self):
+        self._test_list_aggregates()
+
+    def test_list_aggregates_with_bytes_body(self):
+        self._test_list_aggregates(bytes_body=True)
+
+    def _test_show_aggregate(self, bytes_body=False):
+        expected = {"aggregate": {"name": "hoge",
+                                  "availability_zone": None,
+                                  "deleted": False,
+                                  "created_at":
+                                  "2015-07-16T03:07:32.000000",
+                                  "updated_at": None,
+                                  "hosts": [],
+                                  "deleted_at": None,
+                                  "id": 1,
+                                  "metadata": {}}}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_aggregate(1)
+        self.assertEqual(expected, resp)
+
+    def test_show_aggregate_with_str_body(self):
+        self._test_show_aggregate()
+
+    def test_show_aggregate_with_bytes_body(self):
+        self._test_show_aggregate(bytes_body=True)
+
+    def _test_create_aggregate(self, bytes_body=False):
+        expected = {"aggregate": {"name": u'\xf4',
+                                  "availability_zone": None,
+                                  "deleted": False,
+                                  "created_at": "2015-07-21T04:11:18.000000",
+                                  "updated_at": None,
+                                  "deleted_at": None,
+                                  "id": 1}}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.post',
+            return_value=mocked_resp))
+        resp = self.client.create_aggregate(name='hoge')
+        self.assertEqual(expected, resp)
+
+    def test_create_aggregate_with_str_body(self):
+        self._test_create_aggregate()
+
+    def test_create_aggregate_with_bytes_body(self):
+        self._test_create_aggregate(bytes_body=True)
+
+    def test_delete_aggregate(self):
+        expected = {}
+        mocked_resp = (httplib2.Response({'status': 200}), None)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.delete',
+            return_value=mocked_resp))
+        resp = self.client.delete_aggregate("1")
+        self.assertEqual(expected, resp)
+
+    def _test_update_aggregate(self, bytes_body=False):
+        expected = {"aggregate": {"name": u'\xe9',
+                                  "availability_zone": None,
+                                  "deleted": False,
+                                  "created_at": "2015-07-16T03:07:32.000000",
+                                  "updated_at": "2015-07-23T05:16:29.000000",
+                                  "hosts": [],
+                                  "deleted_at": None,
+                                  "id": 1,
+                                  "metadata": {}}}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.update_aggregate(1)
+        self.assertEqual(expected, resp)
+
+    def test_update_aggregate_with_str_body(self):
+        self._test_update_aggregate()
+
+    def test_update_aggregate_with_bytes_body(self):
+        self._test_update_aggregate(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_extensions_client.py b/tempest/tests/services/compute/test_extensions_client.py
new file mode 100644
index 0000000..20a5b7b
--- /dev/null
+++ b/tempest/tests/services/compute/test_extensions_client.py
@@ -0,0 +1,75 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import extensions_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestExtensionsClient(base.TestCase):
+
+    def setUp(self):
+        super(TestExtensionsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = extensions_client.ExtensionsClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_extensions(self, bytes_body=False):
+        body = '{"extensions": []}'
+        if bytes_body:
+            body = body.encode('utf-8')
+        expected = {"extensions": []}
+        response = (httplib2.Response({'status': 200}), body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=response))
+        self.assertEqual(expected, self.client.list_extensions())
+
+    def test_list_extensions_with_str_body(self):
+        self._test_list_extensions()
+
+    def test_list_extensions_with_bytes_body(self):
+        self._test_list_extensions(bytes_body=True)
+
+    def _test_show_extension(self, bytes_body=False):
+        expected = {"extension": {
+            "updated": "2011-06-09T00:00:00Z",
+            "name": "Multinic",
+            "links": [],
+            "namespace":
+            "http://docs.openstack.org/compute/ext/multinic/api/v1.1",
+            "alias": "NMN",
+            "description": u'\u2740(*\xb4\u25e1`*)\u2740'
+        }}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_extension("NMN")
+        self.assertEqual(expected, resp)
+
+    def test_show_extension_with_str_body(self):
+        self._test_show_extension()
+
+    def test_show_extension_with_bytes_body(self):
+        self._test_show_extension(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_keypairs_client.py b/tempest/tests/services/compute/test_keypairs_client.py
new file mode 100644
index 0000000..8a0edd0
--- /dev/null
+++ b/tempest/tests/services/compute/test_keypairs_client.py
@@ -0,0 +1,111 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import keypairs_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestKeyPairsClient(base.TestCase):
+
+    FAKE_KEYPAIR = {"keypair": {
+        "public_key": "ssh-rsa foo Generated-by-Nova",
+        "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "user_id": "525d55f98980415ba98e634972fa4a10",
+        "fingerprint": "76:24:66:49:d7:ca:6e:5c:77:ea:8e:bb:9c:15:5f:98"
+        }}
+
+    def setUp(self):
+        super(TestKeyPairsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = keypairs_client.KeyPairsClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_keypairs(self, bytes_body=False):
+        body = '{"keypairs": []}'
+        if bytes_body:
+            body = body.encode('utf-8')
+        expected = {"keypairs": []}
+        response = (httplib2.Response({'status': 200}), body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=response))
+        self.assertEqual(expected, self.client.list_keypairs())
+
+    def test_list_keypairs_with_str_body(self):
+        self._test_list_keypairs()
+
+    def test_list_keypairs_with_bytes_body(self):
+        self._test_list_keypairs(bytes_body=True)
+
+    def _test_show_keypair(self, bytes_body=False):
+        fake_keypair = copy.deepcopy(self.FAKE_KEYPAIR)
+        fake_keypair["keypair"].update({
+            "deleted": False,
+            "created_at": "2015-07-22T04:53:52.000000",
+            "updated_at": None,
+            "deleted_at": None,
+            "id": 1
+            })
+        serialized_body = json.dumps(fake_keypair)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_keypair("test")
+        self.assertEqual(fake_keypair, resp)
+
+    def test_show_keypair_with_str_body(self):
+        self._test_show_keypair()
+
+    def test_show_keypair_with_bytes_body(self):
+        self._test_show_keypair(bytes_body=True)
+
+    def _test_create_keypair(self, bytes_body=False):
+        fake_keypair = copy.deepcopy(self.FAKE_KEYPAIR)
+        fake_keypair["keypair"].update({"private_key": "foo"})
+        serialized_body = json.dumps(fake_keypair)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.post',
+            return_value=mocked_resp))
+        resp = self.client.create_keypair(name='test')
+        self.assertEqual(fake_keypair, resp)
+
+    def test_create_keypair_with_str_body(self):
+        self._test_create_keypair()
+
+    def test_create_keypair_with_bytes_body(self):
+        self._test_create_keypair(bytes_body=True)
+
+    def test_delete_keypair(self):
+        expected = {}
+        mocked_resp = (httplib2.Response({'status': 202}), None)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.delete',
+            return_value=mocked_resp))
+        resp = self.client.delete_keypair('test')
+        self.assertEqual(expected, resp)
diff --git a/tempest/tests/services/compute/test_limits_client.py b/tempest/tests/services/compute/test_limits_client.py
new file mode 100644
index 0000000..4086210
--- /dev/null
+++ b/tempest/tests/services/compute/test_limits_client.py
@@ -0,0 +1,69 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import limits_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestLimitsClient(base.TestCase):
+
+    def setUp(self):
+        super(TestLimitsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = limits_client.LimitsClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_show_limits(self, bytes_body=False):
+        expected = {"rate": [],
+                    "absolute": {"maxServerMeta": 128,
+                                 "maxPersonality": 5,
+                                 "totalServerGroupsUsed": 0,
+                                 "maxImageMeta": 128,
+                                 "maxPersonalitySize": 10240,
+                                 "maxServerGroups": 10,
+                                 "maxSecurityGroupRules": 20,
+                                 "maxTotalKeypairs": 100,
+                                 "totalCoresUsed": 0,
+                                 "totalRAMUsed": 0,
+                                 "totalInstancesUsed": 0,
+                                 "maxSecurityGroups": 10,
+                                 "totalFloatingIpsUsed": 0,
+                                 "maxTotalCores": 20,
+                                 "totalSecurityGroupsUsed": 0,
+                                 "maxTotalFloatingIps": 10,
+                                 "maxTotalInstances": 10,
+                                 "maxTotalRAMSize": 51200,
+                                 "maxServerGroupMembers": 10}}
+        serialized_body = json.dumps({"limits": expected})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_limits()
+        self.assertEqual(expected, resp)
+
+    def test_show_limits_with_str_body(self):
+        self._test_show_limits()
+
+    def test_show_limits_with_bytes_body(self):
+        self._test_show_limits(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_networks_client.py b/tempest/tests/services/compute/test_networks_client.py
new file mode 100644
index 0000000..cbeaefc
--- /dev/null
+++ b/tempest/tests/services/compute/test_networks_client.py
@@ -0,0 +1,108 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import networks_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestNetworksClient(base.TestCase):
+
+    FAKE_NETWORK = {
+        "bridge": None,
+        "vpn_public_port": None,
+        "dhcp_start": None,
+        "bridge_interface": None,
+        "share_address": None,
+        "updated_at": None,
+        "id": "34d5ae1e-5659-49cf-af80-73bccd7d7ad3",
+        "cidr_v6": None,
+        "deleted_at": None,
+        "gateway": None,
+        "rxtx_base": None,
+        "label": u'30d7',
+        "priority": None,
+        "project_id": None,
+        "vpn_private_address": None,
+        "deleted": None,
+        "vlan": None,
+        "broadcast": None,
+        "netmask": None,
+        "injected": None,
+        "cidr": None,
+        "vpn_public_address": None,
+        "multi_host": None,
+        "enable_dhcp": None,
+        "dns2": None,
+        "created_at": None,
+        "host": None,
+        "mtu": None,
+        "gateway_v6": None,
+        "netmask_v6": None,
+        "dhcp_server": None,
+        "dns1": None
+        }
+
+    network_id = "34d5ae1e-5659-49cf-af80-73bccd7d7ad3"
+
+    FAKE_NETWORKS = [FAKE_NETWORK]
+
+    def setUp(self):
+        super(TestNetworksClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = networks_client.NetworksClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_networks(self, bytes_body=False):
+        expected = {"networks": self.FAKE_NETWORKS}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.list_networks()
+        self.assertEqual(expected, resp)
+
+    def test_list_networks_with_str_body(self):
+        self._test_list_networks()
+
+    def test_list_networks_with_bytes_body(self):
+        self._test_list_networks(bytes_body=True)
+
+    def _test_show_network(self, bytes_body=False):
+        expected = {"network": self.FAKE_NETWORKS}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_network(self.network_id)
+        self.assertEqual(expected, resp)
+
+    def test_show_network_with_str_body(self):
+        self._test_show_network()
+
+    def test_show_network_with_bytes_body(self):
+        self._test_show_network(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_quota_classes_client.py b/tempest/tests/services/compute/test_quota_classes_client.py
new file mode 100644
index 0000000..ff9b310
--- /dev/null
+++ b/tempest/tests/services/compute/test_quota_classes_client.py
@@ -0,0 +1,81 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import quota_classes_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestQuotaClassesClient(base.TestCase):
+
+    FAKE_QUOTA_CLASS_SET = {
+        "injected_file_content_bytes": 10240,
+        "metadata_items": 128,
+        "server_group_members": 10,
+        "server_groups": 10,
+        "ram": 51200,
+        "floating_ips": 10,
+        "key_pairs": 100,
+        "id": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "instances": 10,
+        "security_group_rules": 20,
+        "security_groups": 10,
+        "injected_files": 5,
+        "cores": 20,
+        "fixed_ips": -1,
+        "injected_file_path_bytes": 255,
+        }
+
+    def setUp(self):
+        super(TestQuotaClassesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = quota_classes_client.QuotaClassesClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_show_quota_class_set(self, bytes_body=False):
+        serialized_body = json.dumps({
+            "quota_class_set": self.FAKE_QUOTA_CLASS_SET})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_quota_class_set("test")
+        self.assertEqual(self.FAKE_QUOTA_CLASS_SET, resp)
+
+    def test_show_quota_class_set_with_str_body(self):
+        self._test_show_quota_class_set()
+
+    def test_show_quota_class_set_with_bytes_body(self):
+        self._test_show_quota_class_set(bytes_body=True)
+
+    def test_update_quota_class_set(self):
+        fake_quota_class_set = copy.deepcopy(self.FAKE_QUOTA_CLASS_SET)
+        fake_quota_class_set.pop("id")
+        serialized_body = json.dumps({"quota_class_set": fake_quota_class_set})
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.update_quota_class_set("test")
+        self.assertEqual(fake_quota_class_set, resp)
diff --git a/tempest/tests/services/compute/test_quotas_client.py b/tempest/tests/services/compute/test_quotas_client.py
new file mode 100644
index 0000000..a9bd0a1
--- /dev/null
+++ b/tempest/tests/services/compute/test_quotas_client.py
@@ -0,0 +1,106 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import quotas_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestQuotasClient(base.TestCase):
+
+    FAKE_QUOTA_SET = {"injected_file_content_bytes": 10240,
+                      "metadata_items": 128,
+                      "server_group_members": 10,
+                      "server_groups": 10,
+                      "ram": 51200,
+                      "floating_ips": 10,
+                      "key_pairs": 100,
+                      "id": "8421f7be61064f50b680465c07f334af",
+                      "instances": 10,
+                      "security_group_rules": 20,
+                      "injected_files": 5,
+                      "cores": 20,
+                      "fixed_ips": -1,
+                      "injected_file_path_bytes": 255,
+                      "security_groups": 10}
+
+    project_id = "8421f7be61064f50b680465c07f334af"
+
+    def setUp(self):
+        super(TestQuotasClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = quotas_client.QuotasClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_show_quota_set(self, bytes_body=False):
+        serialized_body = json.dumps({"quota_set": self.FAKE_QUOTA_SET})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_quota_set(self.project_id)
+        self.assertEqual(self.FAKE_QUOTA_SET, resp)
+
+    def test_show_quota_set_with_str_body(self):
+        self._test_show_quota_set()
+
+    def test_show_quota_set_with_bytes_body(self):
+        self._test_show_quota_set(bytes_body=True)
+
+    def _test_show_default_quota_set(self, bytes_body=False):
+        serialized_body = json.dumps({"quota_set": self.FAKE_QUOTA_SET})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_default_quota_set(self.project_id)
+        self.assertEqual(self.FAKE_QUOTA_SET, resp)
+
+    def test_show_default_quota_set_with_str_body(self):
+        self._test_show_default_quota_set()
+
+    def test_show_default_quota_set_with_bytes_body(self):
+        self._test_show_default_quota_set(bytes_body=True)
+
+    def test_update_quota_set(self):
+        fake_quota_set = copy.deepcopy(self.FAKE_QUOTA_SET)
+        fake_quota_set.pop("id")
+        serialized_body = json.dumps({"quota_set": fake_quota_set})
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.update_quota_set(self.project_id)
+        self.assertEqual(fake_quota_set, resp)
+
+    def test_delete_quota_set(self):
+        expected = {}
+        mocked_resp = (httplib2.Response({'status': 202}), None)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.delete',
+            return_value=mocked_resp))
+        resp = self.client.delete_quota_set(self.project_id)
+        self.assertEqual(expected, resp)
diff --git a/tempest/tests/services/compute/test_services_client.py b/tempest/tests/services/compute/test_services_client.py
new file mode 100644
index 0000000..7d87711
--- /dev/null
+++ b/tempest/tests/services/compute/test_services_client.py
@@ -0,0 +1,108 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import services_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestServicesClient(base.TestCase):
+
+    FAKE_SERVICES = [{
+        "status": "enabled",
+        "binary": "nova-conductor",
+        "zone": "internal",
+        "state": "up",
+        "updated_at": "2015-08-19T06:50:55.000000",
+        "host": "controller",
+        "disabled_reason": None,
+        "id": 1
+        }]
+
+    FAKE_SERVICE = {
+        "status": "enabled",
+        "binary": "nova-conductor",
+        "host": "controller"
+        }
+
+    def setUp(self):
+        super(TestServicesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = services_client.ServicesClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_services(self, bytes_body=False):
+        expected = {"services": self.FAKE_SERVICES}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.list_services()
+        self.assertEqual(expected, resp)
+
+    def test_list_services_with_str_body(self):
+        self._test_list_services()
+
+    def test_list_services_with_bytes_body(self):
+        self._test_list_services(bytes_body=True)
+
+    def _test_enable_service(self, bytes_body=False):
+        expected = {"service": self.FAKE_SERVICE}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.enable_service("nova-conductor", "controller")
+        self.assertEqual(expected, resp)
+
+    def test_enable_service_with_str_body(self):
+        self._test_enable_service()
+
+    def test_enable_service_with_bytes_body(self):
+        self._test_enable_service(bytes_body=True)
+
+    def _test_disable_service(self, bytes_body=False):
+        fake_service = copy.deepcopy(self.FAKE_SERVICE)
+        fake_service["status"] = "disable"
+        expected = {"service": self.FAKE_SERVICE}
+        serialized_body = json.dumps(expected)
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.disable_service("nova-conductor", "controller")
+        self.assertEqual(expected, resp)
+
+    def test_disable_service_with_str_body(self):
+        self._test_disable_service()
+
+    def test_disable_service_with_bytes_body(self):
+        self._test_disable_service(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_tenant_usages_client.py b/tempest/tests/services/compute/test_tenant_usages_client.py
new file mode 100644
index 0000000..f6bcf5f
--- /dev/null
+++ b/tempest/tests/services/compute/test_tenant_usages_client.py
@@ -0,0 +1,95 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import tenant_usages_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestTenantUsagesClient(base.TestCase):
+
+    FAKE_SERVER_USAGES = [{
+        "ended_at": None,
+        "flavor": "m1.tiny",
+        "hours": 1.0,
+        "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0",
+        "local_gb": 1,
+        "memory_mb": 512,
+        "name": "new-server-test",
+        "started_at": "2012-10-08T20:10:44.541277",
+        "state": "active",
+        "tenant_id": "openstack",
+        "uptime": 3600,
+        "vcpus": 1
+        }]
+
+    FAKE_TENANT_USAGES = [{
+        "server_usages": FAKE_SERVER_USAGES,
+        "start": "2012-10-08T21:10:44.587336",
+        "stop": "2012-10-08T22:10:44.587336",
+        "tenant_id": "openstack",
+        "total_hours": 1,
+        "total_local_gb_usage": 1,
+        "total_memory_mb_usage": 512,
+        "total_vcpus_usage": 1
+        }]
+
+    def setUp(self):
+        super(TestTenantUsagesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = tenant_usages_client.TenantUsagesClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_tenant_usages(self, bytes_body=False):
+        serialized_body = json.dumps({"tenant_usages":
+                                      self.FAKE_TENANT_USAGES})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.list_tenant_usages()
+        self.assertEqual({"tenant_usages": self.FAKE_TENANT_USAGES}, resp)
+
+    def test_list_tenant_usages_with_str_body(self):
+        self._test_list_tenant_usages()
+
+    def test_list_tenant_usages_with_bytes_body(self):
+        self._test_list_tenant_usages(bytes_body=True)
+
+    def _test_show_tenant_usage(self, bytes_body=False):
+        serialized_body = json.dumps({"tenant_usage":
+                                      self.FAKE_TENANT_USAGES[0]})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_tenant_usage('openstack')
+        self.assertEqual({"tenant_usage": self.FAKE_TENANT_USAGES[0]}, resp)
+
+    def test_show_tenant_usage_with_str_body(self):
+        self._test_show_tenant_usage()
+
+    def test_show_tenant_usage_with_bytes_body(self):
+        self._test_show_tenant_usage(bytes_body=True)
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 1ff4dee..55b8b12 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -303,7 +303,7 @@
     @classmethod
     def get_lfunction_gone(cls, obj):
         """If the object is instance of a well know type returns back with
-            with the correspoding function otherwise it assumes the obj itself
+            with the corresponding function otherwise it assumes the obj itself
             is the function.
             """
         ec = cls.ec2_error_code
@@ -467,7 +467,7 @@
                         client.InvalidInstanceID.NotFound.match(exc) is None:
                     return "_GONE"
                 # NOTE(afazekas): incorrect code,
-                # but the resource must be destoreyd
+                # but the resource must be destroyed
                 if exc.error_code == "InstanceNotFound":
                     return "_GONE"
 
@@ -505,7 +505,7 @@
             LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                          map(snaps.id, snaps))
 
-        # NOTE(afazekas): detaching/attching not valid EC2 status
+        # NOTE(afazekas): detaching/attaching not valid EC2 status
         def _volume_state():
             volume.update(validate=True)
             try:
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
index 88ff154..594dc8b 100644
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ b/tempest/thirdparty/boto/test_ec2_security_groups.py
@@ -68,5 +68,5 @@
 
         group_get = self.client.get_all_security_groups(
             groupnames=(group_name,))[0]
-        # all rules shuld be removed now
+        # all rules should be removed now
         self.assertEqual(0, len(group_get.rules))
diff --git a/test-requirements.txt b/test-requirements.txt
index 8fcf071..db2b2ce 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,7 +7,6 @@
 python-subunit>=0.0.18
 oslosphinx>=2.5.0 # Apache-2.0
 mox>=0.5.3
-mock>=1.1;python_version!='2.6'
-mock==1.0.1;python_version=='2.6'
+mock>=1.2
 coverage>=3.6
-oslotest>=1.7.0 # Apache-2.0
+oslotest>=1.10.0 # Apache-2.0
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
deleted file mode 100755
index 7b08695..0000000
--- a/tools/config/check_uptodate.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-
-PROJECT_NAME=${PROJECT_NAME:-tempest}
-CFGFILE_NAME=${PROJECT_NAME}.conf.sample
-
-if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
-    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
-elif [ -e etc/${CFGFILE_NAME} ]; then
-    CFGFILE=etc/${CFGFILE_NAME}
-else
-    echo "${0##*/}: can not find config file"
-    exit 1
-fi
-
-TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
-trap "rm -rf $TEMPDIR" EXIT
-
-oslo-config-generator --config-file tools/config/config-generator.tempest.conf --output-file ${TEMPDIR}/${CFGFILE_NAME}
-if [ $? != 0 ]
-then
-    exit 1
-fi
-
-if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
-then
-   echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
-   echo "${0##*/}: Please run tox -egenconfig."
-   exit 1
-fi
diff --git a/tox.ini b/tox.ini
index cf7013d..15652e8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -40,6 +40,16 @@
   find . -type f -name "*.pyc" -delete
   bash tools/pretty_tox.sh '{posargs}'
 
+[testenv:all-plugin]
+sitepackages = True
+# 'all' includes slow tests
+setenv = {[tempestenv]setenv}
+         OS_TEST_TIMEOUT=1200
+deps = {[tempestenv]deps}
+commands =
+  find . -type f -name "*.pyc" -delete
+  bash tools/pretty_tox.sh '{posargs}'
+
 [testenv:full]
 sitepackages = {[tempestenv]sitepackages}
 setenv = {[tempestenv]setenv}
@@ -98,12 +108,13 @@
 commands = {posargs}
 
 [testenv:docs]
-commands = python setup.py build_sphinx {posargs}
+# The sample config file we generate is included in the sphinxdoc, so build that first.
+commands =
+   python setup.py build_sphinx {posargs}
 
 [testenv:pep8]
 commands =
    flake8 {posargs}
-   {toxinidir}/tools/config/check_uptodate.sh
    python tools/check_uuid.py
 
 [testenv:uuidgen]