Merge "Fixing test_create_port_in_allowed_allocation_pool when 3 (or more) controllers"
diff --git a/.coveragerc b/.coveragerc
index c9b6467..51482d3 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,4 +1,4 @@
 [run]
 branch = True
 source = tempest
-omit = tempest/tests/*,tempest/openstack/*
+omit = tempest/tests/*,tempest/scenario/test_*.py,tempest/api_schema/*,tempest/api/*
diff --git a/.gitignore b/.gitignore
index efba45e..d58b162 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,9 @@
 dist
 build
 .testrepository
+.idea
+.project
+.pydevproject
 .coverage*
 !.coveragerc
 cover/
diff --git a/.mailmap b/.mailmap
index 5c37a5e..a43c0b9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1,9 +1,25 @@
-Ravikumar Venkatesan <ravikumar.venkatesan@hp.com> ravikumar-venkatesan <ravikumar.venkatesan@hp.com>
-Ravikumar Venkatesan <ravikumar.venkatesan@hp.com> ravikumar venkatesan <ravikumar.venkatesan@hp.com>
-Rohit Karajgi <rohit.karajgi@nttdata.com> Rohit Karajgi <rohit.karajgi@vertex.co.in>
-Jay Pipes <jaypipes@gmail.com> Jay Pipes <jpipes@librebox.gateway.2wire.net>
-Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
 <brian.waldon@rackspace.com> <bcwaldon@gmail.com>
-Daryl Walleck <daryl.walleck@rackspace.com> dwalleck <daryl.walleck@rackspace.com>
 <jeblair@hp.com> <corvus@inaugust.com>
 <jeblair@hp.com> <james.blair@rackspace.com>
+Adam Gandelman <adamg@ubuntu.com> Adam Gandelman <adamg@canonical.com>
+Andrea Frittoli (andreaf) <andrea.frittoli@hpe.com> Andrea Frittoli (andreaf) <andrea.frittoli@hp.com>
+Andrea Frittoli (andreaf) <andrea.frittoli@hpe.com> Andrea Frittoli <andrea.frittoli@hp.com>
+Daryl Walleck <daryl.walleck@rackspace.com> dwalleck <daryl.walleck@rackspace.com>
+David Kranz <dkranz@redhat.com> David Kranz <david.kranz@qrclab.com>
+Ghanshyam <ghanshyam.mann@nectechnologies.in> Ghanshyam Mann <ghanshyam.mann@nectechnologies.in>
+Ghanshyam <ghanshyam.mann@nectechnologies.in> ghanshyam <ghanshyam.mann@nectechnologies.in>
+Jay Pipes <jaypipes@gmail.com> Jay Pipes <jpipes@librebox.gateway.2wire.net>
+Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
+Ken'ichi Ohmichi <ken-oomichi@wx.jp.nec.com> Ken'ichi Ohmichi <oomichi@mxs.nes.nec.co.jp>
+Marc Koderer <marc@koderer.com> Marc Koderer <m.koderer@telekom.de>
+Masayuki Igawa <masayuki.igawa@gmail.com> Masayuki Igawa <igawa@mxs.nes.nec.co.jp>
+Masayuki Igawa <masayuki.igawa@gmail.com> Masayuki Igawa <mas-igawa@ut.jp.nec.com>
+Matthew Treinish <mtreinish@kortar.org> Matthew Treinish <treinish@linux.vnet.ibm.com>
+Nayna Patel <nayna.patel@hp.com> nayna-patel <nayna.patel@hp.com>
+ravikumar-venkatesan <ravikumar.venkatesan@hp.com> Ravikumar Venkatesan <ravikumar.venkatesan@hp.com>
+ravikumar-venkatesan <ravikumar.venkatesan@hp.com> ravikumar venkatesan <ravikumar.venkatesan@hp.com>
+Rohit Karajgi <rohit.karajgi@nttdata.com> Rohit Karajgi <rohit.karajgi@vertex.co.in>
+Sean Dague <sean@dague.net> Sean Dague <sdague@linux.vnet.ibm.com>
+Sean Dague <sean@dague.net> Sean Dague <sean.dague@samsung.com>
+Yuiko Takada <takada-yuiko@mxn.nes.nec.co.jp> YuikoTakada <takada-yuiko@mxn.nes.nec.co.jp>
+Zhi Kun Liu <zhikunli@cn.ibm.com> Liu, Zhi Kun <zhikunli@cn.ibm.com>
diff --git a/HACKING.rst b/HACKING.rst
index 3799046..0962f80 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -17,6 +17,7 @@
 - [T108] Check no hyphen at the end of rand_name() argument
 - [T109] Cannot use testtools.skip decorator; instead use
          decorators.skip_because from tempest-lib
+- [T110] Check that service client names of GET should be consistent
 - [N322] Method's default argument shouldn't be mutable
 
 Test Data/Configuration
@@ -329,24 +330,25 @@
         # The created server should be in the detailed list of all servers
         ...
 
-Tempest includes a ``check_uuid.py`` tool that will test for the existence
-and uniqueness of idempotent_id metadata for every test. By default the
-tool runs against the Tempest package by calling::
+Tempest-lib includes a ``check-uuid`` tool that will test for the existence
+and uniqueness of idempotent_id metadata for every test. If you have
+tempest-lib installed, you can run the tool against Tempest by calling it from
+the tempest repo::
 
-    python check_uuid.py
+    check-uuid
 
 It can be invoked against any test suite by passing a package name::
 
-    python check_uuid.py --package <package_name>
+    check-uuid --package <package_name>
 
 Tests without an ``idempotent_id`` can be automatically fixed by running
 the command with the ``--fix`` flag, which will modify the source package
 by inserting randomly generated uuids for every test that does not have
 one::
 
-    python check_uuid.py --fix
+    check-uuid --fix
 
-The ``check_uuid.py`` tool is used as part of the tempest gate job
+The ``check-uuid`` tool is used as part of the tempest gate job
 to ensure that all tests have an ``idempotent_id`` decorator.
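
For reference, a minimal sketch of the decorator that ``check-uuid`` verifies; the
test class, its base, and the UUID below are placeholders, but the
``test.idempotent_id`` usage mirrors the test modules touched later in this patch::

    import testtools

    from tempest import test


    class ExampleTest(testtools.TestCase):

        @test.idempotent_id('12345678-1234-5678-9abc-123456789abc')  # placeholder
        def test_example(self):
            # check-uuid verifies that every test method carries a unique,
            # hard-coded idempotent_id such as the one above.
            pass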
 
 Branchless Tempest Considerations
diff --git a/README.rst b/README.rst
index bf513bd..71e185f 100644
--- a/README.rst
+++ b/README.rst
@@ -22,8 +22,8 @@
 - Tempest should not touch private or implementation specific
   interfaces. This means not directly going to the database, not
   directly hitting the hypervisors, not testing extensions not
-  included in the OpenStack base. If there is some feature of
-  OpenStack that is not verifiable through standard interfaces, this
+  included in the OpenStack base. If there are some features of
+  OpenStack that are not verifiable through standard interfaces, this
   should be considered a possible enhancement.
 - Tempest strives for complete coverage of the OpenStack API and
   common scenarios that demonstrate a working cloud.
@@ -47,10 +47,11 @@
 assumptions related to that. For this section we'll only cover the newer method
 as it is simpler, and quicker to work with.
 
-#. You first need to install Tempest this is done with pip, after you check out
-   the Tempest repo you simply run something like::
+#. You first need to install Tempest. This is done with pip after you check out
+   the Tempest repo::
 
-    $ pip install tempest
+    $ git clone https://github.com/openstack/tempest/
+    $ pip install tempest/
 
    This can be done within a venv, but the assumption for this guide is that
    the Tempest cli entry point will be in your shell's PATH.
@@ -86,7 +87,7 @@
    be done with testr directly or any `testr`_ based test runner, like
    `ostestr`_. For example, from the working dir running::
 
-     $ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty))'
+     $ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))'
 
    will run the same set of tests as the default gate jobs.
 
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 12d1d40..7e4503d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -127,8 +127,11 @@
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
-html_last_updated_fmt = os.popen(git_cmd).read()
+git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
+   "-n1"]
+html_last_updated_fmt = subprocess.Popen(git_cmd,
+                                         stdout=subprocess.PIPE).\
+                                         communicate()[0]
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
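
The hunk above replaces ``os.popen`` with ``subprocess`` and relies on ``subprocess``
being imported elsewhere in ``conf.py`` (the import is not shown here). As a hedged
aside, the same value can be obtained a little more compactly with ``check_output``::

    import subprocess

    git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
               "-n1"]
    # Equivalent to Popen(git_cmd, stdout=subprocess.PIPE).communicate()[0],
    # but also raises CalledProcessError if git exits non-zero.
    html_last_updated_fmt = subprocess.check_output(git_cmd)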
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index f228040..e428592 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -3,102 +3,88 @@
 Tempest Configuration Guide
 ===========================
 
-This guide is a starting point for configuring tempest. It aims to elaborate
+This guide is a starting point for configuring Tempest. It aims to elaborate
 on and explain some of the mandatory and common configuration settings and how
 they are used in conjunction. The source of truth on each option is the sample
 config file which explains the purpose of each individual option. You can see
 the sample config file here: :ref:`tempest-sampleconf`
 
-Lock Path
----------
-
-There are some tests and operations inside of tempest that need to be
-externally locked when running in parallel to prevent them from running at
-the same time. This is a mandatory step for configuring tempest and is still
-needed even when running serially. All that is needed to do this is:
-
- #. Set the lock_path option in the oslo_concurrency group
-
 Auth/Credentials
 ----------------
 
-Tempest currently has 2 different ways in configuration to provide credentials
-to use when running tempest. One is a traditional set of configuration options
-in the tempest.conf file. These options are in the identity section and let you
-specify a regular user, a global admin user, and an alternate user set of
-credentials. (which consist of a username, password, and project/tenant name)
-These options should be clearly labelled in the sample config file in the
-identity section.
+Tempest currently has two different ways to provide the credentials used when
+running Tempest. One is a traditional set of configuration options
+in the tempest.conf file. These options are clearly labelled in the ``identity``
+section and let you specify a set of credentials for a regular user, a global
+admin user, and an alternate user, consisting of a username, password, and
+project/tenant name.
 
 The other method to provide credentials is using the accounts.yaml file. This
 file is used to specify an arbitrary number of users available to run tests
-with. You can specify the location of the file in the
-auth section in the tempest.conf file. To see the specific format used in
-the file please refer to the accounts.yaml.sample file included in tempest.
-Currently users that are specified in the accounts.yaml file are assumed to
-have the same set of roles which can be used for executing all the tests you
-are running. This will be addressed in the future, but is a current limitation.
-Eventually the config options for providing credentials to tempest will be
-deprecated and removed in favor of the accounts.yaml file.
+with. You can specify the location of the file in the ``auth`` section in the
+tempest.conf file. To see the specific format used in the file please refer to
+the accounts.yaml.sample file included in Tempest.  Eventually the config
+options for providing credentials to Tempest will be deprecated and removed in
+favor of the accounts.yaml file.
 
 Keystone Connection Info
 ^^^^^^^^^^^^^^^^^^^^^^^^
-In order for tempest to be able to talk to your OpenStack deployment you need
+In order for Tempest to be able to talk to your OpenStack deployment you need
 to provide it with information about how it communicates with keystone.
-This involves configuring the following options in the identity section:
+This involves configuring the following options in the ``identity`` section:
 
- #. auth_version
- #. uri
- #. uri_v3
+ #. ``auth_version``
+ #. ``uri``
+ #. ``uri_v3``
 
-The *auth_version* option is used to tell tempest whether it should be using
+The ``auth_version`` option is used to tell Tempest whether it should be using
 keystone's v2 or v3 api for communicating with keystone. (except for the
-identity api tests which will test a specific version) The 2 uri options are
-used to tell tempest the url of the keystone endpoint. The *uri* option is used
-for keystone v2 request and *uri_v3* is used for keystone v3. You want to ensure
-that which ever version you set for *auth_version* has its uri option defined.
+identity api tests which will test a specific version) The two uri options are
+used to tell Tempest the url of the keystone endpoint. The ``uri`` option is
+used for keystone v2 requests and ``uri_v3`` is used for keystone v3. You want
+to ensure that whichever version you set for ``auth_version`` has its uri option
+defined.
 
 
 Credential Provider Mechanisms
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Tempest currently also has 3 different internal methods for providing
-authentication to tests. Dynamic credentials, locking test accounts, and
+Tempest currently also has three different internal methods for providing
+authentication to tests: dynamic credentials, locking test accounts, and
 non-locking test accounts. Depending on which one is in use the configuration
-of tempest is slightly different.
+of Tempest is slightly different.
 
 Dynamic Credentials
 """""""""""""""""""
 Dynamic Credentials (formerly known as Tenant isolation) was originally created
-to enable running tempest in parallel.
-For each test class it creates a unique set of user credentials to use for the
-tests in the class. It can create up to 3 sets of username, password, and
-tenant/project names for a primary user, an admin user, and an alternate user.
-To enable and use dynamic credentials you only need to configure 2 things:
+to enable running Tempest in parallel.  For each test class it creates a unique
+set of user credentials to use for the tests in the class. It can create up to
+three sets of username, password, and tenant/project names for a primary user,
+an admin user, and an alternate user.  To enable and use dynamic credentials you
+only need to configure two things:
 
  #. A set of admin credentials with permissions to create users and
-    tenants/projects. This is specified in the auth section with the
-    admin_username, admin_tenant_name, admin_domain_name and admin_password
-    options
- #. To enable dynamic_creds in the auth section with the
-    use_dynamic_credentials option.
+    tenants/projects. This is specified in the ``auth`` section with the
+    ``admin_username``, ``admin_tenant_name``, ``admin_domain_name`` and
+    ``admin_password`` options
+ #. To enable dynamic credentials in the ``auth`` section with the
+    ``use_dynamic_credentials`` option.
 
-This is also the currently the default credential provider enabled by tempest,
-due to it's common use and ease of configuration.
+This is also currently the default credential provider enabled by Tempest, due
+to its common use and ease of configuration.
 
 It is worth pointing out that depending on your cloud configuration you might
 need to assign a role to each of the users created by Tempest's dynamic
-credentials.
-This can be set using the *tempest_roles* option. It takes in a list of role
-names each of which will be assigned to each of the users created by dynamic
-credentials. This option will not have any effect when set and tempest is not
+credentials.  This can be set using the ``tempest_roles`` option. It takes in a
+list of role names each of which will be assigned to each of the users created
+by dynamic credentials. This option will not have any effect when Tempest is not
 configured to use dynamic credentials.
 
 
-Locking Test Accounts (aka accounts.yaml or accounts file)
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Pre-Provisioned Credentials (aka accounts.yaml or accounts file)
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
 For a long time using dynamic credentials was the only method available if you
-wanted to enable parallel execution of tempest tests. However this was
+wanted to enable parallel execution of Tempest tests. However, this was
 insufficient for certain use cases because of the admin credentials requirement
 to create the credential sets on demand. To get around that the accounts.yaml
 file was introduced and with that a new internal credential provider to enable
@@ -109,17 +95,21 @@
 
 To enable and use locking test accounts you need do a few things:
 
- #. Create a accounts.yaml file which contains the set of pre-existing
+ #. Create an accounts.yaml file which contains the set of pre-existing
     credentials to use for testing. To make sure you don't have a credentials
-    starvation issue when running in parallel make sure you have at least 2
-    times the number of worker processes you are using to execute tempest
-    available in the file. (if running serially the worker count is 1)
+    starvation issue when running in parallel make sure you have at least two
+    times the number of worker processes you are using to execute Tempest
+    available in the file. (If running serially the worker count is 1.)
 
-    You can check the sample file packaged in tempest for the yaml format
- #. Provide tempest with the location of your accounts.yaml file with the
-    test_accounts_file option in the auth section
+    You can check the accounts.yaml.sample file packaged in Tempest for the yaml
+    format.
+ #. Provide Tempest with the location of your accounts.yaml file with the
+    ``test_accounts_file`` option in the ``auth`` section
 
- #. Set use_dynamic_credentials = False in the auth group
+    *NOTE: Be sure to use a full path for the file; otherwise Tempest will
+    likely not find it.*
+
+ #. Set ``use_dynamic_credentials = False`` in the ``auth`` group
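
As an illustration of the accounts.yaml format described above (accounts.yaml.sample
remains the authoritative reference; the field names here are an assumption based on
the credential pieces mentioned earlier: username, password, and tenant/project
name), a small Python sketch of generating such a file::

    import yaml

    # Two credential sets, each with its own tenant/project, which satisfies the
    # "at least two times the worker count" rule for a serial (one worker) run.
    accounts = [
        {'username': 'user-1', 'tenant_name': 'project-1', 'password': 'secret-1'},
        {'username': 'user-2', 'tenant_name': 'project-2', 'password': 'secret-2'},
    ]

    with open('accounts.yaml', 'w') as f:
        yaml.safe_dump(accounts, f, default_flow_style=False)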
 
 It is worth pointing out that each set of credentials in the accounts.yaml
 should have a unique tenant. This is required to provide proper isolation
@@ -127,40 +117,40 @@
 unexpected failures in some tests.
 
 
-Non-locking test accounts (aka credentials config options)
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-**Starting in the Liberty release this mechanism was deprecated and will be
-removed in a future release**
+Legacy Credentials (aka credentials config options)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+**Starting in the Liberty release this mechanism was deprecated; it will be
+removed in a future release.**
 
 When Tempest was refactored to allow for locking test accounts, the original
 non-tenant isolated case was converted to internally work similarly to the
-accounts.yaml file. This mechanism was then called the non-locking test accounts
-provider. To use the non-locking test accounts provider you can specify the sets
-of credentials in the configuration file like detailed above with following 9
-options in the identity section:
+accounts.yaml file. This mechanism was then called the legacy test accounts
+provider. To use the legacy test accounts provider you can specify the sets of
+credentials in the configuration file as detailed above with following nine
+options in the ``identity`` section:
 
- #. username
- #. password
- #. tenant_name
- #. admin_username
- #. admin_password
- #. admin_tenant_name
- #. alt_username
- #. alt_password
- #. alt_tenant_name
+ #. ``username``
+ #. ``password``
+ #. ``tenant_name``
+ #. ``admin_username``
+ #. ``admin_password``
+ #. ``admin_tenant_name``
+ #. ``alt_username``
+ #. ``alt_password``
+ #. ``alt_tenant_name``
 
-And in the auth section:
+And in the ``auth`` section:
 
- #. use_dynamic_credentials = False
- #. comment out 'test_accounts_file' or keep it as empty
+ #. ``use_dynamic_credentials = False``
+ #. Comment out ``test_accounts_file`` or keep it empty.
 
-It only makes sense to use it if parallel execution isn't needed, since tempest
-won't be able to properly isolate tests using this. Additionally, using the
-traditional config options for credentials is not able to provide credentials to
-tests which requires specific roles on accounts. This is because the config
-options do not give sufficient flexibility to describe the roles assigned to a
-user for running the tests. There are additional limitations with regard to
-network configuration when using this credential provider mechanism, see the
+It only makes sense to use this if parallel execution isn't needed, since
+Tempest won't be able to properly isolate tests using this. Additionally, the
+traditional config options for credentials are not able to provide
+credentials to tests requiring specific roles on accounts. This is because the
+config options do not give sufficient flexibility to describe the roles assigned
+to a user for running the tests. There are additional limitations with regard to
+network configuration when using this credential provider mechanism - see the
 `Networking`_ section below.
 
 Compute
@@ -168,63 +158,64 @@
 
 Flavors
 ^^^^^^^
-For tempest to be able to create servers you need to specify flavors that it
-can use to boot the servers with. There are 2 options in the tempest config
+For Tempest to be able to create servers you need to specify flavors that it
+can use to boot the servers with. There are two options in the Tempest config
 for doing this:
 
- #. flavor_ref
- #. flavor_ref_alt
+ #. ``flavor_ref``
+ #. ``flavor_ref_alt``
 
-Both of these options are in the compute section of the config file and take
-in the flavor id (not the name) from nova. The *flavor_ref* option is what will
-be used for booting almost all of the guests, *flavor_ref_alt* is only used in
-tests where 2 different sized servers are required. (for example a resize test)
+Both of these options are in the ``compute`` section of the config file and take
+in the flavor id (not the name) from nova. The ``flavor_ref`` option is what
+will be used for booting almost all of the guests; ``flavor_ref_alt`` is only
+used in tests where two different-sized servers are required (for example, a
+resize test).
 
-Using a smaller flavor is generally recommended, when larger flavors are used
+Using a smaller flavor is generally recommended. When larger flavors are used,
 the extra time required to bring up servers will likely affect total run time
 and probably require tweaking timeout values to ensure tests have ample time to
 finish.
 
 Images
 ^^^^^^
-Just like with flavors, tempest needs to know which images to use for booting
-servers. There are 2 options in the compute section just like with flavors:
+Just like with flavors, Tempest needs to know which images to use for booting
+servers. There are two options in the compute section just like with flavors:
 
- #. image_ref
- #. image_ref_alt
+ #. ``image_ref``
+ #. ``image_ref_alt``
 
-Both options are expecting an image id (not name) from nova. The *image_ref*
-option is what will be used for booting the majority of servers in tempest.
-*image_ref_alt* is used for tests that require 2 images such as rebuild. If 2
-images are not available you can set both options to the same image_ref and
+Both options are expecting an image id (not name) from nova. The ``image_ref``
+option is what will be used for booting the majority of servers in Tempest.
+``image_ref_alt`` is used for tests that require two images such as rebuild. If
+two images are not available you can set both options to the same image id and
 those tests will be skipped.
 
-There are also options in the scenario section for images:
+There are also options in the ``scenario`` section for images:
 
- #. img_file
- #. img_dir
- #. aki_img_file
- #. ari_img_file
- #. ami_img_file
- #. img_container_format
- #. img_disk_format
+ #. ``img_file``
+ #. ``img_dir``
+ #. ``aki_img_file``
+ #. ``ari_img_file``
+ #. ``ami_img_file``
+ #. ``img_container_format``
+ #. ``img_disk_format``
 
-however unlike the other image options these are used for a very small subset
+However, unlike the other image options, these are used for a very small subset
 of scenario tests which are uploading an image. These options are used to tell
-tempest where an image file is located and describe it's metadata for when it's
+Tempest where an image file is located and describe its metadata for when it is
 uploaded.
 
-The behavior of these options is a bit convoluted (which will likely be fixed
-in future versions). You first need to specify *img_dir*, which is the directory
-tempest will look for the image files in. First it will check if the filename
-set for *img_file* could be found in *img_dir*. If it is found then the
-*img_container_format* and *img_disk_format* options are used to upload that
-image to glance. However if it's not found tempest will look for the 3 uec image
-file name options as a fallback. If neither is found the tests requiring an
-image to upload will fail.
+The behavior of these options is a bit convoluted (which will likely be fixed in
+future versions). You first need to specify ``img_dir``, which is the directory
+in which Tempest will look for the image files. First it will check if the
+filename set for ``img_file`` could be found in ``img_dir``. If it is found then
+the ``img_container_format`` and ``img_disk_format`` options are used to upload
+that image to glance. However, if it is not found, Tempest will look for the
+three UEC image file name options as a fallback. If none is found, the tests
+requiring an image to upload will fail.
 
 It is worth pointing out that using `cirros`_ is a very good choice for running
-tempest. It's what is used for upstream testing, they boot quickly and have a
+Tempest. It's what is used for upstream testing; they boot quickly and have a
 small footprint.
 
 .. _cirros: https://launchpad.net/cirros
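
Tying the flavor and image options above together, a hedged sketch of the
corresponding ``tempest.conf`` entries written with Python's ``configparser`` (only
the section and option names come from the text above; the IDs and output path are
made up)::

    import configparser  # Python 3 stdlib; on Python 2 the module is ConfigParser

    conf = configparser.ConfigParser()
    conf['compute'] = {
        'flavor_ref': '42',               # nova flavor id (not name), made up
        'flavor_ref_alt': '84',           # only needed by tests using two sizes
        'image_ref': 'image-uuid-1',      # glance image id (not name), made up
        'image_ref_alt': 'image-uuid-1',  # may equal image_ref; those tests skip
    }

    with open('tempest.conf', 'w') as f:
        conf.write(f)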
@@ -232,9 +223,9 @@
 Networking
 ----------
 OpenStack has a myriad of different networking configurations possible and
-depending on which of the 2 network backends, nova-network or neutron, you are
+depending on which of the two network backends, nova-network or neutron, you are
 using things can vary drastically. Due to this complexity Tempest has to provide
-a certain level of flexibility in it's configuration to ensure it will work
+a certain level of flexibility in its configuration to ensure it will work
 against any cloud. This ends up causing a large number of permutations in
 Tempest's config around network configuration.
 
@@ -246,7 +237,7 @@
 for doing this can be different. In certain configurations it is required to
 specify a single network with server create calls. Accordingly, Tempest provides
 a few different methods for providing this information in configuration to try
-and ensure that regardless of the clouds configuration it'll still be able to
+and ensure that regardless of the cloud's configuration it'll still be able to
 run. This section covers the different methods of configuring Tempest to provide
 a network when creating servers.
 
@@ -255,17 +246,17 @@
 This is the simplest method of specifying how networks should be used. You can
 just specify a single network name/label to use for all server creations. The
 limitation with this is that all tenants/projects and users must be able to see
-that network name/label if they were to perform a network list and be able to
-use it.
+that network name/label if they are to perform a network list and be able to use
+it.
 
 If no network name is assigned in the config file and none of the below
 alternatives are used, then Tempest will not specify a network on server
 creations, which depending on the cloud configuration might prevent them from
 booting.
 
-To set a fixed network name simply do:
+To set a fixed network name simply:
 
- #. Set the fixed_network_name option in the compute group
+ #. Set the ``fixed_network_name`` option in the ``compute`` group
 
 In the case that the configured fixed network name can not be found by a user
 network list call, it will be treated like one was not provided except that a
@@ -279,8 +270,8 @@
 server creations on a per tenant/project and user pair basis. This provides
 the necessary flexibility to work with more intricate networking configurations
 by enabling the user to specify exactly which network to use for which
-tenants/projects. You can refer to the accounts.yaml sample file included in
-the tempest repo for the syntax around specifying networks in the file.
+tenants/projects. You can refer to the accounts.yaml.sample file included in
+the Tempest repo for the syntax around specifying networks in the file.
 
 However, specifying a network is not required when using an accounts file. If
 one is not specified you can use a fixed network name to specify the network to
@@ -299,29 +290,29 @@
 
 With Dynamic Credentials
 """"""""""""""""""""""""
-With dynamic credentials enabled and using nova-network then nothing changes.
-Your only option for configuration is to either set a fixed network name or not.
-However, in most cases it shouldn't matter because nova-network should have no
-problem booting a server with multiple networks. If this is not the case for
-your cloud then using an accounts file is recommended because it provides the
-necessary flexibility to describe your configuration. Dynamic credentials is not
-able to dynamically allocate things as necessary if neutron is not enabled.
+With dynamic credentials enabled and using nova-network, your only option for
+configuration is to either set a fixed network name or not.  However, in most
+cases it shouldn't matter because nova-network should have no problem booting a
+server with multiple networks. If this is not the case for your cloud then using
+an accounts file is recommended because it provides the necessary flexibility to
+describe your configuration. Dynamic credentials is not able to dynamically
+allocate things as necessary if neutron is not enabled.
 
 With neutron and dynamic credentials enabled there should not be any additional
 configuration necessary to enable Tempest to create servers with working
-networking, assuming you have properly configured the network section to work
-for your cloud. Tempest will dynamically create the neutron resources necessary
-to enable using servers with that network. Also, just as with the accounts
-file, if you specify a fixed network name while using neutron and dynamic
-credentials it will enable running tests which require a static network and it
-will additionally be used as a fallback for server creation. However, unlike
-accounts.yaml this should never be triggered.
+networking, assuming you have properly configured the ``network`` section to
+work for your cloud. Tempest will dynamically create the neutron resources
+necessary to enable using servers with that network. Also, just as with the
+accounts file, if you specify a fixed network name while using neutron and
+dynamic credentials it will enable running tests which require a static network
+and it will additionally be used as a fallback for server creation. However,
+unlike accounts.yaml this should never be triggered.
 
-However, there is an option *create_isolated_networks* to disable dynamic
-credentials's automatic provisioning of network resources. If this option is
-used you will have to either rely on there only being a single/default network
-available for the server creation, or use *fixed_network_name* to inform
-Tempest which network to use.
+However, there is an option ``create_isolated_networks`` to disable dynamic
+credentials' automatic provisioning of network resources. If this option is set
+to False you will have to either rely on there only being a single/default
+network available for the server creation, or use ``fixed_network_name`` to
+inform Tempest which network to use.
 
 Configuring Available Services
 ------------------------------
@@ -332,7 +323,7 @@
 out which tests it is able to run and certain setup steps which differ based
 on the available services.
 
-The *service_available* section of the config file is used to set which
+The ``service_available`` section of the config file is used to set which
 services are available. It contains a boolean option for each service (except
 for keystone which is a hard requirement) set it to True if the service is
 available or False if it is not.
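
Continuing the ``configparser`` sketch above, the ``service_available`` flags can be
written the same way, assuming the boolean options are simply named after the
services (neutron and swift are used as examples here)::

    import configparser

    conf = configparser.ConfigParser()
    conf['service_available'] = {
        'neutron': 'True',   # boolean values are written as strings in the ini file
        'swift': 'False',
        # keystone gets no entry here: it is a hard requirement.
    }

    with open('tempest.conf', 'w') as f:
        conf.write(f)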
@@ -341,45 +332,56 @@
 ^^^^^^^^^^^^^^^
 Each project which has its own REST API contains an entry in the service
 catalog. Like most things in OpenStack this is also completely configurable.
-However, for tempest to be able to figure out the endpoints to send REST API
-calls for each service to it needs to know how that project is defined in the
-service catalog. There are 3 options for each service section to accomplish
+However, for Tempest to be able to figure out which endpoints should get REST
+API calls for each service, it needs to know how that project is defined in the
+service catalog. There are three options for each service section to accomplish
 this:
 
- #. catalog_type
- #. endpoint_type
- #. region
+ #. ``catalog_type``
+ #. ``endpoint_type``
+ #. ``region``
 
-Setting *catalog_type* and *endpoint_type* should normally give Tempest enough
-information to determine which endpoint it should pull from the service
-catalog to use for talking to that particular service. However, if you're cloud
-has multiple regions available and you need to specify a particular one to use
-a service you can set the *region* option in that service's section.
+Setting ``catalog_type`` and ``endpoint_type`` should normally give Tempest
+enough information to determine which endpoint it should pull from the service
+catalog to use for talking to that particular service. However, if your cloud
+has multiple regions available and you need to specify a particular one to use a
+service you can set the ``region`` option in that service's section.
 
 It should also be noted that the default values for these options are set
-to what devstack uses. (which is a de facto standard for service catalog
-entries) So often nothing actually needs to be set on these options to enable
+to what devstack uses (which is a de facto standard for service catalog
+entries). So often nothing actually needs to be set on these options to enable
 communication to a particular service. It is only if you are either not using
-the same *catalog_type* as devstack or you want Tempest to talk to a different
+the same ``catalog_type`` as devstack or you want Tempest to talk to a different
 endpoint type instead of publicURL for a service that these need to be changed.
 
+.. note::
 
-Service feature configuration
+    Tempest does not serve all kinds of fancy URLs in the service catalog.  The
+    service catalog should be in a standard format (which is going to be
+    standardized at the keystone level).
+    Tempest expects URLs in the Service catalog in the following format:
+     * ``http://example.com:1234/<version-info>``
+    Examples:
+     * Good - ``http://example.com:1234/v2.0``
+     * Wouldn’t work -  ``http://example.com:1234/xyz/v2.0/``
+       (adding prefix/suffix around version etc)
+
+Service Feature Configuration
 -----------------------------
 
-OpenStack provides its deployers a myriad of different configuration options
-to enable anyone deploying it to create a cloud tailor-made for any individual
-use case. It provides options for several different backend type, databases,
+OpenStack provides its deployers a myriad of different configuration options to
+enable anyone deploying it to create a cloud tailor-made for any individual use
+case. It provides options for several different backend types, databases,
 message queues, etc. However, the downside to this configurability is that
 certain operations and features aren't supported depending on the configuration.
 These features may or may not be discoverable from the API so the burden is
-often on the user to figure out what the cloud they're talking to supports.
-Besides the obvious interoperability issues with this it also leaves Tempest
-in an interesting situation trying to figure out which tests are expected to
-work. However, Tempest tests do not rely on dynamic api discovery for a feature
-(assuming one exists). Instead Tempest has to be explicitly configured as to
-which optional features are enabled. This is in order to prevent bugs in the
-discovery mechanisms from masking failures.
+often on the user to figure out what is supported by the cloud they're talking
+to.  Besides the obvious interoperability issues with this it also leaves
+Tempest in an interesting situation trying to figure out which tests are
+expected to work. However, Tempest tests do not rely on dynamic API discovery
+for a feature (assuming one exists). Instead Tempest has to be explicitly
+configured as to which optional features are enabled. This is in order to
+prevent bugs in the discovery mechanisms from masking failures.
 
 The service feature-enabled config sections are how Tempest addresses the
 optional feature question. Each service that has tests for optional features
@@ -391,10 +393,10 @@
 
 API Extensions
 ^^^^^^^^^^^^^^
-The service feature-enabled sections often contain an *api-extensions* option
-(or in the case of swift a *discoverable_apis* option) this is used to tell
-tempest which api extensions (or configurable middleware) is used in your
-deployment. It has 2 valid config states, either it contains a single value
-"all" (which is the default) which means that every api extension is assumed
+The service feature-enabled sections often contain an ``api-extensions`` option
+(or in the case of swift a ``discoverable_apis`` option). This is used to tell
+Tempest which api extensions (or configurable middleware) are used in your
+deployment. It has two valid config states: either it contains a single value
+``all`` (which is the default) which means that every api extension is assumed
 to be enabled, or it is set to a list of each individual extension that is
 enabled for that service.
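
To round out the sketches above, the two valid shapes of the extensions option; the
``compute-feature-enabled`` section name and the ``api_extensions`` spelling are
assumptions taken from the sample config rather than from the text above, and the
extension names are made up::

    import configparser

    conf = configparser.ConfigParser()
    # Either the catch-all default ...
    conf['compute-feature-enabled'] = {'api_extensions': 'all'}
    # ... or an explicit list of the extensions enabled for that service:
    # conf['compute-feature-enabled'] = {'api_extensions': 'os-agents,os-aggregates'}

    with open('tempest.conf', 'w') as f:
        conf.write(f)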
diff --git a/doc/source/field_guide/thirdparty.rst b/doc/source/field_guide/thirdparty.rst
deleted file mode 120000
index 3fd6a51..0000000
--- a/doc/source/field_guide/thirdparty.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../../tempest/thirdparty/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fe6074f..32e6e51 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -26,7 +26,6 @@
    field_guide/api
    field_guide/scenario
    field_guide/stress
-   field_guide/thirdparty
    field_guide/unit_tests
 
 ---------------------------
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index a41038c..29653a6 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -26,7 +26,7 @@
 In order to create the basic structure with base classes and test directories
 you can use the tempest-plugin-cookiecutter project::
 
-  > cookiecutter https://git.openstack.org/openstack/tempest-plugin-cookiecutter
+  > pip install -U cookiecutter && cookiecutter https://git.openstack.org/openstack/tempest-plugin-cookiecutter
 
   Cloning into 'tempest-plugin-cookiecutter'...
   remote: Counting objects: 17, done.
diff --git a/requirements.txt b/requirements.txt
index 59d6856..7a6ed97 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,27 +1,27 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-pbr>=1.6
-cliff>=1.14.0 # Apache-2.0
-anyjson>=0.3.3
-httplib2>=0.7.5
-jsonschema!=2.5.0,<3.0.0,>=2.0.0
-testtools>=1.4.0
-boto>=2.32.1
-paramiko>=1.13.0
-netaddr!=0.7.16,>=0.7.12
-testrepository>=0.0.18
-pyOpenSSL>=0.14
+pbr>=1.6 # Apache-2.0
+cliff>=1.15.0 # Apache-2.0
+anyjson>=0.3.3 # BSD
+httplib2>=0.7.5 # MIT
+jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
+testtools>=1.4.0 # MIT
+paramiko>=1.13.0 # LGPL
+netaddr!=0.7.16,>=0.7.12 # BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+pyOpenSSL>=0.14 # Apache-2.0
 oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=2.6.0 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.8.0 # Apache-2.0
+oslo.config>=3.4.0 # Apache-2.0
+oslo.i18n>=2.1.0 # Apache-2.0
+oslo.log>=1.14.0 # Apache-2.0
 oslo.serialization>=1.10.0 # Apache-2.0
-oslo.utils!=2.6.0,>=2.4.0 # Apache-2.0
-six>=1.9.0
-iso8601>=0.1.9
-fixtures>=1.3.1
-testscenarios>=0.4
-tempest-lib>=0.10.0
-PyYAML>=3.1.0
+oslo.utils>=3.4.0 # Apache-2.0
+six>=1.9.0 # MIT
+iso8601>=0.1.9 # MIT
+fixtures>=1.3.1 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+tempest-lib>=0.14.0 # Apache-2.0
+PyYAML>=3.1.0 # MIT
 stevedore>=1.5.0 # Apache-2.0
+PrettyTable<0.8,>=0.7 # BSD
diff --git a/run_tempest.sh b/run_tempest.sh
index 0f32045..8c8f25f 100755
--- a/run_tempest.sh
+++ b/run_tempest.sh
@@ -14,8 +14,6 @@
   echo "  -C, --config             Config file location"
   echo "  -h, --help               Print this usage message"
   echo "  -d, --debug              Run tests with testtools instead of testr. This allows you to use PDB"
-  echo "  -l, --logging            Enable logging"
-  echo "  -L, --logging-config     Logging config file location.  Default is etc/logging.conf"
   echo "  -- [TESTROPTIONS]        After the first '--' you can pass arbitrary arguments to testr "
 }
 
@@ -31,10 +29,8 @@
 wrapper=""
 config_file=""
 update=0
-logging=0
-logging_config=etc/logging.conf
 
-if ! options=$(getopt -o VNnfusthdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfusthdC: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config: -- "$@")
 then
     # parse error
     usage
@@ -55,8 +51,6 @@
     -C|--config) config_file=$2; shift;;
     -s|--smoke) testrargs+="smoke";;
     -t|--serial) serial=1;;
-    -l|--logging) logging=1;;
-    -L|--logging-config) logging_config=$2; shift;;
     --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no  ;;
     *) testrargs="$testrargs $1";;
   esac
@@ -69,16 +63,6 @@
     export TEMPEST_CONFIG=`basename "$config_file"`
 fi
 
-if [ $logging -eq 1 ]; then
-    if [ ! -f "$logging_config" ]; then
-        echo "No such logging config file: $logging_config"
-        exit 1
-    fi
-    logging_config=`readlink -f "$logging_config"`
-    export TEMPEST_LOG_CONFIG_DIR=`dirname "$logging_config"`
-    export TEMPEST_LOG_CONFIG=`basename "$logging_config"`
-fi
-
 cd `dirname "$0"`
 
 if [ $no_site_packages -eq 1 ]; then
@@ -104,9 +88,9 @@
   fi
 
   if [ $serial -eq 1 ]; then
-      ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+      ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-trace -n -f
   else
-      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-trace -n -f
   fi
 }
 
diff --git a/run_tests.sh b/run_tests.sh
index 9a158e4..908056f 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -90,9 +90,9 @@
   fi
 
   if [ $serial -eq 1 ]; then
-      ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+      ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-trace -n -f
   else
-      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-trace -n -f
   fi
 }
 
diff --git a/setup.cfg b/setup.cfg
index ee61788..cc3a365 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,8 +33,12 @@
     tempest-account-generator = tempest.cmd.account_generator:main
     tempest = tempest.cmd.main:main
 tempest.cm =
+    account-generator = tempest.cmd.account_generator:TempestAccountGenerator
     init = tempest.cmd.init:TempestInit
     cleanup = tempest.cmd.cleanup:TempestCleanup
+    run-stress = tempest.cmd.run_stress:TempestRunStress
+    list-plugins = tempest.cmd.list_plugins:TempestListPlugins
+    verify-config = tempest.cmd.verify_tempest_config:TempestVerifyConfig
 oslo.config.opts =
     tempest.config = tempest.config:list_opts
 
diff --git a/tempest/README.rst b/tempest/README.rst
index f93a173..113b191 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -16,7 +16,6 @@
 |    api/ - API tests
 |    scenario/ - complex scenario tests
 |    stress/ - stress tests
-|    thirdparty/ - 3rd party api tests
 
 Each of these directories contains different types of tests. What
 belongs in each directory, the rules and examples for good tests, are
@@ -56,14 +55,6 @@
 several test jobs in parallel and can run any existing test in Tempest as a
 stress job.
 
-:ref:`third_party_field_guide`
-------------------------------
-
-Many openstack components include 3rdparty API support. It is
-completely legitimate for Tempest to include tests of 3rdparty APIs,
-but those should be kept separate from the normal OpenStack
-validation.
-
 :ref:`unit_tests_field_guide`
 -----------------------------
 
diff --git a/tempest/api/baremetal/admin/base.py b/tempest/api/baremetal/admin/base.py
index d7d2efe..80b69b9 100644
--- a/tempest/api/baremetal/admin/base.py
+++ b/tempest/api/baremetal/admin/base.py
@@ -98,8 +98,7 @@
     @classmethod
     @creates('chassis')
     def create_chassis(cls, description=None, expect_errors=False):
-        """
-        Wrapper utility for creating test chassis.
+        """Wrapper utility for creating test chassis.
 
         :param description: A description of the chassis. if not supplied,
             a random value will be generated.
@@ -114,8 +113,7 @@
     @creates('node')
     def create_node(cls, chassis_id, cpu_arch='x86', cpus=8, local_gb=10,
                     memory_mb=4096):
-        """
-        Wrapper utility for creating test baremetal nodes.
+        """Wrapper utility for creating test baremetal nodes.
 
         :param cpu_arch: CPU architecture of the node. Default: x86.
         :param cpus: Number of CPUs. Default: 8.
@@ -134,8 +132,7 @@
     @classmethod
     @creates('port')
     def create_port(cls, node_id, address, extra=None, uuid=None):
-        """
-        Wrapper utility for creating test ports.
+        """Wrapper utility for creating test ports.
 
         :param address: MAC address of the port.
         :param extra: Meta data of the port. If not supplied, an empty
@@ -152,8 +149,7 @@
 
     @classmethod
     def delete_chassis(cls, chassis_id):
-        """
-        Deletes a chassis having the specified UUID.
+        """Deletes a chassis having the specified UUID.
 
         :param uuid: The unique identifier of the chassis.
         :return: Server response.
@@ -169,8 +165,7 @@
 
     @classmethod
     def delete_node(cls, node_id):
-        """
-        Deletes a node having the specified UUID.
+        """Deletes a node having the specified UUID.
 
         :param uuid: The unique identifier of the node.
         :return: Server response.
@@ -186,8 +181,7 @@
 
     @classmethod
     def delete_port(cls, port_id):
-        """
-        Deletes a port having the specified UUID.
+        """Deletes a port having the specified UUID.
 
         :param uuid: The unique identifier of the port.
         :return: Server response.
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 38f5fb7..9f7bbae 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -23,9 +23,7 @@
 
 
 class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """
-    Tests Agents API
-    """
+    """Tests Agents API"""
 
     @classmethod
     def setup_clients(cls):
@@ -55,7 +53,7 @@
         if rand_key in kwargs:
             # NOTE: The rand_name is for avoiding agent conflicts.
             # If you try to create an agent with the same hypervisor,
-            # os and architecture as an exising agent, Nova will return
+            # os and architecture as an existing agent, Nova will return
             # an HTTPConflict or HTTPServerError.
             kwargs[rand_key] = data_utils.rand_name(kwargs[rand_key])
         return kwargs
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 4d05ff7..1d83fec 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -22,10 +22,7 @@
 
 
 class AggregatesAdminTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Aggregates API that require admin privileges
-    """
+    """Tests Aggregates API that require admin privileges"""
 
     _host_key = 'OS-EXT-SRV-ATTR:host'
 
@@ -148,7 +145,7 @@
 
     @test.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
     def test_aggregate_add_remove_host(self):
-        # Add an host to the given aggregate and remove.
+        # Add a host to the given aggregate and remove.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = (self.client.create_aggregate(name=aggregate_name)
@@ -171,7 +168,7 @@
 
     @test.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
     def test_aggregate_add_host_list(self):
-        # Add an host to the given aggregate and list.
+        # Add a host to the given aggregate and list.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = (self.client.create_aggregate(name=aggregate_name)
@@ -191,7 +188,7 @@
 
     @test.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
     def test_aggregate_add_host_get_details(self):
-        # Add an host to the given aggregate and get details.
+        # Add a host to the given aggregate and get details.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = (self.client.create_aggregate(name=aggregate_name)
@@ -208,7 +205,7 @@
 
     @test.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
     def test_aggregate_add_host_create_server_with_az(self):
-        # Add an host to the given aggregate and create a server.
+        # Add a host to the given aggregate and create a server.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         az_name = data_utils.rand_name(self.az_name_prefix)
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index bc1a854..181533b 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -22,10 +22,7 @@
 
 
 class AggregatesAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Aggregates API that require admin privileges
-    """
+    """Tests Aggregates API that require admin privileges"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 1b36ff2..5befa53 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -17,11 +17,8 @@
 from tempest import test
 
 
-class AZAdminV2TestJSON(base.BaseComputeAdminTest):
-    """
-    Tests Availability Zone API List
-    """
-    _api_version = 2
+class AZAdminV2TestJSON(base.BaseV2ComputeAdminTest):
+    """Tests Availability Zone API List"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_availability_zone_negative.py b/tempest/api/compute/admin/test_availability_zone_negative.py
index be1c289..fe979d4 100644
--- a/tempest/api/compute/admin/test_availability_zone_negative.py
+++ b/tempest/api/compute/admin/test_availability_zone_negative.py
@@ -19,10 +19,7 @@
 
 
 class AZAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Availability Zone API List
-    """
+    """Tests Availability Zone API List"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_baremetal_nodes.py b/tempest/api/compute/admin/test_baremetal_nodes.py
index 2599d86..b764483 100644
--- a/tempest/api/compute/admin/test_baremetal_nodes.py
+++ b/tempest/api/compute/admin/test_baremetal_nodes.py
@@ -20,9 +20,7 @@
 
 
 class BaremetalNodesAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """
-    Tests Baremetal API
-    """
+    """Tests Baremetal API"""
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 085916e..1ef8f67 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -23,10 +23,7 @@
 
 
 class FlavorsAdminTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Flavors API Create and Delete that require admin privileges
-    """
+    """Tests Flavors API Create and Delete that require admin privileges"""
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 0a11d52..2063267 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -19,9 +19,8 @@
 
 
 class FlavorsAccessTestJSON(base.BaseV2ComputeAdminTest):
+    """Tests Flavor Access API extension.
 
-    """
-    Tests Flavor Access API extension.
     Add and remove Flavor Access require admin privileges.
     """
 
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 89ae1b5..5070fd7 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -23,9 +23,8 @@
 
 
 class FlavorsAccessNegativeTestJSON(base.BaseV2ComputeAdminTest):
+    """Tests Flavor Access API extension.
 
-    """
-    Tests Flavor Access API extension.
     Add and remove Flavor Access require admin privileges.
     """
 
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 25dce6a..661cd18 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -19,9 +19,8 @@
 
 
 class FlavorsExtraSpecsTestJSON(base.BaseV2ComputeAdminTest):
+    """Tests Flavor Extra Spec API extension.
 
-    """
-    Tests Flavor Extra Spec API extension.
     SET, UNSET, UPDATE Flavor Extra specs require admin privileges.
     GET Flavor Extra specs can be performed even by without admin privileges.
     """
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index aa95454..14646e8 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -22,9 +22,8 @@
 
 
 class FlavorsExtraSpecsNegativeTestJSON(base.BaseV2ComputeAdminTest):
+    """Negative Tests Flavor Extra Spec API extension.
 
-    """
-    Negative Tests Flavor Extra Spec API extension.
     SET, UNSET, UPDATE Flavor Extra specs require admin privileges.
     """
 
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index e979616..456363c 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -24,9 +24,8 @@
 
 
 class FloatingIPsBulkAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """
-    Tests Floating IPs Bulk APIs Create, List and  Delete that
-    require admin privileges.
+    """Tests Floating IPs Bulk APIs that require admin privileges.
+
     API documentation - http://docs.openstack.org/api/openstack-compute/2/
     content/ext-os-floating-ips-bulk.html
     """
@@ -39,7 +38,7 @@
     @classmethod
     def resource_setup(cls):
         super(FloatingIPsBulkAdminTestJSON, cls).resource_setup()
-        cls.ip_range = CONF.compute.floating_ip_range
+        cls.ip_range = CONF.validation.floating_ip_range
         cls.verify_unallocated_floating_ip_range(cls.ip_range)
 
     @classmethod
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index 6d8788f..f6ea3a4 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -18,10 +18,7 @@
 
 
 class HostsAdminTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests hosts API using admin privileges.
-    """
+    """Tests hosts API using admin privileges."""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_hosts_negative.py b/tempest/api/compute/admin/test_hosts_negative.py
index 4c8d8a2..65ada4d 100644
--- a/tempest/api/compute/admin/test_hosts_negative.py
+++ b/tempest/api/compute/admin/test_hosts_negative.py
@@ -20,10 +20,7 @@
 
 
 class HostsAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests hosts API using admin privileges.
-    """
+    """Tests hosts API using admin privileges."""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 186867e..113ec40 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -18,10 +18,7 @@
 
 
 class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Hypervisors API that require admin privileges
-    """
+    """Tests Hypervisors API that require admin privileges"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_hypervisor_negative.py b/tempest/api/compute/admin/test_hypervisor_negative.py
index ca4a691..0e8012a 100644
--- a/tempest/api/compute/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/admin/test_hypervisor_negative.py
@@ -23,10 +23,7 @@
 
 
 class HypervisorAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Hypervisors API that require admin privileges
-    """
+    """Tests Hypervisors API that require admin privileges"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index f186a7d..653a3cd 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -28,14 +28,27 @@
     _host_key = 'OS-EXT-SRV-ATTR:host'
 
     @classmethod
+    def skip_checks(cls):
+        super(LiveBlockMigrationTestJSON, cls).skip_checks()
+
+        if not CONF.compute_feature_enabled.live_migration:
+            skip_msg = ("%s skipped as live-migration is "
+                        "not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+        if CONF.compute.min_compute_nodes < 2:
+            raise cls.skipException(
+                "Less than 2 compute nodes, skipping migration test.")
+
+    @classmethod
     def setup_clients(cls):
         super(LiveBlockMigrationTestJSON, cls).setup_clients()
         cls.admin_hosts_client = cls.os_adm.hosts_client
         cls.admin_servers_client = cls.os_adm.servers_client
         cls.admin_migration_client = cls.os_adm.migrations_client
 
-    def _get_compute_hostnames(self):
-        body = self.admin_hosts_client.list_hosts()['hosts']
+    @classmethod
+    def _get_compute_hostnames(cls):
+        body = cls.admin_hosts_client.list_hosts()['hosts']
         return [
             host_record['host_name']
             for host_record in body
@@ -66,11 +79,6 @@
     def _get_server_status(self, server_id):
         return self._get_server_details(server_id)['status']
 
-    def _create_server(self, volume_backed=False):
-            server = self.create_test_server(wait_until="ACTIVE",
-                                             volume_backed=volume_backed)
-            return server['id']
-
     def _volume_clean_up(self, server_id, volume_id):
         body = self.volumes_client.show_volume(volume_id)['volume']
         if body['status'] == 'in-use':
@@ -90,10 +98,8 @@
                               volume_backed, *block* migration is not used.
         """
         # Live migrate an instance to another host
-        if len(self._get_compute_hostnames()) < 2:
-            raise self.skipTest(
-                "Less than 2 compute nodes, skipping migration test.")
-        server_id = self._create_server(volume_backed=volume_backed)
+        server_id = self.create_test_server(wait_until="ACTIVE",
+                                            volume_backed=volume_backed)['id']
         actual_host = self._get_host_for_server(server_id)
         target_host = self._get_host_other_than(actual_host)
 
@@ -117,14 +123,10 @@
                          msg)
 
     @test.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
-    @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
-                          'Live migration not available')
     def test_live_block_migration(self):
         self._test_live_migration()
 
     @test.idempotent_id('1e107f21-61b2-4988-8f22-b196e938ab88')
-    @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
-                          'Live migration not available')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     @testtools.skipUnless(CONF.compute_feature_enabled
@@ -135,25 +137,19 @@
         self._test_live_migration(state='PAUSED')
 
     @test.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
-    @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
-                          'Live migration not available')
     @test.services('volume')
     def test_volume_backed_live_migration(self):
         self._test_live_migration(volume_backed=True)
 
     @test.idempotent_id('e19c0cc6-6720-4ed8-be83-b6603ed5c812')
-    @testtools.skipIf(not CONF.compute_feature_enabled.live_migration or not
-                      CONF.compute_feature_enabled.
+    @testtools.skipIf(not CONF.compute_feature_enabled.
                       block_migration_for_live_migration,
                       'Block Live migration not available')
     @testtools.skipIf(not CONF.compute_feature_enabled.
                       block_migrate_cinder_iscsi,
                       'Block Live migration not configured for iSCSI')
     def test_iscsi_volume(self):
-        if len(self._get_compute_hostnames()) < 2:
-            raise self.skipTest(
-                "Less than 2 compute nodes, skipping migration test.")
-        server_id = self._create_server()
+        server_id = self.create_test_server(wait_until="ACTIVE")['id']
         actual_host = self._get_host_for_server(server_id)
         target_host = self._get_host_other_than(actual_host)
 
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index 1da3f6e..e5c8790 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -20,11 +20,9 @@
 CONF = config.CONF
 
 
-class NetworksTest(base.BaseComputeAdminTest):
-    _api_version = 2
+class NetworksTest(base.BaseV2ComputeAdminTest):
+    """Tests Nova Networks API that usually requires admin privileges.
 
-    """
-    Tests Nova Networks API that usually requires admin privileges.
     API docs:
     http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks
     """
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index dbca6bb..2907e26 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -111,7 +111,7 @@
 
         # Verify that GET shows the updated quota set of user
         user_name = data_utils.rand_name('cpu_quota_user')
-        password = data_utils.rand_name('password')
+        password = data_utils.rand_password()
         email = user_name + '@testmail.tm'
         user = self.identity_utils.create_user(username=user_name,
                                                password=password,
@@ -151,8 +151,7 @@
 
 
 class QuotaClassesAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests the os-quota-class-sets API to update default quotas.
-    """
+    """Tests the os-quota-class-sets API to update default quotas."""
 
     def setUp(self):
         # All test cases in this class need to externally lock on doing
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index b0a3086..1494745 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import testtools
-
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest import config
@@ -28,7 +26,7 @@
     @classmethod
     def setup_clients(cls):
         super(SecurityGroupsTestAdminJSON, cls).setup_clients()
-        cls.adm_client = cls.os_adm.security_groups_client
+        cls.adm_client = cls.os_adm.compute_security_groups_client
         cls.client = cls.security_groups_client
 
     def _delete_security_group(self, securitygroup_id, admin=True):
@@ -38,9 +36,6 @@
             self.client.delete_security_group(securitygroup_id)
 
     @test.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
-    @testtools.skipIf(CONF.service_available.neutron,
-                      "Skipped because neutron does not support all_tenants "
-                      "search filter.")
     @test.services('network')
     def test_list_security_groups_list_all_tenants_filter(self):
         # Admin can list security groups of all tenants
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index dfaa5d5..49c7318 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -15,6 +15,7 @@
 from tempest_lib import decorators
 
 from tempest.api.compute import base
+from tempest.common import compute
 from tempest.common import fixed_network
 from tempest.common.utils import data_utils
 from tempest.common import waiters
@@ -22,10 +23,7 @@
 
 
 class ServersAdminTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Servers API using admin privileges
-    """
+    """Tests Servers API using admin privileges"""
 
     _host_key = 'OS-EXT-SRV-ATTR:host'
 
@@ -107,16 +105,14 @@
     def test_list_servers_filter_by_exist_host(self):
         # Filter the list of servers by existent host
         name = data_utils.rand_name('server')
-        flavor = self.flavor_ref
-        image_id = self.image_ref
         network = self.get_tenant_network()
         network_kwargs = fixed_network.set_networks_kwarg(network)
-        test_server = self.client.create_server(name=name, imageRef=image_id,
-                                                flavorRef=flavor,
-                                                **network_kwargs)['server']
+        # We need to create the server as an admin, so we can't use
+        # self.create_test_server() here as this method creates the server
+        # in the "primary" (i.e non-admin) tenant.
+        test_server, _ = compute.create_test_server(
+            self.os_adm, wait_until="ACTIVE", name=name, **network_kwargs)
         self.addCleanup(self.client.delete_server, test_server['id'])
-        waiters.wait_for_server_status(self.client,
-                                       test_server['id'], 'ACTIVE')
         server = self.client.show_server(test_server['id'])['server']
         self.assertEqual(server['status'], 'ACTIVE')
         hostname = server[self._host_key]
@@ -133,7 +129,7 @@
     @test.idempotent_id('ee8ae470-db70-474d-b752-690b7892cab1')
     def test_reset_state_server(self):
         # Reset server's state to 'error'
-        self.client.reset_state(self.s1_id)
+        self.client.reset_state(self.s1_id, state='error')
 
         # Verify server's state
         server = self.client.show_server(self.s1_id)['server']
@@ -150,7 +146,7 @@
     @test.idempotent_id('31ff3486-b8a0-4f56-a6c0-aab460531db3')
     def test_get_server_diagnostics_by_admin(self):
         # Retrieve server diagnostics by admin user
-        diagnostic = self.client.get_server_diagnostics(self.s1_id)
+        diagnostic = self.client.show_server_diagnostics(self.s1_id)
         basic_attrs = ['rx_packets', 'rx_errors', 'rx_drop',
                        'tx_packets', 'tx_errors', 'tx_drop',
                        'read_req', 'write_req', 'cpu', 'memory']
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index c2dc94c..23b8a6c 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -28,10 +28,7 @@
 
 
 class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Servers API using admin privileges
-    """
+    """Tests Servers API using admin privileges"""
 
     @classmethod
     def setup_clients(cls):
@@ -126,14 +123,14 @@
     @test.idempotent_id('e741298b-8df2-46f0-81cb-8f814ff2504c')
     def test_reset_state_server_nonexistent_server(self):
         self.assertRaises(lib_exc.NotFound,
-                          self.client.reset_state, '999')
+                          self.client.reset_state, '999', state='error')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('e84e2234-60d2-42fa-8b30-e2d3049724ac')
     def test_get_server_diagnostics_by_non_admin(self):
         # Non-admin user can not view server diagnostics according to policy
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_adm_client.get_server_diagnostics,
+                          self.non_adm_client.show_server_diagnostics,
                           self.s1_id)
 
     @test.attr(type=['negative'])
@@ -158,7 +155,7 @@
         self.client.suspend_server(server_id)
         waiters.wait_for_server_status(self.client,
                                        server_id, 'SUSPENDED')
-        # migrate an suspended server should fail
+        # migrate a suspended server should fail
         self.assertRaises(lib_exc.Conflict,
                           self.client.migrate_server,
                           server_id)
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
new file mode 100644
index 0000000..814a876
--- /dev/null
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -0,0 +1,68 @@
+# Copyright 2016 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ServersOnMultiNodesTest, cls).skip_checks()
+
+        if CONF.compute.min_compute_nodes < 2:
+            raise cls.skipException(
+                "Less than 2 compute nodes, skipping multi-nodes test.")
+
+    def _get_host(self, server_id):
+        return self.os_adm.servers_client.show_server(
+            server_id)['server']['OS-EXT-SRV-ATTR:host']
+
+    @test.idempotent_id('26a9d5df-6890-45f2-abc4-a659290cb130')
+    def test_create_servers_on_same_host(self):
+        server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+        hints = {'same_host': server01}
+        server02 = self.create_test_server(scheduler_hints=hints,
+                                           wait_until='ACTIVE')['id']
+        host01 = self._get_host(server01)
+        host02 = self._get_host(server02)
+        self.assertEqual(host01, host02)
+
+    @test.idempotent_id('cc7ca884-6e3e-42a3-a92f-c522fcf25e8e')
+    def test_create_servers_on_different_hosts(self):
+        server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+        hints = {'different_host': server01}
+        server02 = self.create_test_server(scheduler_hints=hints,
+                                           wait_until='ACTIVE')['id']
+        host01 = self._get_host(server01)
+        host02 = self._get_host(server02)
+        self.assertNotEqual(host01, host02)
+
+    @test.idempotent_id('7869cc84-d661-4e14-9f00-c18cdc89cf57')
+    def test_create_servers_on_different_hosts_with_list_of_servers(self):
+        server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+        # This scheduler hint also accepts a list of servers.
+        hints = {'different_host': [server01]}
+        server02 = self.create_test_server(scheduler_hints=hints,
+                                           wait_until='ACTIVE')['id']
+        host01 = self._get_host(server01)
+        host02 = self._get_host(server02)
+        self.assertNotEqual(host01, host02)
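For readers unfamiliar with these hints, a minimal sketch (not taken from this patch) of the request body the Nova API receives when the different_host hint is used; the image and flavor references are placeholders:

    # Hypothetical Nova boot request body carrying a scheduler hint; the
    # "different_host" value may be a single server ID or a list of IDs.
    request_body = {
        "server": {
            "name": "server02",
            "imageRef": "<image-uuid>",    # placeholder
            "flavorRef": "<flavor-id>",    # placeholder
        },
        "os:scheduler_hints": {"different_host": ["<server01-uuid>"]},
    }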
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 4d7dea5..8648b9f 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -19,10 +19,7 @@
 
 
 class ServicesAdminTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Services API. List and Enable/Disable require admin privileges.
-    """
+    """Tests Services API. List and Enable/Disable require admin privileges."""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index 0c81ccb..e57401a 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -19,10 +19,7 @@
 
 
 class ServicesAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-
-    """
-    Tests Services API. List and Enable/Disable require admin privileges.
-    """
+    """Tests Services API. List and Enable/Disable require admin privileges."""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index ec2192f..0856983 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -18,6 +18,7 @@
 from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 
+from tempest.common import api_version_utils
 from tempest.common import compute
 from tempest.common.utils import data_utils
 from tempest.common import waiters
@@ -30,10 +31,10 @@
 LOG = logging.getLogger(__name__)
 
 
-class BaseComputeTest(tempest.test.BaseTestCase):
+class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
+                        tempest.test.BaseTestCase):
     """Base test case class for all Compute API tests."""
 
-    _api_version = 2
     force_tenant_isolation = False
 
     # TODO(andreaf) We should care also for the alt_manager here
@@ -42,32 +43,42 @@
 
     @classmethod
     def skip_checks(cls):
-        super(BaseComputeTest, cls).skip_checks()
+        super(BaseV2ComputeTest, cls).skip_checks()
         if not CONF.service_available.nova:
             raise cls.skipException("Nova is not available")
-        if cls._api_version != 2:
-            msg = ("Unexpected API version is specified (%s)" %
-                   cls._api_version)
-            raise exceptions.InvalidConfiguration(message=msg)
+        cfg_min_version = CONF.compute_feature_enabled.min_microversion
+        cfg_max_version = CONF.compute_feature_enabled.max_microversion
+        api_version_utils.check_skip_with_microversion(cls.min_microversion,
+                                                       cls.max_microversion,
+                                                       cfg_min_version,
+                                                       cfg_max_version)
 
     @classmethod
     def setup_credentials(cls):
         cls.set_network_resources()
-        super(BaseComputeTest, cls).setup_credentials()
+        cls.request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.min_microversion,
+                CONF.compute_feature_enabled.min_microversion))
+        if cls.request_microversion:
+            cls.services_microversion = {
+                CONF.compute.catalog_type: cls.request_microversion}
+        super(BaseV2ComputeTest, cls).setup_credentials()
 
     @classmethod
     def setup_clients(cls):
-        super(BaseComputeTest, cls).setup_clients()
+        super(BaseV2ComputeTest, cls).setup_clients()
         cls.servers_client = cls.os.servers_client
         cls.server_groups_client = cls.os.server_groups_client
         cls.flavors_client = cls.os.flavors_client
-        cls.images_client = cls.os.images_client
+        cls.compute_images_client = cls.os.compute_images_client
         cls.extensions_client = cls.os.extensions_client
         cls.floating_ip_pools_client = cls.os.floating_ip_pools_client
-        cls.floating_ips_client = cls.os.floating_ips_client
+        cls.floating_ips_client = cls.os.compute_floating_ips_client
         cls.keypairs_client = cls.os.keypairs_client
-        cls.security_group_rules_client = cls.os.security_group_rules_client
-        cls.security_groups_client = cls.os.security_groups_client
+        cls.security_group_rules_client = (
+            cls.os.compute_security_group_rules_client)
+        cls.security_groups_client = cls.os.compute_security_groups_client
         cls.quotas_client = cls.os.quotas_client
         cls.quota_classes_client = cls.os.quota_classes_client
         cls.compute_networks_client = cls.os.compute_networks_client
@@ -96,16 +107,16 @@
 
     @classmethod
     def resource_setup(cls):
-        super(BaseComputeTest, cls).resource_setup()
+        super(BaseV2ComputeTest, cls).resource_setup()
         cls.build_interval = CONF.compute.build_interval
         cls.build_timeout = CONF.compute.build_timeout
-        cls.ssh_user = CONF.compute.ssh_user
         cls.image_ref = CONF.compute.image_ref
         cls.image_ref_alt = CONF.compute.image_ref_alt
         cls.flavor_ref = CONF.compute.flavor_ref
         cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
-        cls.image_ssh_user = CONF.compute.image_ssh_user
-        cls.image_ssh_password = CONF.compute.image_ssh_password
+        cls.ssh_user = CONF.validation.image_ssh_user
+        cls.image_ssh_user = CONF.validation.image_ssh_user
+        cls.image_ssh_password = CONF.validation.image_ssh_password
         cls.servers = []
         cls.images = []
         cls.security_groups = []
@@ -117,7 +128,7 @@
         cls.clear_servers()
         cls.clear_security_groups()
         cls.clear_server_groups()
-        super(BaseComputeTest, cls).resource_cleanup()
+        super(BaseV2ComputeTest, cls).resource_cleanup()
 
     @classmethod
     def clear_servers(cls):
@@ -144,10 +155,11 @@
     @classmethod
     def server_check_teardown(cls):
         """Checks is the shared server clean enough for subsequent test.
+
            Method will delete the server when it's dirty.
            The setUp method is responsible for creating a new server.
            Exceptions raised in tearDown class are fails the test case,
-           This method supposed to use only by tierDown methods, when
+           This method is supposed to be used only by tearDown methods, when
            the shared server_id is stored in the server_id of the class.
         """
         if getattr(cls, 'server_id', None) is not None:
@@ -167,7 +179,7 @@
         LOG.debug('Clearing images: %s', ','.join(cls.images))
         for image_id in cls.images:
             try:
-                cls.images_client.delete_image(image_id)
+                cls.compute_images_client.delete_image(image_id)
             except lib_exc.NotFound:
                 # The image may have already been deleted which is OK.
                 pass
@@ -275,8 +287,8 @@
             # into the delete_volume method as a convenience to the caller.
             volumes_client.wait_for_resource_deletion(volume_id)
         except lib_exc.NotFound:
-            LOG.warn("Unable to delete volume '%s' since it was not found. "
-                     "Maybe it was already deleted?" % volume_id)
+            LOG.warning("Unable to delete volume '%s' since it was not found. "
+                        "Maybe it was already deleted?" % volume_id)
 
     @classmethod
     def prepare_instance_network(cls):
@@ -292,14 +304,14 @@
         if 'name' in kwargs:
             name = kwargs.pop('name')
 
-        image = cls.images_client.create_image(server_id, name=name)
+        image = cls.compute_images_client.create_image(server_id, name=name)
         image_id = data_utils.parse_image_id(image.response['location'])
         cls.images.append(image_id)
 
         if 'wait_until' in kwargs:
-            waiters.wait_for_image_status(cls.images_client,
+            waiters.wait_for_image_status(cls.compute_images_client,
                                           image_id, kwargs['wait_until'])
-            image = cls.images_client.show_image(image_id)['image']
+            image = cls.compute_images_client.show_image(image_id)['image']
 
             if kwargs['wait_until'] == 'ACTIVE':
                 if kwargs.get('wait_for_server', True):
@@ -318,11 +330,12 @@
             except Exception:
                 LOG.exception('Failed to delete server %s' % server_id)
 
+        cls.password = data_utils.rand_password()
         server = cls.create_test_server(
             validatable,
             wait_until='ACTIVE',
+            adminPass=cls.password,
             **kwargs)
-        cls.password = server['adminPass']
         return server['id']
 
     @classmethod
@@ -344,34 +357,28 @@
     def get_server_ip(cls, server):
         """Get the server fixed or floating IP.
 
-        For the floating IP, the address created by the validation resources
-        is returned.
-        For the fixed IP, the server is returned and the current mechanism of
-        address extraction in the remote_client is followed.
+        Based on the configuration we're in, return a correct IP
+        address for validating that a guest is up.
         """
         if CONF.validation.connect_method == 'floating':
-            ip_or_server = cls.validation_resources['floating_ip']['ip']
+            return cls.validation_resources['floating_ip']['ip']
         elif CONF.validation.connect_method == 'fixed':
-            ip_or_server = server
-        return ip_or_server
+            addresses = server['addresses'][CONF.validation.network_for_ssh]
+            for address in addresses:
+                if address['version'] == CONF.validation.ip_version_for_ssh:
+                    return address['addr']
+            raise exceptions.ServerUnreachable()
+        else:
+            raise exceptions.InvalidConfiguration()
 
 
-class BaseV2ComputeTest(BaseComputeTest):
-    _api_version = 2
-
-
-class BaseComputeAdminTest(BaseComputeTest):
+class BaseV2ComputeAdminTest(BaseV2ComputeTest):
     """Base test case class for Compute Admin API tests."""
 
     credentials = ['primary', 'admin']
 
     @classmethod
     def setup_clients(cls):
-        super(BaseComputeAdminTest, cls).setup_clients()
+        super(BaseV2ComputeAdminTest, cls).setup_clients()
         cls.availability_zone_admin_client = (
             cls.os_adm.availability_zone_client)
-
-
-class BaseV2ComputeAdminTest(BaseComputeAdminTest):
-    """Base test case class for Compute Admin V2 API tests."""
-    _api_version = 2
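As a rough, standalone sketch of the fixed-IP branch added to get_server_ip() above; the network name and IP version below stand in for the validation config options:

    # Minimal sketch, not part of the patch: pick an address of the wanted
    # version from a Nova server's 'addresses' mapping.
    def pick_fixed_ip(server, network='private', ip_version=4):
        for address in server['addresses'][network]:
            if address['version'] == ip_version:
                return address['addr']
        raise RuntimeError('no usable address on network %s' % network)

    # Typical shape of the data it walks:
    # server['addresses'] == {'private': [{'version': 4,
    #                                      'addr': '10.0.0.3'}]}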
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index 0096fc2..d5c7302 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -20,9 +20,7 @@
 CONF = config.CONF
 
 
-class CertificatesV2TestJSON(base.BaseComputeTest):
-
-    _api_version = 2
+class CertificatesV2TestJSON(base.BaseV2ComputeTest):
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index e114c80..7e01296 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -17,9 +17,7 @@
 from tempest import test
 
 
-class FlavorsV2TestJSON(base.BaseComputeTest):
-
-    _api_version = 2
+class FlavorsV2TestJSON(base.BaseV2ComputeTest):
     _min_disk = 'minDisk'
     _min_ram = 'minRam'
 
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
index 64aac80..0223c0d 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
@@ -60,7 +60,7 @@
         # to a project should fail
         self.assertRaises(lib_exc.NotFound,
                           self.client.create_floating_ip,
-                          "non_exist_pool")
+                          pool="non_exist_pool")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('ae1c55a8-552b-44d4-bfb6-2a115a15d0ba')
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 975b850..0724566 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -37,7 +37,7 @@
     def setup_clients(cls):
         super(ImagesMetadataTestJSON, cls).setup_clients()
         cls.glance_client = cls.os.image_client
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/compute/images/test_image_metadata_negative.py b/tempest/api/compute/images/test_image_metadata_negative.py
index 0f02166..85d137b 100644
--- a/tempest/api/compute/images/test_image_metadata_negative.py
+++ b/tempest/api/compute/images/test_image_metadata_negative.py
@@ -25,7 +25,7 @@
     @classmethod
     def setup_clients(cls):
         super(ImagesMetadataTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
 
     @test.attr(type=['negative'])
     @test.idempotent_id('94069db2-792f-4fa8-8bd3-2271a6e0c095')
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index dc62620..150e8af 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -37,7 +37,7 @@
     @classmethod
     def setup_clients(cls):
         super(ImagesTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
         cls.servers_client = cls.servers_client
 
     @test.idempotent_id('aa06b52b-2db5-4807-b218-9441f75d74e3')
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index 9197adf..8706566 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -39,7 +39,7 @@
     @classmethod
     def setup_clients(cls):
         super(ImagesNegativeTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
         cls.servers_client = cls.servers_client
 
     @test.attr(type=['negative'])
@@ -68,7 +68,7 @@
         resp = {}
         resp['status'] = None
         self.assertRaises(lib_exc.NotFound, self.create_image_from_server,
-                          '!@#$%^&*()', name=name, meta=meta)
+                          '!@$%^&*()', name=name, meta=meta)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('ec176029-73dc-4037-8d72-2e4ff60cf538')
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 37c2bb6..7b978ab 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -62,7 +62,7 @@
     @classmethod
     def setup_clients(cls):
         super(ImagesOneServerTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 9ea62fb..2fc9ef8 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -76,7 +76,7 @@
     @classmethod
     def setup_clients(cls):
         super(ImagesOneServerNegativeTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 9f3ba71..af840cc 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -15,7 +15,6 @@
 
 import time
 
-from oslo_log import log as logging
 import six
 import testtools
 
@@ -27,8 +26,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
 
@@ -42,7 +39,7 @@
     @classmethod
     def setup_clients(cls):
         super(ListImageFiltersTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
         cls.glance_client = cls.os.image_client
 
     @classmethod
diff --git a/tempest/api/compute/images/test_list_image_filters_negative.py b/tempest/api/compute/images/test_list_image_filters_negative.py
index 82062bd..34d26e2 100644
--- a/tempest/api/compute/images/test_list_image_filters_negative.py
+++ b/tempest/api/compute/images/test_list_image_filters_negative.py
@@ -34,7 +34,7 @@
     @classmethod
     def setup_clients(cls):
         super(ListImageFiltersNegativeTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
 
     @test.attr(type=['negative'])
     @test.idempotent_id('391b0440-432c-4d4b-b5da-c5096aa247eb')
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index 6ca15d6..ae3667d 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -32,7 +32,7 @@
     @classmethod
     def setup_clients(cls):
         super(ListImagesTestJSON, cls).setup_clients()
-        cls.client = cls.images_client
+        cls.client = cls.compute_images_client
 
     @test.idempotent_id('490d0898-e12a-463f-aef0-c50156b9f789')
     def test_get_image(self):
diff --git a/tempest/api/compute/keypairs/base.py b/tempest/api/compute/keypairs/base.py
index 76e5573..ebfb724 100644
--- a/tempest/api/compute/keypairs/base.py
+++ b/tempest/api/compute/keypairs/base.py
@@ -16,11 +16,9 @@
 from tempest.api.compute import base
 
 
-class BaseKeypairTest(base.BaseComputeTest):
+class BaseKeypairTest(base.BaseV2ComputeTest):
     """Base test case class for all keypair API tests."""
 
-    _api_version = 2
-
     @classmethod
     def setup_clients(cls):
         super(BaseKeypairTest, cls).setup_clients()
@@ -29,10 +27,12 @@
     def _delete_keypair(self, keypair_name):
         self.client.delete_keypair(keypair_name)
 
-    def _create_keypair(self, keypair_name, pub_key=None):
+    def _create_keypair(self, keypair_name, pub_key=None, keypair_type=None):
         kwargs = {'name': keypair_name}
         if pub_key:
             kwargs.update({'public_key': pub_key})
+        if keypair_type:
+            kwargs.update({'type': keypair_type})
         body = self.client.create_keypair(**kwargs)['keypair']
         self.addCleanup(self._delete_keypair, keypair_name)
         return body
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index d10bf14..be6f615 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -19,6 +19,8 @@
 
 
 class KeyPairsV2TestJSON(base.BaseKeypairTest):
+    max_microversion = '2.1'
+
     @test.idempotent_id('1d1dbedb-d7a0-432a-9d09-83f543c3c19b')
     def test_keypairs_create_list_delete(self):
         # Keypairs created should be available in the response list
diff --git a/tempest/api/compute/keypairs/test_keypairs_v22.py b/tempest/api/compute/keypairs/test_keypairs_v22.py
new file mode 100644
index 0000000..997ef9b
--- /dev/null
+++ b/tempest/api/compute/keypairs/test_keypairs_v22.py
@@ -0,0 +1,51 @@
+# Copyright 2016 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute.keypairs import test_keypairs
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class KeyPairsV22TestJSON(test_keypairs.KeyPairsV2TestJSON):
+    min_microversion = '2.2'
+    max_microversion = 'latest'
+
+    def _check_keypair_type(self, keypair, keypair_type):
+        if keypair_type is None:
+            keypair_type = 'ssh'
+        self.assertEqual(keypair_type, keypair['type'])
+
+    def _test_keypairs_create_list_show(self, keypair_type=None):
+        k_name = data_utils.rand_name('keypair')
+        keypair = self._create_keypair(k_name, keypair_type=keypair_type)
+        # Verify that 'type' is present in the keypair create response of
+        # version 2.2 and that it defaults to 'ssh'.
+        self._check_keypair_type(keypair, keypair_type)
+        keypair_detail = self.client.show_keypair(k_name)['keypair']
+        self._check_keypair_type(keypair_detail, keypair_type)
+        fetched_list = self.client.list_keypairs()['keypairs']
+        for keypair in fetched_list:
+            # Verify that 'type' is present in the keypair list response of
+            # version 2.2 and that it defaults to 'ssh'.
+            if keypair['keypair']['name'] == k_name:
+                self._check_keypair_type(keypair['keypair'], keypair_type)
+
+    @test.idempotent_id('8726fa85-7f98-4b20-af9e-f710a4f3391c')
+    def test_keypairsv22_create_list_show(self):
+        self._test_keypairs_create_list_show()
+
+    @test.idempotent_id('89d59d43-f735-441a-abcf-0601727f47b6')
+    def test_keypairsv22_create_list_show_with_type(self):
+        keypair_type = 'x509'
+        self._test_keypairs_create_list_show(keypair_type=keypair_type)
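A minimal sketch, reusing the api_version_utils helpers that appear earlier in this patch, of how the class bounds above (2.2 through latest) interact with a configured microversion range; the configured values here are assumptions, not taken from any real deployment:

    from tempest.common import api_version_utils

    # Skips the class unless [2.2, latest] overlaps the configured range
    # ('2.1'..'2.10' is an assumed deployment configuration).
    api_version_utils.check_skip_with_microversion('2.2', 'latest',
                                                   '2.1', '2.10')

    # The request microversion is the larger of the class minimum and the
    # configured minimum, so requests here would be sent with '2.2'.
    selected = api_version_utils.select_request_microversion('2.2', '2.1')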
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 9aa59f7..38c294b 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -158,7 +158,9 @@
             to_port=to_port2)['security_group_rule']
         rule2_id = rule['id']
         # Delete the Security Group rule2 at the end of this method
-        self.addCleanup(self.client.delete_security_group_rule, rule2_id)
+        self.addCleanup(
+            self.security_group_rules_client.delete_security_group_rule,
+            rule2_id)
 
         # Get rules of the created Security Group
         rules = self.security_groups_client.show_security_group(
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index dbbeb70..81a02be 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -100,7 +100,7 @@
         server_id = server['id']
         waiters.wait_for_server_status(self.servers_client, server_id,
                                        'ACTIVE')
-        self.servers_client.add_security_group(server_id, sg['name'])
+        self.servers_client.add_security_group(server_id, name=sg['name'])
 
         # Check that we are not able to delete the security
         # group since it is in use by an active server
@@ -109,10 +109,10 @@
                           sg['id'])
 
         # Reboot and add the other security group
-        self.servers_client.reboot_server(server_id, 'HARD')
+        self.servers_client.reboot_server(server_id, type='HARD')
         waiters.wait_for_server_status(self.servers_client, server_id,
                                        'ACTIVE')
-        self.servers_client.add_security_group(server_id, sg2['name'])
+        self.servers_client.add_security_group(server_id, name=sg2['name'])
 
         # Check that we are not able to delete the other security
         # group since it is in use by an active server
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 24d503f..a6ccdd3 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -47,7 +47,7 @@
         cls.client = cls.os.interfaces_client
 
     def wait_for_interface_status(self, server, port_id, status):
-        """Waits for a interface to reach a given status."""
+        """Waits for an interface to reach a given status."""
         body = (self.client.show_interface(server, port_id)
                 ['interfaceAttachment'])
         interface_status = body['port_state']
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 080441a..76da317 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -17,11 +17,8 @@
 from tempest import test
 
 
-class AZV2TestJSON(base.BaseComputeTest):
-    """
-    Tests Availability Zone API List
-    """
-    _api_version = 2
+class AZV2TestJSON(base.BaseV2ComputeTest):
+    """Tests Availability Zone API List"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 902b72c..f719bfc 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -50,6 +50,7 @@
         cls.accessIPv4 = '1.1.1.1'
         cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
         cls.name = data_utils.rand_name('server')
+        cls.password = data_utils.rand_password()
         disk_config = cls.disk_config
         cls.server_initial = cls.create_test_server(
             validatable=True,
@@ -58,8 +59,8 @@
             metadata=cls.meta,
             accessIPv4=cls.accessIPv4,
             accessIPv6=cls.accessIPv6,
-            disk_config=disk_config)
-        cls.password = cls.server_initial['adminPass']
+            disk_config=disk_config,
+            adminPass=cls.password)
         cls.server = (cls.client.show_server(cls.server_initial['id'])
                       ['server'])
 
@@ -150,7 +151,7 @@
                                          wait_until='ACTIVE')
 
         # Check a server is in the group
-        server_group = (self.server_groups_client.get_server_group(group_id)
+        server_group = (self.server_groups_client.show_server_group(group_id)
                         ['server_group'])
         self.assertIn(server['id'], server_group['members'])
 
@@ -262,13 +263,16 @@
                           'Instance validation tests are disabled.')
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
+        flavor_base = self.flavors_client.show_flavor(
+            self.flavor_ref)['flavor']
 
         def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
-            ram = 64
-            vcpus = 1
-            disk = 0
+
+            ram = flavor_base['ram']
+            vcpus = flavor_base['vcpus']
+            disk = flavor_base['disk']
 
             # Create a flavor with extra specs
             flavor = (self.flavor_client.
@@ -284,9 +288,9 @@
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
-            ram = 64
-            vcpus = 1
-            disk = 0
+            ram = flavor_base['ram']
+            vcpus = flavor_base['vcpus']
+            disk = flavor_base['disk']
 
             # Create a flavor without extra specs
             flavor = (self.flavor_client.
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index 97d47fd..1367629 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -35,7 +35,7 @@
     @test.idempotent_id('77ca5cc5-9990-45e0-ab98-1de8fead201a')
     def test_list_instance_actions(self):
         # List actions of the provided server
-        self.client.reboot_server(self.server_id, 'HARD')
+        self.client.reboot_server(self.server_id, type='HARD')
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
 
         body = (self.client.list_instance_actions(self.server_id)
@@ -47,7 +47,7 @@
     @test.idempotent_id('aacc71ca-1d70-4aa5-bbf6-0ff71470e43c')
     def test_get_instance_action(self):
         # Get the action details of the provided server
-        body = self.client.get_instance_action(
+        body = self.client.show_instance_action(
             self.server_id, self.request_id)['instanceAction']
         self.assertEqual(self.server_id, body['instance_uuid'])
         self.assertEqual('create', body['action'])
diff --git a/tempest/api/compute/servers/test_instance_actions_negative.py b/tempest/api/compute/servers/test_instance_actions_negative.py
index 6567da1..ac66d05 100644
--- a/tempest/api/compute/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/servers/test_instance_actions_negative.py
@@ -46,5 +46,5 @@
     @test.idempotent_id('0269f40a-6f18-456c-b336-c03623c897f1')
     def test_get_instance_action_invalid_request(self):
         # Get the action details of the provided server with invalid request
-        self.assertRaises(lib_exc.NotFound, self.client.get_instance_action,
+        self.assertRaises(lib_exc.NotFound, self.client.show_instance_action,
                           self.server_id, '999')
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 3acff98..37f322f 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -13,6 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest_lib import decorators
 from tempest_lib import exceptions as lib_exc
 
 from tempest.api.compute import base
@@ -43,7 +44,7 @@
         super(ListServerFiltersTestJSON, cls).resource_setup()
 
         # Check to see if the alternate image ref actually exists...
-        images_client = cls.images_client
+        images_client = cls.compute_images_client
         images = images_client.list_images()['images']
 
         if cls.image_ref != cls.image_ref_alt and \
@@ -56,13 +57,13 @@
         # Do some sanity checks here. If one of the images does
         # not exist, fail early since the tests won't work...
         try:
-            cls.images_client.show_image(cls.image_ref)
+            cls.compute_images_client.show_image(cls.image_ref)
         except lib_exc.NotFound:
             raise RuntimeError("Image %s (image_ref) was not found!" %
                                cls.image_ref)
 
         try:
-            cls.images_client.show_image(cls.image_ref_alt)
+            cls.compute_images_client.show_image(cls.image_ref_alt)
         except lib_exc.NotFound:
             raise RuntimeError("Image %s (image_ref_alt) was not found!" %
                                cls.image_ref_alt)
@@ -290,6 +291,7 @@
         self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
         self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
+    @decorators.skip_because(bug="1540645")
     @test.idempotent_id('a905e287-c35e-42f2-b132-d02b09f3654a')
     def test_list_servers_filtered_by_ip_regex(self):
         # Filter servers by regex ip
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index a945411..eb1beb1 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -25,10 +25,9 @@
         return data_utils.rand_name(self._name)
 
     def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
-        """
-        This is the right way to create_multiple servers and manage to get the
-        created servers into the servers list to be cleaned up after all.
-        """
+        # NOTE: This is the right way to create multiple servers and to get
+        # the created servers into the servers list so they are cleaned up
+        # afterwards.
         kwargs['name'] = name if name else self._generate_name()
         if wait_until:
             kwargs['wait_until'] = wait_until
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 8135768..3d8a732 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -27,10 +27,8 @@
         return data_utils.rand_name(self._name)
 
     def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
-        """
-        This is the right way to create_multiple servers and manage to get the
-        created servers into the servers list to be cleaned up after all.
-        """
+        # This is the right way to create multiple servers and to get the
+        # created servers into the servers list so they are cleaned up later.
         kwargs['name'] = kwargs.get('name', self._generate_name())
         body = self.create_test_server(**kwargs)
 
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index a59cb16..66e85a6 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -52,7 +52,7 @@
         except Exception:
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.rebuild_server(
-                self.server_id, validatable=True)['server']
+                self.server_id, validatable=True)
 
     def tearDown(self):
         self.server_check_teardown()
@@ -81,7 +81,7 @@
     def test_change_server_password(self):
         # The server's password should be set to the provided password
         new_password = 'Newpass1234'
-        self.client.change_password(self.server_id, new_password)
+        self.client.change_password(self.server_id, adminPass=new_password)
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
 
         if CONF.validation.run_validation:
@@ -104,7 +104,7 @@
                 self.validation_resources['keypair']['private_key'])
             boot_time = linux_client.get_boot_time()
 
-        self.client.reboot_server(self.server_id, reboot_type)
+        self.client.reboot_server(self.server_id, type=reboot_type)
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
 
         if CONF.validation.run_validation:
@@ -172,11 +172,16 @@
         self.assertEqual(new_name, server['name'])
 
         if CONF.validation.run_validation:
-            # TODO(jlanoux) add authentication with the provided password
+            # Authentication is attempted in the following order of priority:
+            # 1. The key passed in, if one was passed in.
+            # 2. Any key we can find through an SSH agent (if allowed).
+            # 3. Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
+            #    ~/.ssh/ (if allowed).
+            # 4. Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(rebuilt_server),
                 self.ssh_user,
-                self.password,
+                password,
                 self.validation_resources['keypair']['private_key'])
             linux_client.validate_authentication()
 
@@ -279,9 +284,9 @@
         # create the first and the second backup
         backup1 = data_utils.rand_name('backup-1')
         resp = self.client.create_backup(self.server_id,
-                                         'daily',
-                                         2,
-                                         backup1).response
+                                         backup_type='daily',
+                                         rotation=2,
+                                         name=backup1).response
         oldest_backup_exist = True
 
         # the oldest one should be deleted automatically in this test
@@ -303,9 +308,9 @@
         backup2 = data_utils.rand_name('backup-2')
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
         resp = self.client.create_backup(self.server_id,
-                                         'daily',
-                                         2,
-                                         backup2).response
+                                         backup_type='daily',
+                                         rotation=2,
+                                         name=backup2).response
         image2_id = data_utils.parse_image_id(resp['location'])
         self.addCleanup(self.os.image_client.delete_image, image2_id)
         self.os.image_client.wait_for_image_status(image2_id, 'active')
@@ -331,9 +336,9 @@
         backup3 = data_utils.rand_name('backup-3')
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
         resp = self.client.create_backup(self.server_id,
-                                         'daily',
-                                         2,
-                                         backup3).response
+                                         backup_type='daily',
+                                         rotation=2,
+                                         name=backup3).response
         image3_id = data_utils.parse_image_id(resp['location'])
         self.addCleanup(self.os.image_client.delete_image, image3_id)
         # the first back up should be deleted
@@ -356,7 +361,7 @@
 
     def _get_output(self):
         output = self.client.get_console_output(
-            self.server_id, 10)['output']
+            self.server_id, length=10)['output']
         self.assertTrue(output, "Console output was empty.")
         lines = len(output.split('\n'))
         self.assertEqual(lines, 10)
@@ -373,7 +378,7 @@
         # log file is truncated and we cannot get any console log through
         # "console-log" API.
         # The detail is https://bugs.launchpad.net/nova/+bug/1251920
-        self.client.reboot_server(self.server_id, 'HARD')
+        self.client.reboot_server(self.server_id, type='HARD')
         waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
         self.wait_for(self._get_output)
 
@@ -384,8 +389,7 @@
         server = self.create_test_server(wait_until='ACTIVE')
 
         def _check_full_length_console_log():
-            output = self.client.get_console_output(server['id'],
-                                                    None)['output']
+            output = self.client.get_console_output(server['id'])['output']
             self.assertTrue(output, "Console output was empty.")
             lines = len(output.split('\n'))
 
@@ -454,7 +458,7 @@
         server = self.client.show_server(self.server_id)['server']
         image_name = server['name'] + '-shelved'
         params = {'name': image_name}
-        images = self.images_client.list_images(**params)['images']
+        images = self.compute_images_client.list_images(**params)['images']
         self.assertEqual(1, len(images))
         self.assertEqual(image_name, images[0]['name'])
 
@@ -499,7 +503,7 @@
         console_types = ['novnc', 'xvpvnc']
         for console_type in console_types:
             body = self.client.get_vnc_console(self.server_id,
-                                               console_type)['console']
+                                               type=console_type)['console']
             self.assertEqual(console_type, body['type'])
             self.assertNotEqual('', body['url'])
             self._validate_url(body['url'])
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 0da7912..c23b365 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -21,8 +21,8 @@
 
 
 class ServerGroupTestJSON(base.BaseV2ComputeTest):
-    """
-    These tests check for the server-group APIs
+    """These tests check for the server-group APIs
+
     They create/delete server-groups with different policies.
     policies = affinity/anti-affinity
     It also adds the tests for list and get details of server-groups
@@ -104,9 +104,9 @@
             self._delete_server_group(server_groups[i])
 
     @test.idempotent_id('b3545034-dd78-48f0-bdc2-a4adfa6d0ead')
-    def test_get_server_group(self):
+    def test_show_server_group(self):
         # Get the server-group
-        body = self.client.get_server_group(
+        body = self.client.show_server_group(
             self.created_server_group['id'])['server_group']
         self.assertEqual(self.created_server_group, body)
 
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 77ddb3b..9c07677 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -87,8 +87,8 @@
     @test.idempotent_id('3043c57d-7e0e-49a6-9a96-ad569c265e6a')
     def test_get_server_metadata_item(self):
         # The value for a specific metadata key should be returned
-        meta = self.client.get_server_metadata_item(self.server_id,
-                                                    'key2')['meta']
+        meta = self.client.show_server_metadata_item(self.server_id,
+                                                     'key2')['meta']
         self.assertEqual('value2', meta['key2'])
 
     @test.idempotent_id('58c02d4f-5c67-40be-8744-d3fa5982eb1c')
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index cee60fb..18d80be 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -67,7 +67,7 @@
         # GET on a non-existent server should not succeed
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.client.get_server_metadata_item,
+                          self.client.show_server_metadata_item,
                           non_existent_server_id,
                           'test2')
 
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index 35c2cfd..9b41708 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -32,7 +32,7 @@
 
     @test.idempotent_id('f83b582f-62a8-4f22-85b0-0dee50ff783a')
     def test_get_server_password(self):
-        self.client.get_password(self.server['id'])
+        self.client.show_password(self.server['id'])
 
     @test.idempotent_id('f8229e8b-b625-4493-800a-bde86ac611ea')
     def test_delete_server_password(self):
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 77af509..dad8e90 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -14,9 +14,12 @@
 #    under the License.
 
 import base64
+from tempest_lib.common.utils import data_utils
 from tempest_lib import exceptions as lib_exc
 
 from tempest.api.compute import base
+from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -26,6 +29,16 @@
 class ServerPersonalityTestJSON(base.BaseV2ComputeTest):
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(ServerPersonalityTestJSON, cls).setup_credentials()
+
+    @classmethod
+    def resource_setup(cls):
+        cls.set_validation_resources()
+        super(ServerPersonalityTestJSON, cls).resource_setup()
+
+    @classmethod
     def skip_checks(cls):
         super(ServerPersonalityTestJSON, cls).skip_checks()
         if not CONF.compute_feature_enabled.personality:
@@ -40,18 +53,37 @@
     @test.idempotent_id('3cfe87fd-115b-4a02-b942-7dc36a337fdf')
     def test_create_server_with_personality(self):
         file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
+        file_path = '/test.txt'
+        personality = [{'path': file_path,
                         'contents': base64.b64encode(file_contents)}]
-        self.create_test_server(personality=personality)
+        password = data_utils.rand_password()
+        created_server = self.create_test_server(personality=personality,
+                                                 adminPass=password,
+                                                 wait_until='ACTIVE',
+                                                 validatable=True)
+        server = self.client.show_server(created_server['id'])['server']
+        if CONF.validation.run_validation:
+            linux_client = remote_client.RemoteClient(
+                self.get_server_ip(server),
+                self.ssh_user, password,
+                self.validation_resources['keypair']['private_key'])
+            self.assertEqual(file_contents,
+                             linux_client.exec_command(
+                                 'sudo cat %s' % file_path))
 
     @test.idempotent_id('128966d8-71fc-443c-8cab-08e24114ecc9')
     def test_rebuild_server_with_personality(self):
-        server_id = self.rebuild_server(None)
+        server = self.create_test_server(wait_until='ACTIVE', validatable=True)
+        server_id = server['id']
         file_contents = 'Test server rebuild.'
         personality = [{'path': 'rebuild.txt',
                         'contents': base64.b64encode(file_contents)}]
-        self.client.rebuild_server(server_id, self.image_ref_alt,
-                                   personality=personality)
+        rebuilt_server = self.client.rebuild_server(server_id,
+                                                    self.image_ref_alt,
+                                                    personality=personality)
+        waiters.wait_for_server_status(self.client, server_id, 'ACTIVE')
+        self.assertEqual(self.image_ref_alt,
+                         rebuilt_server['server']['image']['id'])
 
     @test.idempotent_id('176cd8c9-b9e8-48ee-a480-180beab292bf')
     def test_personality_files_exceed_limit(self):
@@ -83,9 +115,23 @@
             raise self.skipException("No limit for personality files")
         person = []
         for i in range(0, int(max_file_limit)):
-            path = 'etc/test' + str(i) + '.txt'
+            path = '/etc/test' + str(i) + '.txt'
             person.append({
                 'path': path,
                 'contents': base64.b64encode(file_contents),
             })
-        self.create_test_server(personality=person)
+        password = data_utils.rand_password()
+        created_server = self.create_test_server(personality=person,
+                                                 adminPass=password,
+                                                 wait_until='ACTIVE',
+                                                 validatable=True)
+        server = self.client.show_server(created_server['id'])['server']
+        if CONF.validation.run_validation:
+            linux_client = remote_client.RemoteClient(
+                self.get_server_ip(server),
+                self.ssh_user, password,
+                self.validation_resources['keypair']['private_key'])
+            for i in person:
+                self.assertEqual(base64.b64decode(i['contents']),
+                                 linux_client.exec_command(
+                                     'sudo cat %s' % i['path']))
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 96ce45e..12b824f 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -52,10 +52,11 @@
             name=cls.sg_name, description=cls.sg_desc)['security_group']
         cls.sg_id = cls.sg['id']
 
+        cls.password = data_utils.rand_password()
         # Server for positive tests
-        server = cls.create_test_server(wait_until='BUILD')
+        server = cls.create_test_server(adminPass=cls.password,
+                                        wait_until='BUILD')
         cls.server_id = server['id']
-        cls.password = server['adminPass']
         waiters.wait_for_server_status(cls.servers_client, cls.server_id,
                                        'ACTIVE')
 
@@ -116,8 +117,9 @@
         self.addCleanup(self._unrescue, self.server_id)
 
         # Add Security group
-        self.servers_client.add_security_group(self.server_id, self.sg_name)
+        self.servers_client.add_security_group(self.server_id,
+                                               name=self.sg_name)
 
         # Delete Security group
         self.servers_client.remove_security_group(self.server_id,
-                                                  self.sg_name)
+                                                  name=self.sg_name)
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index f8567cf..5afb4d1 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -43,14 +43,15 @@
     def resource_setup(cls):
         super(ServerRescueNegativeTestJSON, cls).resource_setup()
         cls.device = CONF.compute.volume_device_name
-
+        cls.password = data_utils.rand_password()
+        rescue_password = data_utils.rand_password()
         # Server for negative tests
-        server = cls.create_test_server(wait_until='BUILD')
-        resc_server = cls.create_test_server(wait_until='ACTIVE')
+        server = cls.create_test_server(adminPass=cls.password,
+                                        wait_until='BUILD')
+        resc_server = cls.create_test_server(adminPass=rescue_password,
+                                             wait_until='ACTIVE')
         cls.server_id = server['id']
-        cls.password = server['adminPass']
         cls.rescue_id = resc_server['id']
-        rescue_password = resc_server['adminPass']
 
         cls.servers_client.rescue_server(
             cls.rescue_id, adminPass=rescue_password)
@@ -101,7 +102,7 @@
     @test.idempotent_id('db22b618-f157-4566-a317-1b6d467a8094')
     def test_rescued_vm_reboot(self):
         self.assertRaises(lib_exc.Conflict, self.servers_client.reboot_server,
-                          self.rescue_id, 'HARD')
+                          self.rescue_id, type='HARD')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('6dfc0a55-3a77-4564-a144-1587b7971dde')
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index d2fb652..2f79d47 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -13,11 +13,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest.common import waiters
+from tempest import config
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServersTestJSON(base.BaseV2ComputeTest):
 
@@ -31,6 +36,9 @@
         super(ServersTestJSON, self).tearDown()
 
     @test.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
+    @testtools.skipUnless(CONF.compute_feature_enabled.
+                          enable_instance_password,
+                          'Instance password not available.')
     def test_create_server_with_admin_password(self):
         # If an admin password is provided on server creation, the server's
         # root password should be set to that password.
@@ -71,9 +79,10 @@
         server = self.client.show_server(server['id'])['server']
         self.assertEqual(key_name, server['key_name'])
 
-    def _update_server_name(self, server_id, status):
+    def _update_server_name(self, server_id, status, prefix_name='server'):
         # The server name should be changed to the provided value
-        new_name = data_utils.rand_name('server')
+        new_name = data_utils.rand_name(prefix_name)
+
         # Update the server with a new name
         self.client.update_server(server_id,
                                   name=new_name)
@@ -88,8 +97,9 @@
     def test_update_server_name(self):
         # The server name should be changed to the provided value
         server = self.create_test_server(wait_until='ACTIVE')
-
-        self._update_server_name(server['id'], 'ACTIVE')
+        # Update instance name with non-ASCII characters
+        prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
+        self._update_server_name(server['id'], 'ACTIVE', prefix_name)
 
     @test.idempotent_id('6ac19cb1-27a3-40ec-b350-810bdc04c08e')
     def test_update_server_name_in_stop_state(self):
@@ -97,7 +107,11 @@
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.stop_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'SHUTOFF')
-        updated_server = self._update_server_name(server['id'], 'SHUTOFF')
+        # Update instance name with non-ASCII characters
+        prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
+        updated_server = self._update_server_name(server['id'],
+                                                  'SHUTOFF',
+                                                  prefix_name)
         self.assertNotIn('progress', updated_server)
 
     @test.idempotent_id('89b90870-bc13-4b73-96af-f9d4f2b70077')
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 98b292a..681b5db 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -152,7 +152,7 @@
         # Reboot a non existent server
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
-                          nonexistent_server, 'SOFT')
+                          nonexistent_server, type='SOFT')
 
     @test.idempotent_id('d1417e7f-a509-41b5-a102-d5eed8613369')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
@@ -169,8 +169,8 @@
 
     @test.attr(type=['negative'])
     @test.idempotent_id('98fa0458-1485-440f-873b-fe7f0d714930')
-    def test_rebuild_reboot_deleted_server(self):
-        # Rebuild and Reboot a deleted server
+    def test_rebuild_deleted_server(self):
+        # Rebuild a deleted server
         server = self.create_test_server()
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
@@ -178,8 +178,17 @@
         self.assertRaises(lib_exc.NotFound,
                           self.client.rebuild_server,
                           server['id'], self.image_ref_alt)
+
+    @test.attr(type=['negative'])
+    @test.idempotent_id('581a397d-5eab-486f-9cf9-1014bbd4c984')
+    def test_reboot_deleted_server(self):
+        # Reboot a deleted server
+        server = self.create_test_server()
+        self.client.delete_server(server['id'])
+        waiters.wait_for_server_termination(self.client, server['id'])
+
         self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
-                          server['id'], 'SOFT')
+                          server['id'], type='SOFT')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('d86141a7-906e-4731-b187-d64a2ea61422')
@@ -421,7 +430,7 @@
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.get_console_output,
-                          nonexistent_server, 10)
+                          nonexistent_server, length=10)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('6f47992b-5144-4250-9f8b-f00aa33950f3')
@@ -481,7 +490,7 @@
         server = self.client.show_server(self.server_id)['server']
         image_name = server['name'] + '-shelved'
         params = {'name': image_name}
-        images = self.images_client.list_images(**params)['images']
+        images = self.compute_images_client.list_images(**params)['images']
         self.assertEqual(1, len(images))
         self.assertEqual(image_name, images[0]['name'])
 
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index f8d0cca..bf4396d 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -48,17 +48,19 @@
     def setup_clients(cls):
         super(AuthorizationTestJSON, cls).setup_clients()
         cls.client = cls.os.servers_client
-        cls.images_client = cls.os.images_client
+        cls.compute_images_client = cls.os.compute_images_client
         cls.glance_client = cls.os.image_client
         cls.keypairs_client = cls.os.keypairs_client
-        cls.security_client = cls.os.security_groups_client
-        cls.rule_client = cls.os.security_group_rules_client
+        cls.security_client = cls.os.compute_security_groups_client
+        cls.rule_client = cls.os.compute_security_group_rules_client
 
         cls.alt_client = cls.alt_manager.servers_client
-        cls.alt_images_client = cls.alt_manager.images_client
+        cls.alt_compute_images_client = cls.alt_manager.compute_images_client
         cls.alt_keypairs_client = cls.alt_manager.keypairs_client
-        cls.alt_security_client = cls.alt_manager.security_groups_client
-        cls.alt_rule_client = cls.alt_manager.security_group_rules_client
+        cls.alt_security_client = (
+            cls.alt_manager.compute_security_groups_client)
+        cls.alt_rule_client = (
+            cls.alt_manager.compute_security_group_rules_client)
 
     @classmethod
     def resource_setup(cls):
@@ -76,7 +78,7 @@
         body = cls.glance_client.update_image(image_id,
                                               data=image_file)['image']
         cls.glance_client.wait_for_image_status(image_id, 'active')
-        cls.image = cls.images_client.show_image(image_id)['image']
+        cls.image = cls.compute_images_client.show_image(image_id)['image']
 
         cls.keypairname = data_utils.rand_name('keypair')
         cls.keypairs_client.create_keypair(name=cls.keypairname)
@@ -97,7 +99,7 @@
     @classmethod
     def resource_cleanup(cls):
         if hasattr(cls, 'image'):
-            cls.images_client.delete_image(cls.image['id'])
+            cls.compute_images_client.delete_image(cls.image['id'])
         if hasattr(cls, 'keypairname'):
             cls.keypairs_client.delete_keypair(cls.keypairname)
         if hasattr(cls, 'security_group'):
@@ -150,13 +152,13 @@
     def test_change_password_for_alt_account_fails(self):
         # A change password request for another user's server should fail
         self.assertRaises(lib_exc.NotFound, self.alt_client.change_password,
-                          self.server['id'], 'newpass')
+                          self.server['id'], adminPass='newpass')
 
     @test.idempotent_id('14cb5ff5-f646-45ca-8f51-09081d6c0c24')
     def test_reboot_server_for_alt_account_fails(self):
         # A reboot request for another user's server should fail
         self.assertRaises(lib_exc.NotFound, self.alt_client.reboot_server,
-                          self.server['id'], 'HARD')
+                          self.server['id'], type='HARD')
 
     @test.idempotent_id('8a0bce51-cd00-480b-88ba-dbc7d8408a37')
     def test_rebuild_server_for_alt_account_fails(self):
@@ -174,7 +176,7 @@
     def test_create_image_for_alt_account_fails(self):
         # A create image request for another user's server should fail
         self.assertRaises(lib_exc.NotFound,
-                          self.alt_images_client.create_image,
+                          self.alt_compute_images_client.create_image,
                           self.server['id'], name='testImage')
 
     @test.idempotent_id('95d445f6-babc-4f2e-aea3-aa24ec5e7f0d')
@@ -260,13 +262,14 @@
     def test_get_image_for_alt_account_fails(self):
         # A GET request for an image on another user's account should fail
         self.assertRaises(lib_exc.NotFound,
-                          self.alt_images_client.show_image, self.image['id'])
+                          self.alt_compute_images_client.show_image,
+                          self.image['id'])
 
     @test.idempotent_id('9facb962-f043-4a9d-b9ee-166a32dea098')
     def test_delete_image_for_alt_account_fails(self):
         # A DELETE request for another user's image should fail
         self.assertRaises(lib_exc.NotFound,
-                          self.alt_images_client.delete_image,
+                          self.alt_compute_images_client.delete_image,
                           self.image['id'])
 
     @test.idempotent_id('752c917e-83be-499d-a422-3559127f7d3c')
@@ -389,7 +392,7 @@
         # A set metadata for another user's image should fail
         req_metadata = {'meta1': 'value1', 'meta2': 'value2'}
         self.assertRaises(lib_exc.NotFound,
-                          self.alt_images_client.set_image_metadata,
+                          self.alt_compute_images_client.set_image_metadata,
                           self.image['id'], req_metadata)
 
     @test.idempotent_id('dea1936a-473d-49f2-92ad-97bb7aded22e')
@@ -400,20 +403,21 @@
         self.addCleanup(self.client.delete_server_metadata_item,
                         self.server['id'], 'meta1')
         self.assertRaises(lib_exc.NotFound,
-                          self.alt_client.get_server_metadata_item,
+                          self.alt_client.show_server_metadata_item,
                           self.server['id'], 'meta1')
 
     @test.idempotent_id('16b2d724-0d3b-4216-a9fa-97bd4d9cf670')
     def test_get_metadata_of_alt_account_image_fails(self):
         # A get metadata for another user's image should fail
         req_metadata = {'meta1': 'value1'}
-        self.addCleanup(self.images_client.delete_image_metadata_item,
+        self.addCleanup(self.compute_images_client.delete_image_metadata_item,
                         self.image['id'], 'meta1')
-        self.images_client.set_image_metadata(self.image['id'],
-                                              req_metadata)
-        self.assertRaises(lib_exc.NotFound,
-                          self.alt_images_client.show_image_metadata_item,
-                          self.image['id'], 'meta1')
+        self.compute_images_client.set_image_metadata(self.image['id'],
+                                                      req_metadata)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.alt_compute_images_client.show_image_metadata_item,
+            self.image['id'], 'meta1')
 
     @test.idempotent_id('79531e2e-e721-493c-8b30-a35db36fdaa6')
     def test_delete_metadata_of_alt_account_server_fails(self):
@@ -430,17 +434,18 @@
     def test_delete_metadata_of_alt_account_image_fails(self):
         # A delete metadata for another user's image should fail
         req_metadata = {'meta1': 'data1'}
-        self.addCleanup(self.images_client.delete_image_metadata_item,
+        self.addCleanup(self.compute_images_client.delete_image_metadata_item,
                         self.image['id'], 'meta1')
-        self.images_client.set_image_metadata(self.image['id'],
-                                              req_metadata)
-        self.assertRaises(lib_exc.NotFound,
-                          self.alt_images_client.delete_image_metadata_item,
-                          self.image['id'], 'meta1')
+        self.compute_images_client.set_image_metadata(self.image['id'],
+                                                      req_metadata)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.alt_compute_images_client.delete_image_metadata_item,
+            self.image['id'], 'meta1')
 
     @test.idempotent_id('b0c1e7a0-8853-40fd-8384-01f93d116cae')
     def test_get_console_output_of_alt_account_server_fails(self):
         # A Get Console Output for another user's server should fail
         self.assertRaises(lib_exc.NotFound,
                           self.alt_client.get_console_output,
-                          self.server['id'], 10)
+                          self.server['id'], length=10)
diff --git a/tempest/api/compute/test_versions.py b/tempest/api/compute/test_versions.py
index f94cee6..8b84a21 100644
--- a/tempest/api/compute/test_versions.py
+++ b/tempest/api/compute/test_versions.py
@@ -16,7 +16,7 @@
 from tempest import test
 
 
-class TestVersions(base.BaseComputeTest):
+class TestVersions(base.BaseV2ComputeTest):
 
     @test.idempotent_id('6c0a0990-43b6-4529-9b61-5fd8daf7c55c')
     def test_list_api_versions(self):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ab4ddf7..01a8e58 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -63,11 +63,11 @@
 
     def _create_and_attach(self):
         # Start a server and wait for it to become ready
-        admin_pass = self.image_ssh_password
+        self.admin_pass = self.image_ssh_password
         self.server = self.create_test_server(
             validatable=True,
             wait_until='ACTIVE',
-            adminPass=admin_pass)
+            adminPass=self.admin_pass)
 
         # Record addresses so that we can ssh later
         self.server['addresses'] = self.servers_client.list_addresses(
@@ -75,7 +75,7 @@
 
         # Create a volume and wait for it to become ready
         self.volume = self.volumes_client.create_volume(
-            CONF.volume.volume_size, display_name='test')['volume']
+            size=CONF.volume.volume_size, display_name='test')['volume']
         self.addCleanup(self._delete_volume)
         self.volumes_client.wait_for_volume_status(self.volume['id'],
                                                    'available')
@@ -108,7 +108,7 @@
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(self.server),
             self.image_ssh_user,
-            self.server['adminPass'],
+            self.admin_pass,
             self.validation_resources['keypair']['private_key'])
 
         partitions = linux_client.get_partitions()
@@ -127,7 +127,7 @@
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(self.server),
             self.image_ssh_user,
-            self.server['adminPass'],
+            self.admin_pass,
             self.validation_resources['keypair']['private_key'])
 
         partitions = linux_client.get_partitions()
@@ -144,7 +144,7 @@
         self.assertIn(self.attachment, body)
 
         # Get Volume attachment of the server
-        body = self.servers_client.get_volume_attachment(
+        body = self.servers_client.show_volume_attachment(
             self.server['id'],
             self.attachment['id'])['volumeAttachment']
         self.assertEqual(self.server['id'], body['serverId'])
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index a00c0ba..f42d153 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -50,7 +50,7 @@
         s_name = data_utils.rand_name('Snapshot')
         # Create snapshot
         snapshot = self.snapshots_client.create_snapshot(
-            volume['id'],
+            volume_id=volume['id'],
             display_name=s_name)['snapshot']
 
         def delete_snapshot(snapshot_id):
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index f0ed141..990e429 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -23,14 +23,11 @@
 
 
 class VolumesTestJSON(base.BaseV2ComputeTest):
-
-    """
-    This test creates a number of 1G volumes. To run successfully,
-    ensure that the backing file for the volume group that Nova uses
-    has space for at least 3 1G volumes!
-    If you are running a Devstack environment, ensure that the
-    VOLUME_BACKING_FILE_SIZE is atleast 4G in your localrc
-    """
+    # NOTE: This test creates a number of 1G volumes. To run successfully,
+    # ensure that the backing file for the volume group that Nova uses
+    # has space for at least 3 1G volumes!
+    # If you are running a Devstack environment, ensure that the
+    # VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 5d78539..b6d0c48 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from collections import OrderedDict
+import copy
 
 import six
 from tempest_lib import exceptions as lib_exc
@@ -27,42 +28,93 @@
 """Default templates.
 There should always be at least a master1 and a worker1 node
 group template."""
-DEFAULT_TEMPLATES = {
-    'vanilla': OrderedDict([
-        ('2.6.0', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['namenode', 'resourcemanager',
-                                       'hiveserver']
+BASE_VANILLA_DESC = {
+    'NODES': {
+        'master1': {
+            'count': 1,
+            'node_processes': ['namenode', 'resourcemanager',
+                               'hiveserver']
+        },
+        'master2': {
+            'count': 1,
+            'node_processes': ['oozie', 'historyserver',
+                               'secondarynamenode']
+        },
+        'worker1': {
+            'count': 1,
+            'node_processes': ['datanode', 'nodemanager'],
+            'node_configs': {
+                'MapReduce': {
+                    'yarn.app.mapreduce.am.resource.mb': 256,
+                    'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
                 },
-                'master2': {
-                    'count': 1,
-                    'node_processes': ['oozie', 'historyserver',
-                                       'secondarynamenode']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['datanode', 'nodemanager'],
-                    'node_configs': {
-                        'MapReduce': {
-                            'yarn.app.mapreduce.am.resource.mb': 256,
-                            'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
-                        },
-                        'YARN': {
-                            'yarn.scheduler.minimum-allocation-mb': 256,
-                            'yarn.scheduler.maximum-allocation-mb': 1024,
-                            'yarn.nodemanager.vmem-check-enabled': False
-                        }
-                    }
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs.replication': 1
+                'YARN': {
+                    'yarn.scheduler.minimum-allocation-mb': 256,
+                    'yarn.scheduler.maximum-allocation-mb': 1024,
+                    'yarn.nodemanager.vmem-check-enabled': False
                 }
             }
-        }),
+        }
+    },
+    'cluster_configs': {
+        'HDFS': {
+            'dfs.replication': 1
+        }
+    }
+}
+
+BASE_SPARK_DESC = {
+    'NODES': {
+        'master1': {
+            'count': 1,
+            'node_processes': ['namenode', 'master']
+        },
+        'worker1': {
+            'count': 1,
+            'node_processes': ['datanode', 'slave']
+        }
+    },
+    'cluster_configs': {
+        'HDFS': {
+            'dfs.replication': 1
+        }
+    }
+}
+
+BASE_CDH_DESC = {
+    'NODES': {
+        'master1': {
+            'count': 1,
+            'node_processes': ['CLOUDERA_MANAGER']
+        },
+        'master2': {
+            'count': 1,
+            'node_processes': ['HDFS_NAMENODE',
+                               'YARN_RESOURCEMANAGER']
+        },
+        'master3': {
+            'count': 1,
+            'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
+                               'HDFS_SECONDARYNAMENODE',
+                               'HIVE_METASTORE', 'HIVE_SERVER2']
+        },
+        'worker1': {
+            'count': 1,
+            'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
+        }
+    },
+    'cluster_configs': {
+        'HDFS': {
+            'dfs_replication': 1
+        }
+    }
+}
+
+
+DEFAULT_TEMPLATES = {
+    'vanilla': OrderedDict([
+        ('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
+        ('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
         ('1.2.1', {
             'NODES': {
                 'master1': {
@@ -123,81 +175,13 @@
         })
     ]),
     'spark': OrderedDict([
-        ('1.0.0', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['namenode', 'master']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['datanode', 'slave']
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs.replication': 1
-                }
-            }
-        })
+        ('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
+        ('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
     ]),
     'cdh': OrderedDict([
-        ('5.3.0', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['CLOUDERA_MANAGER']
-                },
-                'master2': {
-                    'count': 1,
-                    'node_processes': ['HDFS_NAMENODE',
-                                       'YARN_RESOURCEMANAGER']
-                },
-                'master3': {
-                    'count': 1,
-                    'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
-                                       'HDFS_SECONDARYNAMENODE',
-                                       'HIVE_METASTORE', 'HIVE_SERVER2']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs_replication': 1
-                }
-            }
-        }),
-        ('5', {
-            'NODES': {
-                'master1': {
-                    'count': 1,
-                    'node_processes': ['CLOUDERA_MANAGER']
-                },
-                'master2': {
-                    'count': 1,
-                    'node_processes': ['HDFS_NAMENODE',
-                                       'YARN_RESOURCEMANAGER']
-                },
-                'master3': {
-                    'count': 1,
-                    'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
-                                       'HDFS_SECONDARYNAMENODE',
-                                       'HIVE_METASTORE', 'HIVE_SERVER2']
-                },
-                'worker1': {
-                    'count': 1,
-                    'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
-                }
-            },
-            'cluster_configs': {
-                'HDFS': {
-                    'dfs_replication': 1
-                }
-            }
-        })
+        ('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
+        ('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
+        ('5', copy.deepcopy(BASE_CDH_DESC))
     ]),
     'mapr': OrderedDict([
         ('4.0.1.mrv2', {
@@ -399,6 +383,7 @@
     @classmethod
     def _get_default_version(cls):
         """Returns the default plugin version used for testing.
+
         This is gathered separately from the plugin to allow
         the usage of plugin name in skip_checks. This method is
         rather invoked into resource_setup, which allows API calls
@@ -439,6 +424,7 @@
     @classmethod
     def get_cluster_template(cls, node_group_template_ids=None):
         """Returns a cluster template for the default plugin.
+
         node_group_template_defined contains the type and ID of pre-defined
         node group templates that have to be used in the cluster template
         (instead of dynamically defining them with 'node_processes').
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index 42cbd14..dfd8e27 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -19,9 +19,9 @@
 
 
 class ClusterTemplateTest(dp_base.BaseDataProcessingTest):
-    """Link to the API documentation is http://docs.openstack.org/developer/
-    sahara/restapi/rest_api_v1.0.html#cluster-templates
-    """
+    # Link to the API documentation is http://docs.openstack.org/developer/
+    # sahara/restapi/rest_api_v1.0.html#cluster-templates
+
     @classmethod
     def skip_checks(cls):
         super(ClusterTemplateTest, cls).skip_checks()
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index 98b7e24..a47ddbc 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -18,9 +18,9 @@
 
 
 class JobBinaryTest(dp_base.BaseDataProcessingTest):
-    """Link to the API documentation is http://docs.openstack.org/developer/
-    sahara/restapi/rest_api_v1.1_EDP.html#job-binaries
-    """
+    # Link to the API documentation is http://docs.openstack.org/developer/
+    # sahara/restapi/rest_api_v1.1_EDP.html#job-binaries
+
     @classmethod
     def resource_setup(cls):
         super(JobBinaryTest, cls).resource_setup()
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 6919fa5..b4f0769 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -18,9 +18,9 @@
 
 
 class JobBinaryInternalTest(dp_base.BaseDataProcessingTest):
-    """Link to the API documentation is http://docs.openstack.org/developer/
-    sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
-    """
+    # Link to the API documentation is http://docs.openstack.org/developer/
+    # sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
+
     @classmethod
     def resource_setup(cls):
         super(JobBinaryInternalTest, cls).resource_setup()
diff --git a/tempest/api/data_processing/test_jobs.py b/tempest/api/data_processing/test_jobs.py
index 7798056..8503320 100644
--- a/tempest/api/data_processing/test_jobs.py
+++ b/tempest/api/data_processing/test_jobs.py
@@ -18,9 +18,9 @@
 
 
 class JobTest(dp_base.BaseDataProcessingTest):
-    """Link to the API documentation is http://docs.openstack.org/developer/
-    sahara/restapi/rest_api_v1.1_EDP.html#jobs
-    """
+    # NOTE: Link to the API documentation: http://docs.openstack.org/developer/
+    # sahara/restapi/rest_api_v1.1_EDP.html#jobs
+
     @classmethod
     def resource_setup(cls):
         super(JobTest, cls).resource_setup()
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index f4c1881..01e05db 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -13,13 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest import config
 import tempest.test
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class BaseDatabaseTest(tempest.test.BaseTestCase):
diff --git a/tempest/api/database/flavors/test_flavors.py b/tempest/api/database/flavors/test_flavors.py
index 62c1e05..f75b867 100644
--- a/tempest/api/database/flavors/test_flavors.py
+++ b/tempest/api/database/flavors/test_flavors.py
@@ -28,7 +28,7 @@
     @test.idempotent_id('c94b825e-0132-4686-8049-8a4a2bc09525')
     def test_get_db_flavor(self):
         # The expected flavor details should be returned
-        flavor = (self.client.get_db_flavor_details(self.db_flavor_ref)
+        flavor = (self.client.show_db_flavor(self.db_flavor_ref)
                   ['flavor'])
         self.assertEqual(self.db_flavor_ref, str(flavor['id']))
         self.assertIn('ram', flavor)
@@ -38,7 +38,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('685025d6-0cec-4673-8a8d-995cb8e0d3bb')
     def test_list_db_flavors(self):
-        flavor = (self.client.get_db_flavor_details(self.db_flavor_ref)
+        flavor = (self.client.show_db_flavor(self.db_flavor_ref)
                   ['flavor'])
         # List of all flavors should contain the expected flavor
         flavors = self.client.list_db_flavors()['flavors']
@@ -67,7 +67,7 @@
                          (os_flavors, db_flavors))
         for os_flavor in os_flavors:
             db_flavor =\
-                self.client.get_db_flavor_details(os_flavor['id'])['flavor']
+                self.client.show_db_flavor(os_flavor['id'])['flavor']
             self._check_values(['id', 'name', 'ram'], db_flavor, os_flavor)
             self._check_values(['disk', 'vcpus', 'swap'], db_flavor, os_flavor,
                                in_db=False)
diff --git a/tempest/api/database/flavors/test_flavors_negative.py b/tempest/api/database/flavors/test_flavors_negative.py
index 68cb7d6..3dee96f 100644
--- a/tempest/api/database/flavors/test_flavors_negative.py
+++ b/tempest/api/database/flavors/test_flavors_negative.py
@@ -31,4 +31,4 @@
     def test_get_non_existent_db_flavor(self):
         # flavor details are not returned for non-existent flavors
         self.assertRaises(lib_exc.NotFound,
-                          self.client.get_db_flavor_details, -1)
+                          self.client.show_db_flavor, -1)
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index bff4f91..df75d0a 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -27,7 +27,7 @@
         s_name = data_utils.rand_name('service')
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
-        cls.service_data = cls.client.create_service(
+        cls.service_data = cls.services_client.create_service(
             s_name, s_type, description=s_description)['OS-KSADM:service']
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
@@ -36,11 +36,12 @@
         for i in range(2):
             region = data_utils.rand_name('region')
             url = data_utils.rand_url()
-            endpoint = cls.client.create_endpoint(cls.service_id,
-                                                  region,
-                                                  publicurl=url,
-                                                  adminurl=url,
-                                                  internalurl=url)['endpoint']
+            endpoint = cls.endpoints_client.create_endpoint(
+                cls.service_id,
+                region,
+                publicurl=url,
+                adminurl=url,
+                internalurl=url)['endpoint']
             # list_endpoints() will return 'enabled' field
             endpoint['enabled'] = True
             cls.setup_endpoints.append(endpoint)
@@ -48,15 +49,15 @@
     @classmethod
     def resource_cleanup(cls):
         for e in cls.setup_endpoints:
-            cls.client.delete_endpoint(e['id'])
+            cls.endpoints_client.delete_endpoint(e['id'])
         for s in cls.service_ids:
-            cls.client.delete_service(s)
+            cls.services_client.delete_service(s)
         super(EndPointsTestJSON, cls).resource_cleanup()
 
     @test.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
     def test_list_endpoints(self):
         # Get a list of endpoints
-        fetched_endpoints = self.client.list_endpoints()['endpoints']
+        fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
         # Asserting LIST endpoints
         missing_endpoints =\
             [e for e in self.setup_endpoints if e not in fetched_endpoints]
@@ -68,22 +69,23 @@
     def test_create_list_delete_endpoint(self):
         region = data_utils.rand_name('region')
         url = data_utils.rand_url()
-        endpoint = self.client.create_endpoint(self.service_id,
-                                               region,
-                                               publicurl=url,
-                                               adminurl=url,
-                                               internalurl=url)['endpoint']
+        endpoint = self.endpoints_client.create_endpoint(
+            self.service_id,
+            region,
+            publicurl=url,
+            adminurl=url,
+            internalurl=url)['endpoint']
         # Asserting Create Endpoint response body
         self.assertIn('id', endpoint)
         self.assertEqual(region, endpoint['region'])
         self.assertEqual(url, endpoint['publicurl'])
         # Checking if created endpoint is present in the list of endpoints
-        fetched_endpoints = self.client.list_endpoints()['endpoints']
+        fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertIn(endpoint['id'], fetched_endpoints_id)
         # Deleting the endpoint created in this method
-        self.client.delete_endpoint(endpoint['id'])
+        self.endpoints_client.delete_endpoint(endpoint['id'])
         # Checking whether endpoint is deleted successfully
-        fetched_endpoints = self.client.list_endpoints()['endpoints']
+        fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertNotIn(endpoint['id'], fetched_endpoints_id)
diff --git a/tempest/api/identity/admin/v2/test_roles.py b/tempest/api/identity/admin/v2/test_roles.py
index 78beead..5847129 100644
--- a/tempest/api/identity/admin/v2/test_roles.py
+++ b/tempest/api/identity/admin/v2/test_roles.py
@@ -27,15 +27,15 @@
         super(RolesTestJSON, cls).resource_setup()
         for _ in moves.xrange(5):
             role_name = data_utils.rand_name(name='role')
-            role = cls.client.create_role(role_name)['role']
+            role = cls.roles_client.create_role(name=role_name)['role']
             cls.data.roles.append(role)
 
     def _get_role_params(self):
         self.data.setup_test_user()
         self.data.setup_test_role()
-        user = self.get_user_by_name(self.data.test_user)
-        tenant = self.get_tenant_by_name(self.data.test_tenant)
-        role = self.get_role_by_name(self.data.test_role)
+        user = self.get_user_by_name(self.data.user['name'])
+        tenant = self.get_tenant_by_name(self.data.tenant['name'])
+        role = self.get_role_by_name(self.data.role['name'])
         return (user, tenant, role)
 
     def assert_role_in_role_list(self, role, roles):
@@ -48,7 +48,7 @@
     @test.idempotent_id('75d9593f-50b7-4fcf-bd64-e3fb4a278e23')
     def test_list_roles(self):
         """Return a list of all roles."""
-        body = self.client.list_roles()['roles']
+        body = self.roles_client.list_roles()['roles']
         found = [role for role in body if role in self.data.roles]
         self.assertTrue(any(found))
         self.assertEqual(len(found), len(self.data.roles))
@@ -57,16 +57,16 @@
     def test_role_create_delete(self):
         """Role should be created, verified, and deleted."""
         role_name = data_utils.rand_name(name='role-test')
-        body = self.client.create_role(role_name)['role']
+        body = self.roles_client.create_role(name=role_name)['role']
         self.assertEqual(role_name, body['name'])
 
-        body = self.client.list_roles()['roles']
+        body = self.roles_client.list_roles()['roles']
         found = [role for role in body if role['name'] == role_name]
         self.assertTrue(any(found))
 
-        body = self.client.delete_role(found[0]['id'])
+        body = self.roles_client.delete_role(found[0]['id'])
 
-        body = self.client.list_roles()['roles']
+        body = self.roles_client.list_roles()['roles']
         found = [role for role in body if role['name'] == role_name]
         self.assertFalse(any(found))
 
@@ -76,7 +76,7 @@
         self.data.setup_test_role()
         role_id = self.data.role['id']
         role_name = self.data.role['name']
-        body = self.client.get_role(role_id)['role']
+        body = self.roles_client.show_role(role_id)['role']
         self.assertEqual(role_id, body['id'])
         self.assertEqual(role_name, body['name'])
 
@@ -84,24 +84,28 @@
     def test_assign_user_role(self):
         """Assign a role to a user on a tenant."""
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
-        roles = self.client.list_user_roles(tenant['id'], user['id'])['roles']
+        self.roles_client.assign_user_role(tenant['id'], user['id'],
+                                           role['id'])
+        roles = self.roles_client.list_user_roles(tenant['id'],
+                                                  user['id'])['roles']
         self.assert_role_in_role_list(role, roles)
 
     @test.idempotent_id('f0b9292c-d3ba-4082-aa6c-440489beef69')
     def test_remove_user_role(self):
         """Remove a role assigned to a user on a tenant."""
         (user, tenant, role) = self._get_role_params()
-        user_role = self.client.assign_user_role(tenant['id'],
-                                                 user['id'],
-                                                 role['id'])['role']
-        self.client.remove_user_role(tenant['id'], user['id'],
-                                     user_role['id'])
+        user_role = self.roles_client.assign_user_role(tenant['id'],
+                                                       user['id'],
+                                                       role['id'])['role']
+        self.roles_client.delete_user_role(tenant['id'], user['id'],
+                                           user_role['id'])
 
     @test.idempotent_id('262e1e3e-ed71-4edd-a0e5-d64e83d66d05')
     def test_list_user_roles(self):
         """List roles assigned to a user on tenant."""
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
-        roles = self.client.list_user_roles(tenant['id'], user['id'])['roles']
+        self.roles_client.assign_user_role(tenant['id'], user['id'],
+                                           role['id'])
+        roles = self.roles_client.list_user_roles(tenant['id'],
+                                                  user['id'])['roles']
         self.assert_role_in_role_list(role, roles)
diff --git a/tempest/api/identity/admin/v2/test_roles_negative.py b/tempest/api/identity/admin/v2/test_roles_negative.py
index 5932aba..23a1958 100644
--- a/tempest/api/identity/admin/v2/test_roles_negative.py
+++ b/tempest/api/identity/admin/v2/test_roles_negative.py
@@ -27,9 +27,9 @@
     def _get_role_params(self):
         self.data.setup_test_user()
         self.data.setup_test_role()
-        user = self.get_user_by_name(self.data.test_user)
-        tenant = self.get_tenant_by_name(self.data.test_tenant)
-        role = self.get_role_by_name(self.data.test_role)
+        user = self.get_user_by_name(self.data.user['name'])
+        tenant = self.get_tenant_by_name(self.data.tenant['name'])
+        role = self.get_role_by_name(self.data.role['name'])
         return (user, tenant, role)
 
     @test.attr(type=['negative'])
@@ -37,7 +37,7 @@
     def test_list_roles_by_unauthorized_user(self):
         # Non-administrator user should not be able to list roles
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.list_roles)
+                          self.non_admin_roles_client.list_roles)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('11a3c7da-df6c-40c2-abc2-badd682edf9f')
@@ -45,14 +45,15 @@
         # Request to list roles without a valid token should fail
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.list_roles)
+        self.assertRaises(lib_exc.Unauthorized, self.roles_client.list_roles)
         self.client.auth_provider.clear_auth()
 
     @test.attr(type=['negative'])
     @test.idempotent_id('c0b89e56-accc-4c73-85f8-9c0f866104c1')
     def test_role_create_blank_name(self):
         # Should not be able to create a role with a blank name
-        self.assertRaises(lib_exc.BadRequest, self.client.create_role, '')
+        self.assertRaises(lib_exc.BadRequest, self.roles_client.create_role,
+                          name='')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('585c8998-a8a4-4641-a5dd-abef7a8ced00')
@@ -60,7 +61,8 @@
         # Non-administrator user should not be able to create role
         role_name = data_utils.rand_name(name='role')
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.create_role, role_name)
+                          self.non_admin_roles_client.create_role,
+                          name=role_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('a7edd17a-e34a-4aab-8bb7-fa6f498645b8')
@@ -70,7 +72,7 @@
         self.client.delete_token(token)
         role_name = data_utils.rand_name(name='role')
         self.assertRaises(lib_exc.Unauthorized,
-                          self.client.create_role, role_name)
+                          self.roles_client.create_role, name=role_name)
         self.client.auth_provider.clear_auth()
 
     @test.attr(type=['negative'])
@@ -78,35 +80,35 @@
     def test_role_create_duplicate(self):
         # Role names should be unique
         role_name = data_utils.rand_name(name='role-dup')
-        body = self.client.create_role(role_name)['role']
+        body = self.roles_client.create_role(name=role_name)['role']
         role1_id = body.get('id')
-        self.addCleanup(self.client.delete_role, role1_id)
-        self.assertRaises(lib_exc.Conflict, self.client.create_role,
-                          role_name)
+        self.addCleanup(self.roles_client.delete_role, role1_id)
+        self.assertRaises(lib_exc.Conflict, self.roles_client.create_role,
+                          name=role_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('15347635-b5b1-4a87-a280-deb2bd6d865e')
     def test_delete_role_by_unauthorized_user(self):
         # Non-administrator user should not be able to delete role
         role_name = data_utils.rand_name(name='role')
-        body = self.client.create_role(role_name)['role']
+        body = self.roles_client.create_role(name=role_name)['role']
         self.data.roles.append(body)
         role_id = body.get('id')
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.delete_role, role_id)
+                          self.non_admin_roles_client.delete_role, role_id)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('44b60b20-70de-4dac-beaf-a3fc2650a16b')
     def test_delete_role_request_without_token(self):
         # Request to delete role without a valid token should fail
         role_name = data_utils.rand_name(name='role')
-        body = self.client.create_role(role_name)['role']
+        body = self.roles_client.create_role(name=role_name)['role']
         self.data.roles.append(body)
         role_id = body.get('id')
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         self.assertRaises(lib_exc.Unauthorized,
-                          self.client.delete_role,
+                          self.roles_client.delete_role,
                           role_id)
         self.client.auth_provider.clear_auth()
 
@@ -115,7 +117,7 @@
     def test_delete_role_non_existent(self):
         # Attempt to delete a non existent role should fail
         non_existent_role = str(uuid.uuid4().hex)
-        self.assertRaises(lib_exc.NotFound, self.client.delete_role,
+        self.assertRaises(lib_exc.NotFound, self.roles_client.delete_role,
                           non_existent_role)
 
     @test.attr(type=['negative'])
@@ -125,7 +127,7 @@
         # assign a role to user
         (user, tenant, role) = self._get_role_params()
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.assign_user_role,
+                          self.non_admin_roles_client.assign_user_role,
                           tenant['id'], user['id'], role['id'])
 
     @test.attr(type=['negative'])
@@ -136,7 +138,7 @@
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         self.assertRaises(lib_exc.Unauthorized,
-                          self.client.assign_user_role, tenant['id'],
+                          self.roles_client.assign_user_role, tenant['id'],
                           user['id'], role['id'])
         self.client.auth_provider.clear_auth()
 
@@ -146,7 +148,7 @@
         # Attempt to assign a non existent role to user should fail
         (user, tenant, role) = self._get_role_params()
         non_existent_role = str(uuid.uuid4().hex)
-        self.assertRaises(lib_exc.NotFound, self.client.assign_user_role,
+        self.assertRaises(lib_exc.NotFound, self.roles_client.assign_user_role,
                           tenant['id'], user['id'], non_existent_role)
 
     @test.attr(type=['negative'])
@@ -155,7 +157,7 @@
         # Attempt to assign a role on a non existent tenant should fail
         (user, tenant, role) = self._get_role_params()
         non_existent_tenant = str(uuid.uuid4().hex)
-        self.assertRaises(lib_exc.NotFound, self.client.assign_user_role,
+        self.assertRaises(lib_exc.NotFound, self.roles_client.assign_user_role,
                           non_existent_tenant, user['id'], role['id'])
 
     @test.attr(type=['negative'])
@@ -163,8 +165,9 @@
     def test_assign_duplicate_user_role(self):
         # Duplicate user role should not get assigned
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
-        self.assertRaises(lib_exc.Conflict, self.client.assign_user_role,
+        self.roles_client.assign_user_role(tenant['id'], user['id'],
+                                           role['id'])
+        self.assertRaises(lib_exc.Conflict, self.roles_client.assign_user_role,
                           tenant['id'], user['id'], role['id'])
 
     @test.attr(type=['negative'])
@@ -173,11 +176,11 @@
         # Non-administrator user should not be authorized to
         # remove a user's role
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'],
-                                     user['id'],
-                                     role['id'])
+        self.roles_client.assign_user_role(tenant['id'],
+                                           user['id'],
+                                           role['id'])
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.remove_user_role,
+                          self.non_admin_roles_client.delete_user_role,
                           tenant['id'], user['id'], role['id'])
 
     @test.attr(type=['negative'])
@@ -185,13 +188,13 @@
     def test_remove_user_role_request_without_token(self):
         # Request to remove a user's role without a valid token
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'],
-                                     user['id'],
-                                     role['id'])
+        self.roles_client.assign_user_role(tenant['id'],
+                                           user['id'],
+                                           role['id'])
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         self.assertRaises(lib_exc.Unauthorized,
-                          self.client.remove_user_role, tenant['id'],
+                          self.roles_client.delete_user_role, tenant['id'],
                           user['id'], role['id'])
         self.client.auth_provider.clear_auth()
 
@@ -200,11 +203,11 @@
     def test_remove_user_role_non_existent_role(self):
         # Attempt to delete a non existent role from a user should fail
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'],
-                                     user['id'],
-                                     role['id'])
+        self.roles_client.assign_user_role(tenant['id'],
+                                           user['id'],
+                                           role['id'])
         non_existent_role = str(uuid.uuid4().hex)
-        self.assertRaises(lib_exc.NotFound, self.client.remove_user_role,
+        self.assertRaises(lib_exc.NotFound, self.roles_client.delete_user_role,
                           tenant['id'], user['id'], non_existent_role)
 
     @test.attr(type=['negative'])
@@ -212,11 +215,11 @@
     def test_remove_user_role_non_existent_tenant(self):
         # Attempt to remove a role from a non existent tenant should fail
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'],
-                                     user['id'],
-                                     role['id'])
+        self.roles_client.assign_user_role(tenant['id'],
+                                           user['id'],
+                                           role['id'])
         non_existent_tenant = str(uuid.uuid4().hex)
-        self.assertRaises(lib_exc.NotFound, self.client.remove_user_role,
+        self.assertRaises(lib_exc.NotFound, self.roles_client.delete_user_role,
                           non_existent_tenant, user['id'], role['id'])
 
     @test.attr(type=['negative'])
@@ -225,10 +228,11 @@
         # Non-administrator user should not be authorized to list
         # a user's roles
         (user, tenant, role) = self._get_role_params()
-        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
+        self.roles_client.assign_user_role(tenant['id'], user['id'],
+                                           role['id'])
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.list_user_roles, tenant['id'],
-                          user['id'])
+                          self.non_admin_roles_client.list_user_roles,
+                          tenant['id'], user['id'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('682adfb2-fd5f-4b0a-a9ca-322e9bebb907')
@@ -239,7 +243,7 @@
         self.client.delete_token(token)
         try:
             self.assertRaises(lib_exc.Unauthorized,
-                              self.client.list_user_roles, tenant['id'],
+                              self.roles_client.list_user_roles, tenant['id'],
                               user['id'])
         finally:
             self.client.auth_provider.clear_auth()
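
Note: the hunks above move the role calls from the general identity client onto a
dedicated roles client, pass the role name to create_role as a keyword argument,
and rename remove_user_role to delete_user_role. The following is a minimal,
illustrative sketch of the new call shapes only; mock objects stand in for the
Tempest service clients, so it is not Tempest code itself.

    from unittest import mock

    roles_client = mock.Mock()
    roles_client.create_role.return_value = {
        'role': {'id': 'r1', 'name': 'demo'}}

    # Role name is now passed as a keyword argument.
    role = roles_client.create_role(name='demo')['role']
    roles_client.assign_user_role('tenant-id', 'user-id', role['id'])
    # remove_user_role has been renamed to delete_user_role.
    roles_client.delete_user_role('tenant-id', 'user-id', role['id'])
    roles_client.delete_role(role['id'])

    roles_client.create_role.assert_called_once_with(name='demo')
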
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index eebeedb..5685922 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -25,9 +25,9 @@
 
     def _del_service(self, service_id):
         # Deleting the service created in this method
-        self.client.delete_service(service_id)
+        self.services_client.delete_service(service_id)
         # Checking whether service is deleted successfully
-        self.assertRaises(lib_exc.NotFound, self.client.get_service,
+        self.assertRaises(lib_exc.NotFound, self.services_client.show_service,
                           service_id)
 
     @test.idempotent_id('84521085-c6e6-491c-9a08-ec9f70f90110')
@@ -37,7 +37,7 @@
         name = data_utils.rand_name('service')
         type = data_utils.rand_name('type')
         description = data_utils.rand_name('description')
-        service_data = self.client.create_service(
+        service_data = self.services_client.create_service(
             name, type, description=description)['OS-KSADM:service']
         self.assertFalse(service_data['id'] is None)
         self.addCleanup(self._del_service, service_data['id'])
@@ -50,8 +50,9 @@
         self.assertIn('description', service_data)
         self.assertEqual(description, service_data['description'])
         # Get service
-        fetched_service = (self.client.get_service(service_data['id'])
-                           ['OS-KSADM:service'])
+        fetched_service = (
+            self.services_client.show_service(service_data['id'])
+            ['OS-KSADM:service'])
         # verifying the existence of service created
         self.assertIn('id', fetched_service)
         self.assertEqual(fetched_service['id'], service_data['id'])
@@ -68,7 +69,8 @@
         # Create a service only with name and type
         name = data_utils.rand_name('service')
         type = data_utils.rand_name('type')
-        service = self.client.create_service(name, type)['OS-KSADM:service']
+        service = self.services_client.create_service(name,
+                                                      type)['OS-KSADM:service']
         self.assertIn('id', service)
         self.addCleanup(self._del_service, service['id'])
         self.assertIn('name', service)
@@ -85,17 +87,17 @@
             name = data_utils.rand_name('service')
             type = data_utils.rand_name('type')
             description = data_utils.rand_name('description')
-            service = self.client.create_service(
+            service = self.services_client.create_service(
                 name, type, description=description)['OS-KSADM:service']
             services.append(service)
         service_ids = map(lambda x: x['id'], services)
 
         def delete_services():
             for service_id in service_ids:
-                self.client.delete_service(service_id)
+                self.services_client.delete_service(service_id)
 
         self.addCleanup(delete_services)
         # List and Verify Services
-        body = self.client.list_services()['OS-KSADM:services']
+        body = self.services_client.list_services()['OS-KSADM:services']
         found = [serv for serv in body if serv['id'] in service_ids]
         self.assertEqual(len(found), len(services), 'Services not found')
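
Note: get_service becomes show_service above (a GET of a single resource is
exposed as show_*, while GET of a collection stays list_*). Below is a small,
self-contained sketch of the delete-then-verify pattern used by _del_service;
StubServicesClient and ServiceNotFound are illustrative names, not Tempest code.

    class ServiceNotFound(Exception):
        pass

    class StubServicesClient(object):
        def __init__(self):
            self._services = {'svc-1': {'id': 'svc-1', 'name': 'demo'}}

        def delete_service(self, service_id):
            self._services.pop(service_id, None)

        def show_service(self, service_id):
            # GET of one resource, hence the show_* name.
            try:
                return {'OS-KSADM:service': self._services[service_id]}
            except KeyError:
                raise ServiceNotFound(service_id)

    client = StubServicesClient()
    client.delete_service('svc-1')
    try:
        client.show_service('svc-1')
    except ServiceNotFound:
        print('deleted service is no longer retrievable')
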
diff --git a/tempest/api/identity/admin/v2/test_tenant_negative.py b/tempest/api/identity/admin/v2/test_tenant_negative.py
index 74558d1..a02dbc1 100644
--- a/tempest/api/identity/admin/v2/test_tenant_negative.py
+++ b/tempest/api/identity/admin/v2/test_tenant_negative.py
@@ -29,7 +29,7 @@
     def test_list_tenants_by_unauthorized_user(self):
         # Non-administrator user should not be able to list tenants
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.list_tenants)
+                          self.non_admin_tenants_client.list_tenants)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('df33926c-1c96-4d8d-a762-79cc6b0c3cf4')
@@ -37,7 +37,8 @@
         # Request to list tenants without a valid token should fail
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.list_tenants)
+        self.assertRaises(lib_exc.Unauthorized,
+                          self.tenants_client.list_tenants)
         self.client.auth_provider.clear_auth()
 
     @test.attr(type=['negative'])
@@ -45,21 +46,23 @@
     def test_tenant_delete_by_unauthorized_user(self):
         # Non-administrator user should not be able to delete a tenant
         tenant_name = data_utils.rand_name(name='tenant')
-        tenant = self.client.create_tenant(tenant_name)['tenant']
+        tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
         self.data.tenants.append(tenant)
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.delete_tenant, tenant['id'])
+                          self.non_admin_tenants_client.delete_tenant,
+                          tenant['id'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('e450db62-2e9d-418f-893a-54772d6386b1')
     def test_tenant_delete_request_without_token(self):
         # Request to delete a tenant without a valid token should fail
         tenant_name = data_utils.rand_name(name='tenant')
-        tenant = self.client.create_tenant(tenant_name)['tenant']
+        tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
         self.data.tenants.append(tenant)
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.delete_tenant,
+        self.assertRaises(lib_exc.Unauthorized,
+                          self.tenants_client.delete_tenant,
                           tenant['id'])
         self.client.auth_provider.clear_auth()
 
@@ -67,7 +70,7 @@
     @test.idempotent_id('9c9a2aed-6e3c-467a-8f5c-89da9d1b516b')
     def test_delete_non_existent_tenant(self):
         # Attempt to delete a non existent tenant should fail
-        self.assertRaises(lib_exc.NotFound, self.client.delete_tenant,
+        self.assertRaises(lib_exc.NotFound, self.tenants_client.delete_tenant,
                           str(uuid.uuid4().hex))
 
     @test.attr(type=['negative'])
@@ -75,14 +78,14 @@
     def test_tenant_create_duplicate(self):
         # Tenant names should be unique
         tenant_name = data_utils.rand_name(name='tenant')
-        body = self.client.create_tenant(tenant_name)['tenant']
+        body = self.tenants_client.create_tenant(tenant_name)['tenant']
         tenant = body
         self.data.tenants.append(tenant)
         tenant1_id = body.get('id')
 
-        self.addCleanup(self.client.delete_tenant, tenant1_id)
+        self.addCleanup(self.tenants_client.delete_tenant, tenant1_id)
         self.addCleanup(self.data.tenants.remove, tenant)
-        self.assertRaises(lib_exc.Conflict, self.client.create_tenant,
+        self.assertRaises(lib_exc.Conflict, self.tenants_client.create_tenant,
                           tenant_name)
 
     @test.attr(type=['negative'])
@@ -91,7 +94,8 @@
         # Non-administrator user should not be authorized to create a tenant
         tenant_name = data_utils.rand_name(name='tenant')
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.create_tenant, tenant_name)
+                          self.non_admin_tenants_client.create_tenant,
+                          tenant_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('a3ee9d7e-6920-4dd5-9321-d4b2b7f0a638')
@@ -100,7 +104,8 @@
         tenant_name = data_utils.rand_name(name='tenant')
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.create_tenant,
+        self.assertRaises(lib_exc.Unauthorized,
+                          self.tenants_client.create_tenant,
                           tenant_name)
         self.client.auth_provider.clear_auth()
 
@@ -108,7 +113,8 @@
     @test.idempotent_id('5a2e4ca9-b0c0-486c-9c48-64a94fba2395')
     def test_create_tenant_with_empty_name(self):
         # Tenant name should not be empty
-        self.assertRaises(lib_exc.BadRequest, self.client.create_tenant,
+        self.assertRaises(lib_exc.BadRequest,
+                          self.tenants_client.create_tenant,
                           name='')
 
     @test.attr(type=['negative'])
@@ -116,14 +122,15 @@
     def test_create_tenants_name_length_over_64(self):
         # Tenant name length should not be greater than 64 characters
         tenant_name = 'a' * 65
-        self.assertRaises(lib_exc.BadRequest, self.client.create_tenant,
+        self.assertRaises(lib_exc.BadRequest,
+                          self.tenants_client.create_tenant,
                           tenant_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('bd20dc2a-9557-4db7-b755-f48d952ad706')
     def test_update_non_existent_tenant(self):
         # Attempt to update a non existent tenant should fail
-        self.assertRaises(lib_exc.NotFound, self.client.update_tenant,
+        self.assertRaises(lib_exc.NotFound, self.tenants_client.update_tenant,
                           str(uuid.uuid4().hex))
 
     @test.attr(type=['negative'])
@@ -131,20 +138,22 @@
     def test_tenant_update_by_unauthorized_user(self):
         # Non-administrator user should not be able to update a tenant
         tenant_name = data_utils.rand_name(name='tenant')
-        tenant = self.client.create_tenant(tenant_name)['tenant']
+        tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
         self.data.tenants.append(tenant)
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.update_tenant, tenant['id'])
+                          self.non_admin_tenants_client.update_tenant,
+                          tenant['id'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('7a421573-72c7-4c22-a98e-ce539219c657')
     def test_tenant_update_request_without_token(self):
         # Request to update a tenant without a valid token should fail
         tenant_name = data_utils.rand_name(name='tenant')
-        tenant = self.client.create_tenant(tenant_name)['tenant']
+        tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
         self.data.tenants.append(tenant)
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.update_tenant,
+        self.assertRaises(lib_exc.Unauthorized,
+                          self.tenants_client.update_tenant,
                           tenant['id'])
         self.client.auth_provider.clear_auth()
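
Note: the "request without a valid token" tests above all follow the same
sequence: fetch the cached token, delete it, expect Unauthorized from the next
call, then clear the cached auth so later tests re-authenticate. A reduced,
illustrative sketch of that sequence, with mocks standing in for the clients:

    from unittest import mock

    client = mock.Mock()           # identity client handling tokens
    tenants_client = mock.Mock()   # tenants client under test
    tenants_client.list_tenants.side_effect = RuntimeError('401 Unauthorized')

    token = client.auth_provider.get_token()
    client.delete_token(token)         # invalidate the cached token
    try:
        tenants_client.list_tenants()  # request made with the stale token
    except RuntimeError:
        pass                           # expected: unauthorized
    finally:
        client.auth_provider.clear_auth()  # force fresh auth for later tests
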
diff --git a/tempest/api/identity/admin/v2/test_tenants.py b/tempest/api/identity/admin/v2/test_tenants.py
index 2ec5c4f..8d0b9b1 100644
--- a/tempest/api/identity/admin/v2/test_tenants.py
+++ b/tempest/api/identity/admin/v2/test_tenants.py
@@ -28,19 +28,19 @@
         tenants = []
         for _ in moves.xrange(3):
             tenant_name = data_utils.rand_name(name='tenant-new')
-            tenant = self.client.create_tenant(tenant_name)['tenant']
+            tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
             self.data.tenants.append(tenant)
             tenants.append(tenant)
         tenant_ids = map(lambda x: x['id'], tenants)
-        body = self.client.list_tenants()['tenants']
+        body = self.tenants_client.list_tenants()['tenants']
         found = [t for t in body if t['id'] in tenant_ids]
         self.assertEqual(len(found), len(tenants), 'Tenants not created')
 
         for tenant in tenants:
-            self.client.delete_tenant(tenant['id'])
+            self.tenants_client.delete_tenant(tenant['id'])
             self.data.tenants.remove(tenant)
 
-        body = self.client.list_tenants()['tenants']
+        body = self.tenants_client.list_tenants()['tenants']
         found = [tenant for tenant in body if tenant['id'] in tenant_ids]
         self.assertFalse(any(found), 'Tenants failed to delete')
 
@@ -49,60 +49,60 @@
         # Create tenant with a description
         tenant_name = data_utils.rand_name(name='tenant')
         tenant_desc = data_utils.rand_name(name='desc')
-        body = self.client.create_tenant(tenant_name,
-                                         description=tenant_desc)['tenant']
-        tenant = body
+        body = self.tenants_client.create_tenant(tenant_name,
+                                                 description=tenant_desc)
+        tenant = body['tenant']
         self.data.tenants.append(tenant)
-        tenant_id = body['id']
-        desc1 = body['description']
+        tenant_id = tenant['id']
+        desc1 = tenant['description']
         self.assertEqual(desc1, tenant_desc, 'Description should have '
                          'been sent in response for create')
-        body = self.client.get_tenant(tenant_id)['tenant']
+        body = self.tenants_client.show_tenant(tenant_id)['tenant']
         desc2 = body['description']
         self.assertEqual(desc2, tenant_desc, 'Description does not appear '
                          'to be set')
-        self.client.delete_tenant(tenant_id)
+        self.tenants_client.delete_tenant(tenant_id)
         self.data.tenants.remove(tenant)
 
     @test.idempotent_id('670bdddc-1cd7-41c7-b8e2-751cfb67df50')
     def test_tenant_create_enabled(self):
         # Create a tenant that is enabled
         tenant_name = data_utils.rand_name(name='tenant')
-        body = self.client.create_tenant(tenant_name, enabled=True)['tenant']
-        tenant = body
+        body = self.tenants_client.create_tenant(tenant_name, enabled=True)
+        tenant = body['tenant']
         self.data.tenants.append(tenant)
-        tenant_id = body['id']
-        en1 = body['enabled']
+        tenant_id = tenant['id']
+        en1 = tenant['enabled']
         self.assertTrue(en1, 'Enable should be True in response')
-        body = self.client.get_tenant(tenant_id)['tenant']
+        body = self.tenants_client.show_tenant(tenant_id)['tenant']
         en2 = body['enabled']
         self.assertTrue(en2, 'Enable should be True in lookup')
-        self.client.delete_tenant(tenant_id)
+        self.tenants_client.delete_tenant(tenant_id)
         self.data.tenants.remove(tenant)
 
     @test.idempotent_id('3be22093-b30f-499d-b772-38340e5e16fb')
     def test_tenant_create_not_enabled(self):
         # Create a tenant that is not enabled
         tenant_name = data_utils.rand_name(name='tenant')
-        body = self.client.create_tenant(tenant_name, enabled=False)['tenant']
-        tenant = body
+        body = self.tenants_client.create_tenant(tenant_name, enabled=False)
+        tenant = body['tenant']
         self.data.tenants.append(tenant)
-        tenant_id = body['id']
-        en1 = body['enabled']
+        tenant_id = tenant['id']
+        en1 = tenant['enabled']
         self.assertEqual('false', str(en1).lower(),
                          'Enable should be False in response')
-        body = self.client.get_tenant(tenant_id)['tenant']
+        body = self.tenants_client.show_tenant(tenant_id)['tenant']
         en2 = body['enabled']
         self.assertEqual('false', str(en2).lower(),
                          'Enable should be False in lookup')
-        self.client.delete_tenant(tenant_id)
+        self.tenants_client.delete_tenant(tenant_id)
         self.data.tenants.remove(tenant)
 
     @test.idempotent_id('781f2266-d128-47f3-8bdb-f70970add238')
     def test_tenant_update_name(self):
         # Update name attribute of a tenant
         t_name1 = data_utils.rand_name(name='tenant')
-        body = self.client.create_tenant(t_name1)['tenant']
+        body = self.tenants_client.create_tenant(t_name1)['tenant']
         tenant = body
         self.data.tenants.append(tenant)
 
@@ -110,18 +110,18 @@
         resp1_name = body['name']
 
         t_name2 = data_utils.rand_name(name='tenant2')
-        body = self.client.update_tenant(t_id, name=t_name2)['tenant']
+        body = self.tenants_client.update_tenant(t_id, name=t_name2)['tenant']
         resp2_name = body['name']
         self.assertNotEqual(resp1_name, resp2_name)
 
-        body = self.client.get_tenant(t_id)['tenant']
+        body = self.tenants_client.show_tenant(t_id)['tenant']
         resp3_name = body['name']
 
         self.assertNotEqual(resp1_name, resp3_name)
         self.assertEqual(t_name1, resp1_name)
         self.assertEqual(resp2_name, resp3_name)
 
-        self.client.delete_tenant(t_id)
+        self.tenants_client.delete_tenant(t_id)
         self.data.tenants.remove(tenant)
 
     @test.idempotent_id('859fcfe1-3a03-41ef-86f9-b19a47d1cd87')
@@ -129,26 +129,27 @@
         # Update description attribute of a tenant
         t_name = data_utils.rand_name(name='tenant')
         t_desc = data_utils.rand_name(name='desc')
-        body = self.client.create_tenant(t_name, description=t_desc)['tenant']
-        tenant = body
+        body = self.tenants_client.create_tenant(t_name, description=t_desc)
+        tenant = body['tenant']
         self.data.tenants.append(tenant)
 
-        t_id = body['id']
-        resp1_desc = body['description']
+        t_id = tenant['id']
+        resp1_desc = tenant['description']
 
         t_desc2 = data_utils.rand_name(name='desc2')
-        body = self.client.update_tenant(t_id, description=t_desc2)['tenant']
-        resp2_desc = body['description']
+        body = self.tenants_client.update_tenant(t_id, description=t_desc2)
+        updated_tenant = body['tenant']
+        resp2_desc = updated_tenant['description']
         self.assertNotEqual(resp1_desc, resp2_desc)
 
-        body = self.client.get_tenant(t_id)['tenant']
+        body = self.tenants_client.show_tenant(t_id)['tenant']
         resp3_desc = body['description']
 
         self.assertNotEqual(resp1_desc, resp3_desc)
         self.assertEqual(t_desc, resp1_desc)
         self.assertEqual(resp2_desc, resp3_desc)
 
-        self.client.delete_tenant(t_id)
+        self.tenants_client.delete_tenant(t_id)
         self.data.tenants.remove(tenant)
 
     @test.idempotent_id('8fc8981f-f12d-4c66-9972-2bdcf2bc2e1a')
@@ -156,24 +157,25 @@
         # Update the enabled attribute of a tenant
         t_name = data_utils.rand_name(name='tenant')
         t_en = False
-        body = self.client.create_tenant(t_name, enabled=t_en)['tenant']
-        tenant = body
+        body = self.tenants_client.create_tenant(t_name, enabled=t_en)
+        tenant = body['tenant']
         self.data.tenants.append(tenant)
 
-        t_id = body['id']
-        resp1_en = body['enabled']
+        t_id = tenant['id']
+        resp1_en = tenant['enabled']
 
         t_en2 = True
-        body = self.client.update_tenant(t_id, enabled=t_en2)['tenant']
-        resp2_en = body['enabled']
+        body = self.tenants_client.update_tenant(t_id, enabled=t_en2)
+        updated_tenant = body['tenant']
+        resp2_en = updated_tenant['enabled']
         self.assertNotEqual(resp1_en, resp2_en)
 
-        body = self.client.get_tenant(t_id)['tenant']
+        body = self.tenants_client.show_tenant(t_id)['tenant']
         resp3_en = body['enabled']
 
         self.assertNotEqual(resp1_en, resp3_en)
         self.assertEqual('false', str(resp1_en).lower())
         self.assertEqual(resp2_en, resp3_en)
 
-        self.client.delete_tenant(t_id)
+        self.tenants_client.delete_tenant(t_id)
         self.data.tenants.remove(tenant)
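
Note: the hunks above keep the raw response body and unwrap the 'tenant'
payload once, instead of indexing ['tenant'] inline, and they compare the
enabled flag through str(...).lower() so both a boolean and a string value are
accepted. A tiny illustrative example of that unwrapping and check:

    for returned in (False, 'false'):
        body = {'tenant': {'id': 't-1', 'name': 'demo', 'enabled': returned}}
        tenant = body['tenant']
        assert str(tenant['enabled']).lower() == 'false'
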
diff --git a/tempest/api/identity/admin/v2/test_tokens.py b/tempest/api/identity/admin/v2/test_tokens.py
index 981a9ea..ee04420 100644
--- a/tempest/api/identity/admin/v2/test_tokens.py
+++ b/tempest/api/identity/admin/v2/test_tokens.py
@@ -24,14 +24,14 @@
     def test_create_get_delete_token(self):
         # get a token by username and password
         user_name = data_utils.rand_name(name='user')
-        user_password = data_utils.rand_name(name='pass')
+        user_password = data_utils.rand_password()
         # first: create a tenant
         tenant_name = data_utils.rand_name(name='tenant')
-        tenant = self.client.create_tenant(tenant_name)['tenant']
+        tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
         self.data.tenants.append(tenant)
         # second: create a user
-        user = self.client.create_user(user_name, user_password,
-                                       tenant['id'], '')['user']
+        user = self.users_client.create_user(user_name, user_password,
+                                             tenant['id'], '')['user']
         self.data.users.append(user)
         # then get a token for the user
         body = self.token_client.auth(user_name,
@@ -41,7 +41,7 @@
                          tenant['name'])
         # Perform GET Token
         token_id = body['token']['id']
-        token_details = self.client.get_token(token_id)['access']
+        token_details = self.client.show_token(token_id)['access']
         self.assertEqual(token_id, token_details['token']['id'])
         self.assertEqual(user['id'], token_details['user']['id'])
         self.assertEqual(user_name, token_details['user']['name'])
@@ -52,39 +52,40 @@
 
     @test.idempotent_id('25ba82ee-8a32-4ceb-8f50-8b8c71e8765e')
     def test_rescope_token(self):
-        """An unscoped token can be requested, that token can be used to
-           request a scoped token.
+        """An unscoped token can be requested
+
+        That token can be used to request a scoped token.
         """
 
         # Create a user.
         user_name = data_utils.rand_name(name='user')
-        user_password = data_utils.rand_name(name='pass')
+        user_password = data_utils.rand_password()
         tenant_id = None  # No default tenant so will get unscoped token.
         email = ''
-        user = self.client.create_user(user_name, user_password,
-                                       tenant_id, email)['user']
+        user = self.users_client.create_user(user_name, user_password,
+                                             tenant_id, email)['user']
         self.data.users.append(user)
 
         # Create a couple tenants.
         tenant1_name = data_utils.rand_name(name='tenant')
-        tenant1 = self.client.create_tenant(tenant1_name)['tenant']
+        tenant1 = self.tenants_client.create_tenant(tenant1_name)['tenant']
         self.data.tenants.append(tenant1)
 
         tenant2_name = data_utils.rand_name(name='tenant')
-        tenant2 = self.client.create_tenant(tenant2_name)['tenant']
+        tenant2 = self.tenants_client.create_tenant(tenant2_name)['tenant']
         self.data.tenants.append(tenant2)
 
         # Create a role
         role_name = data_utils.rand_name(name='role')
-        role = self.client.create_role(role_name)['role']
+        role = self.roles_client.create_role(name=role_name)['role']
         self.data.roles.append(role)
 
         # Grant the user the role on the tenants.
-        self.client.assign_user_role(tenant1['id'], user['id'],
-                                     role['id'])
+        self.roles_client.assign_user_role(tenant1['id'], user['id'],
+                                           role['id'])
 
-        self.client.assign_user_role(tenant2['id'], user['id'],
-                                     role['id'])
+        self.roles_client.assign_user_role(tenant2['id'], user['id'],
+                                           role['id'])
 
         # Get an unscoped token.
         body = self.token_client.auth(user_name, user_password)
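
Note: the rescope test above relies on the two call shapes of
token_client.auth(): name and password alone yield an unscoped token, while
adding a tenant name yields a token scoped to that tenant. The stub below only
mimics that shape so the difference is visible; it is not the Tempest token
client.

    class StubTokenClient(object):
        def auth(self, user, password, tenant=None):
            token = {'id': 'tok-%s' % (tenant or 'unscoped')}
            if tenant is not None:
                token['tenant'] = {'name': tenant}
            return {'token': token}

    tc = StubTokenClient()
    unscoped = tc.auth('demo-user', 's3cret')           # no tenant, unscoped
    scoped = tc.auth('demo-user', 's3cret', 'tenant1')  # rescoped to tenant1
    assert 'tenant' not in unscoped['token']
    assert scoped['token']['tenant']['name'] == 'tenant1'
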
diff --git a/tempest/api/identity/admin/v2/test_users.py b/tempest/api/identity/admin/v2/test_users.py
index 6ee5218..60c4e97 100644
--- a/tempest/api/identity/admin/v2/test_users.py
+++ b/tempest/api/identity/admin/v2/test_users.py
@@ -26,7 +26,7 @@
     def resource_setup(cls):
         super(UsersTestJSON, cls).resource_setup()
         cls.alt_user = data_utils.rand_name('test_user')
-        cls.alt_password = data_utils.rand_name('pass')
+        cls.alt_password = data_utils.rand_password()
         cls.alt_email = cls.alt_user + '@testmail.tm'
 
     @test.attr(type='smoke')
@@ -34,9 +34,9 @@
     def test_create_user(self):
         # Create a user
         self.data.setup_test_tenant()
-        user = self.client.create_user(self.alt_user, self.alt_password,
-                                       self.data.tenant['id'],
-                                       self.alt_email)['user']
+        user = self.users_client.create_user(self.alt_user, self.alt_password,
+                                             self.data.tenant['id'],
+                                             self.alt_email)['user']
         self.data.users.append(user)
         self.assertEqual(self.alt_user, user['name'])
 
@@ -45,9 +45,10 @@
         # Create a user with enabled : False
         self.data.setup_test_tenant()
         name = data_utils.rand_name('test_user')
-        user = self.client.create_user(name, self.alt_password,
-                                       self.data.tenant['id'],
-                                       self.alt_email, enabled=False)['user']
+        user = self.users_client.create_user(name, self.alt_password,
+                                             self.data.tenant['id'],
+                                             self.alt_email,
+                                             enabled=False)['user']
         self.data.users.append(user)
         self.assertEqual(name, user['name'])
         self.assertEqual(False, user['enabled'])
@@ -58,22 +59,22 @@
         # Test case to check if updating of user attributes is successful.
         test_user = data_utils.rand_name('test_user')
         self.data.setup_test_tenant()
-        user = self.client.create_user(test_user, self.alt_password,
-                                       self.data.tenant['id'],
-                                       self.alt_email)['user']
+        user = self.users_client.create_user(test_user, self.alt_password,
+                                             self.data.tenant['id'],
+                                             self.alt_email)['user']
         # Delete the User at the end of this method
-        self.addCleanup(self.client.delete_user, user['id'])
+        self.addCleanup(self.users_client.delete_user, user['id'])
         # Updating user details with new values
         u_name2 = data_utils.rand_name('user2')
         u_email2 = u_name2 + '@testmail.tm'
-        update_user = self.client.update_user(user['id'], name=u_name2,
-                                              email=u_email2,
-                                              enabled=False)['user']
+        update_user = self.users_client.update_user(user['id'], name=u_name2,
+                                                    email=u_email2,
+                                                    enabled=False)['user']
         self.assertEqual(u_name2, update_user['name'])
         self.assertEqual(u_email2, update_user['email'])
         self.assertEqual(False, update_user['enabled'])
         # GET by id after updating
-        updated_user = self.client.get_user(user['id'])['user']
+        updated_user = self.users_client.show_user(user['id'])['user']
         # Assert response body of GET after updating
         self.assertEqual(u_name2, updated_user['name'])
         self.assertEqual(u_email2, updated_user['email'])
@@ -84,47 +85,49 @@
         # Delete a user
         test_user = data_utils.rand_name('test_user')
         self.data.setup_test_tenant()
-        user = self.client.create_user(test_user, self.alt_password,
-                                       self.data.tenant['id'],
-                                       self.alt_email)['user']
-        self.client.delete_user(user['id'])
+        user = self.users_client.create_user(test_user, self.alt_password,
+                                             self.data.tenant['id'],
+                                             self.alt_email)['user']
+        self.users_client.delete_user(user['id'])
 
     @test.idempotent_id('aca696c3-d645-4f45-b728-63646045beb1')
     def test_user_authentication(self):
         # Valid user's token is authenticated
         self.data.setup_test_user()
         # Get a token
-        self.token_client.auth(self.data.test_user, self.data.test_password,
-                               self.data.test_tenant)
+        self.token_client.auth(self.data.user['name'],
+                               self.data.user_password,
+                               self.data.tenant['name'])
         # Re-auth
-        self.token_client.auth(self.data.test_user,
-                               self.data.test_password,
-                               self.data.test_tenant)
+        self.token_client.auth(self.data.user['name'],
+                               self.data.user_password,
+                               self.data.tenant['name'])
 
     @test.idempotent_id('5d1fa498-4c2d-4732-a8fe-2b054598cfdd')
     def test_authentication_request_without_token(self):
         # Request for token authentication with a valid token in header
         self.data.setup_test_user()
-        self.token_client.auth(self.data.test_user, self.data.test_password,
-                               self.data.test_tenant)
+        self.token_client.auth(self.data.user['name'],
+                               self.data.user_password,
+                               self.data.tenant['name'])
         # Get the token of the current client
         token = self.client.auth_provider.get_token()
         # Delete the token from database
         self.client.delete_token(token)
         # Re-auth
-        self.token_client.auth(self.data.test_user,
-                               self.data.test_password,
-                               self.data.test_tenant)
+        self.token_client.auth(self.data.user['name'],
+                               self.data.user_password,
+                               self.data.tenant['name'])
         self.client.auth_provider.clear_auth()
 
     @test.idempotent_id('a149c02e-e5e0-4b89-809e-7e8faf33ccda')
     def test_get_users(self):
         # Get a list of users and find the test user
         self.data.setup_test_user()
-        users = self.client.get_users()['users']
+        users = self.users_client.list_users()['users']
         self.assertThat([u['name'] for u in users],
-                        matchers.Contains(self.data.test_user),
-                        "Could not find %s" % self.data.test_user)
+                        matchers.Contains(self.data.user['name']),
+                        "Could not find %s" % self.data.user['name'])
 
     @test.idempotent_id('6e317209-383a-4bed-9f10-075b7c82c79a')
     def test_list_users_for_tenant(self):
@@ -132,21 +135,22 @@
         self.data.setup_test_tenant()
         user_ids = list()
         fetched_user_ids = list()
+        password1 = data_utils.rand_password()
         alt_tenant_user1 = data_utils.rand_name('tenant_user1')
-        user1 = self.client.create_user(alt_tenant_user1, 'password1',
-                                        self.data.tenant['id'],
-                                        'user1@123')['user']
+        user1 = self.users_client.create_user(alt_tenant_user1, password1,
+                                              self.data.tenant['id'],
+                                              'user1@123')['user']
         user_ids.append(user1['id'])
         self.data.users.append(user1)
-
+        password2 = data_utils.rand_password()
         alt_tenant_user2 = data_utils.rand_name('tenant_user2')
-        user2 = self.client.create_user(alt_tenant_user2, 'password2',
-                                        self.data.tenant['id'],
-                                        'user2@123')['user']
+        user2 = self.users_client.create_user(alt_tenant_user2, password2,
+                                              self.data.tenant['id'],
+                                              'user2@123')['user']
         user_ids.append(user2['id'])
         self.data.users.append(user2)
         # List of users for the respective tenant ID
-        body = (self.client.list_users_for_tenant(self.data.tenant['id'])
+        body = (self.tenants_client.list_tenant_users(self.data.tenant['id'])
                 ['users'])
         for i in body:
             fetched_user_ids.append(i['id'])
@@ -162,27 +166,28 @@
         # Return list of users on tenant when roles are assigned to users
         self.data.setup_test_user()
         self.data.setup_test_role()
-        user = self.get_user_by_name(self.data.test_user)
-        tenant = self.get_tenant_by_name(self.data.test_tenant)
-        role = self.get_role_by_name(self.data.test_role)
+        user = self.get_user_by_name(self.data.user['name'])
+        tenant = self.get_tenant_by_name(self.data.tenant['name'])
+        role = self.get_role_by_name(self.data.role['name'])
         # Assigning roles to two users
         user_ids = list()
         fetched_user_ids = list()
         user_ids.append(user['id'])
-        role = self.client.assign_user_role(tenant['id'], user['id'],
-                                            role['id'])['role']
+        role = self.roles_client.assign_user_role(tenant['id'], user['id'],
+                                                  role['id'])['role']
 
         alt_user2 = data_utils.rand_name('second_user')
-        second_user = self.client.create_user(alt_user2, 'password1',
-                                              self.data.tenant['id'],
-                                              'user2@123')['user']
+        alt_password2 = data_utils.rand_password()
+        second_user = self.users_client.create_user(alt_user2, alt_password2,
+                                                    self.data.tenant['id'],
+                                                    'user2@123')['user']
         user_ids.append(second_user['id'])
         self.data.users.append(second_user)
-        role = self.client.assign_user_role(tenant['id'],
-                                            second_user['id'],
-                                            role['id'])['role']
+        role = self.roles_client.assign_user_role(tenant['id'],
+                                                  second_user['id'],
+                                                  role['id'])['role']
         # List of users with roles for the respective tenant ID
-        body = (self.client.list_users_for_tenant(self.data.tenant['id'])
+        body = (self.tenants_client.list_tenant_users(self.data.tenant['id'])
                 ['users'])
         for i in body:
             fetched_user_ids.append(i['id'])
@@ -198,13 +203,13 @@
         # Test case to check if updating of user password is successful.
         self.data.setup_test_user()
         # Updating the user with new password
-        new_pass = data_utils.rand_name('pass')
-        update_user = self.client.update_user_password(
-            self.data.user['id'], new_pass)['user']
+        new_pass = data_utils.rand_password()
+        update_user = self.users_client.update_user_password(
+            self.data.user['id'], password=new_pass)['user']
         self.assertEqual(update_user['id'], self.data.user['id'])
 
         # Validate the updated password
         # Get a token
-        body = self.token_client.auth(self.data.test_user, new_pass,
-                                      self.data.test_tenant)
+        body = self.token_client.auth(self.data.user['name'], new_pass,
+                                      self.data.tenant['name'])
         self.assertTrue('id' in body['token'])
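
Note: the hunks above read whole resource dicts plus the generated password
from the test-data holder (data.user['name'], data.user_password,
data.tenant['name'], data.role['name']) instead of the old flat
test_user/test_password/test_tenant attributes. A minimal stand-in, not the
actual Tempest data generator, just to show the new access pattern:

    import collections

    TestData = collections.namedtuple(
        'TestData', ['user', 'user_password', 'tenant', 'role'])

    data = TestData(
        user={'id': 'u-1', 'name': 'demo-user',
              'email': 'demo-user@testmail.tm'},
        user_password='generated-password',
        tenant={'id': 't-1', 'name': 'demo-tenant'},
        role={'id': 'r-1', 'name': 'demo-role'})

    creds = (data.user['name'], data.user_password, data.tenant['name'])
    assert creds == ('demo-user', 'generated-password', 'demo-tenant')
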
diff --git a/tempest/api/identity/admin/v2/test_users_negative.py b/tempest/api/identity/admin/v2/test_users_negative.py
index 85f7411..0a5d0c9 100644
--- a/tempest/api/identity/admin/v2/test_users_negative.py
+++ b/tempest/api/identity/admin/v2/test_users_negative.py
@@ -28,7 +28,7 @@
     def resource_setup(cls):
         super(UsersNegativeTestJSON, cls).resource_setup()
         cls.alt_user = data_utils.rand_name('test_user')
-        cls.alt_password = data_utils.rand_name('pass')
+        cls.alt_password = data_utils.rand_password()
         cls.alt_email = cls.alt_user + '@testmail.tm'
 
     @test.attr(type=['negative'])
@@ -37,8 +37,9 @@
         # Non-administrator should not be authorized to create a user
         self.data.setup_test_tenant()
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.create_user, self.alt_user,
-                          self.alt_password, self.data.tenant['id'],
+                          self.non_admin_users_client.create_user,
+                          self.alt_user, self.alt_password,
+                          self.data.tenant['id'],
                           self.alt_email)
 
     @test.attr(type=['negative'])
@@ -46,8 +47,8 @@
     def test_create_user_with_empty_name(self):
         # User with an empty name should not be created
         self.data.setup_test_tenant()
-        self.assertRaises(lib_exc.BadRequest, self.client.create_user, '',
-                          self.alt_password, self.data.tenant['id'],
+        self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
+                          '', self.alt_password, self.data.tenant['id'],
                           self.alt_email)
 
     @test.attr(type=['negative'])
@@ -55,7 +56,7 @@
     def test_create_user_with_name_length_over_255(self):
         # Length of user name field should be restricted to 255 characters
         self.data.setup_test_tenant()
-        self.assertRaises(lib_exc.BadRequest, self.client.create_user,
+        self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
                           'a' * 256, self.alt_password,
                           self.data.tenant['id'], self.alt_email)
 
@@ -64,15 +65,15 @@
     def test_create_user_with_duplicate_name(self):
         # Duplicate user should not be created
         self.data.setup_test_user()
-        self.assertRaises(lib_exc.Conflict, self.client.create_user,
-                          self.data.test_user, self.data.test_password,
-                          self.data.tenant['id'], self.data.test_email)
+        self.assertRaises(lib_exc.Conflict, self.users_client.create_user,
+                          self.data.user['name'], self.data.user_password,
+                          self.data.tenant['id'], self.data.user['email'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('0132cc22-7c4f-42e1-9e50-ac6aad31d59a')
     def test_create_user_for_non_existent_tenant(self):
         # Attempt to create a user in a non-existent tenant should fail
-        self.assertRaises(lib_exc.NotFound, self.client.create_user,
+        self.assertRaises(lib_exc.NotFound, self.users_client.create_user,
                           self.alt_user, self.alt_password, '49ffgg99999',
                           self.alt_email)
 
@@ -85,7 +86,7 @@
         token = self.client.auth_provider.get_token()
         # Delete the token from database
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.create_user,
+        self.assertRaises(lib_exc.Unauthorized, self.users_client.create_user,
                           self.alt_user, self.alt_password,
                           self.data.tenant['id'], self.alt_email)
 
@@ -98,7 +99,7 @@
         # Attempt to create a user with invalid 'enabled' value should fail
         self.data.setup_test_tenant()
         name = data_utils.rand_name('test_user')
-        self.assertRaises(lib_exc.BadRequest, self.client.create_user,
+        self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
                           name, self.alt_password,
                           self.data.tenant['id'],
                           self.alt_email, enabled=3)
@@ -109,7 +110,7 @@
         # Attempt to update a non-existent user should fail
         user_name = data_utils.rand_name('user')
         non_existent_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound, self.client.update_user,
+        self.assertRaises(lib_exc.NotFound, self.users_client.update_user,
                           non_existent_id, name=user_name)
 
     @test.attr(type=['negative'])
@@ -121,7 +122,7 @@
         token = self.client.auth_provider.get_token()
         # Delete the token from database
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.update_user,
+        self.assertRaises(lib_exc.Unauthorized, self.users_client.update_user,
                           self.alt_user)
 
         # Unset the token to allow further tests to generate a new token
@@ -133,7 +134,8 @@
         # Non-administrator should not be authorized to update user
         self.data.setup_test_tenant()
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.update_user, self.alt_user)
+                          self.non_admin_users_client.update_user,
+                          self.alt_user)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('d45195d5-33ed-41b9-a452-7d0d6a00f6e9')
@@ -141,14 +143,14 @@
         # Non-administrator user should not be authorized to delete a user
         self.data.setup_test_user()
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.delete_user,
+                          self.non_admin_users_client.delete_user,
                           self.data.user['id'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('7cc82f7e-9998-4f89-abae-23df36495867')
     def test_delete_non_existent_user(self):
         # Attempt to delete a non-existent user should fail
-        self.assertRaises(lib_exc.NotFound, self.client.delete_user,
+        self.assertRaises(lib_exc.NotFound, self.users_client.delete_user,
                           'junk12345123')
 
     @test.attr(type=['negative'])
@@ -160,7 +162,7 @@
         token = self.client.auth_provider.get_token()
         # Delete the token from database
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.delete_user,
+        self.assertRaises(lib_exc.Unauthorized, self.users_client.delete_user,
                           self.alt_user)
 
         # Unset the token to allow further tests to generate a new token
@@ -171,22 +173,22 @@
     def test_authentication_for_disabled_user(self):
         # Disabled user's token should not get authenticated
         self.data.setup_test_user()
-        self.disable_user(self.data.test_user)
+        self.disable_user(self.data.user['name'])
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
-                          self.data.test_user,
-                          self.data.test_password,
-                          self.data.test_tenant)
+                          self.data.user['name'],
+                          self.data.user_password,
+                          self.data.tenant['name'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('440a7a8d-9328-4b7b-83e0-d717010495e4')
     def test_authentication_when_tenant_is_disabled(self):
         # User's token for a disabled tenant should not be authenticated
         self.data.setup_test_user()
-        self.disable_tenant(self.data.test_tenant)
+        self.disable_tenant(self.data.tenant['name'])
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
-                          self.data.test_user,
-                          self.data.test_password,
-                          self.data.test_tenant)
+                          self.data.user['name'],
+                          self.data.user_password,
+                          self.data.tenant['name'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('921f1ad6-7907-40b8-853f-637e7ee52178')
@@ -194,8 +196,8 @@
         # User's token for an invalid tenant should not be authenticated
         self.data.setup_test_user()
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
-                          self.data.test_user,
-                          self.data.test_password,
+                          self.data.user['name'],
+                          self.data.user_password,
                           'junktenant1234')
 
     @test.attr(type=['negative'])
@@ -204,8 +206,8 @@
         # Non-existent user's token should not get authenticated
         self.data.setup_test_user()
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
-                          'junkuser123', self.data.test_password,
-                          self.data.test_tenant)
+                          'junkuser123', self.data.user_password,
+                          self.data.tenant['name'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('d5308b33-3574-43c3-8d87-1c090c5e1eca')
@@ -213,8 +215,8 @@
         # User's token with invalid password should not be authenticated
         self.data.setup_test_user()
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
-                          self.data.test_user, 'junkpass1234',
-                          self.data.test_tenant)
+                          self.data.user['name'], 'junkpass1234',
+                          self.data.tenant['name'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('284192ce-fb7c-4909-a63b-9a502e0ddd11')
@@ -222,7 +224,7 @@
         # Non-administrator user should not be authorized to get user list
         self.data.setup_test_user()
         self.assertRaises(lib_exc.Forbidden,
-                          self.non_admin_client.get_users)
+                          self.non_admin_users_client.list_users)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('a73591ec-1903-4ffe-be42-282b39fefc9d')
@@ -230,7 +232,7 @@
         # Request to get list of users without a valid token should fail
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
-        self.assertRaises(lib_exc.Unauthorized, self.client.get_users)
+        self.assertRaises(lib_exc.Unauthorized, self.users_client.list_users)
         self.client.auth_provider.clear_auth()
 
     @test.attr(type=['negative'])
@@ -247,4 +249,4 @@
         # List the users with invalid tenant id
         for invalid in invalid_id:
             self.assertRaises(lib_exc.NotFound,
-                              self.client.list_users_for_tenant, invalid)
+                              self.tenants_client.list_tenant_users, invalid)
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index d22b27f..b81bff7 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -29,7 +29,7 @@
         u_name = data_utils.rand_name('user')
         u_desc = '%s description' % u_name
         u_email = '%s@testmail.tm' % u_name
-        u_password = data_utils.rand_name('pass')
+        u_password = data_utils.rand_password()
         for i in range(2):
             cls.project = cls.client.create_project(
                 data_utils.rand_name('project'),
@@ -53,11 +53,11 @@
     @test.attr(type='smoke')
     @test.idempotent_id('7cd59bf9-bda4-4c72-9467-d21cab278355')
     def test_credentials_create_get_update_delete(self):
-        keys = [data_utils.rand_name('Access'),
-                data_utils.rand_name('Secret')]
+        blob = '{"access": "%s", "secret": "%s"}' % (
+            data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
         cred = self.creds_client.create_credential(
-            keys[0], keys[1], self.user_body['id'],
-            self.projects[0])['credential']
+            user_id=self.user_body['id'], project_id=self.projects[0],
+            blob=blob, type='ec2')['credential']
         self.addCleanup(self._delete_credential, cred['id'])
         for value1 in self.creds_list[0]:
             self.assertIn(value1, cred)
@@ -66,16 +66,17 @@
 
         new_keys = [data_utils.rand_name('NewAccess'),
                     data_utils.rand_name('NewSecret')]
+        blob = '{"access": "%s", "secret": "%s"}' % (new_keys[0], new_keys[1])
         update_body = self.creds_client.update_credential(
-            cred['id'], access_key=new_keys[0], secret_key=new_keys[1],
-            project_id=self.projects[1])['credential']
+            cred['id'], blob=blob, project_id=self.projects[1],
+            type='ec2')['credential']
         self.assertEqual(cred['id'], update_body['id'])
         self.assertEqual(self.projects[1], update_body['project_id'])
         self.assertEqual(self.user_body['id'], update_body['user_id'])
         self.assertEqual(update_body['blob']['access'], new_keys[0])
         self.assertEqual(update_body['blob']['secret'], new_keys[1])
 
-        get_body = self.creds_client.get_credential(cred['id'])['credential']
+        get_body = self.creds_client.show_credential(cred['id'])['credential']
         for value1 in self.creds_list[0]:
             self.assertEqual(update_body[value1],
                              get_body[value1])
@@ -89,10 +90,11 @@
         fetched_cred_ids = list()
 
         for i in range(2):
+            blob = '{"access": "%s", "secret": "%s"}' % (
+                data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
             cred = self.creds_client.create_credential(
-                data_utils.rand_name('Access'),
-                data_utils.rand_name('Secret'),
-                self.user_body['id'], self.projects[0])['credential']
+                user_id=self.user_body['id'], project_id=self.projects[0],
+                blob=blob, type='ec2')['credential']
             created_cred_ids.append(cred['id'])
             self.addCleanup(self._delete_credential, cred['id'])
 
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index 4c69758..53861ca 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -83,6 +83,6 @@
 
         # verify the user's token and see that it is scoped to the project
         token, auth_data = admin_client.auth_provider.get_auth()
-        result = admin_client.identity_v3_client.get_token(token)['token']
+        result = admin_client.identity_v3_client.show_token(token)['token']
         self.assertEqual(result['project']['domain']['id'], dom_id)
         self.assertEqual(result['project']['id'], proj_id)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 15bea28..1729dc9 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -85,7 +85,7 @@
         self.assertEqual(new_desc, updated_domain['description'])
         self.assertEqual(True, updated_domain['enabled'])
 
-        fetched_domain = self.client.get_domain(domain['id'])['domain']
+        fetched_domain = self.client.show_domain(domain['id'])['domain']
         self.assertEqual(new_name, fetched_domain['name'])
         self.assertEqual(new_desc, fetched_domain['description'])
         self.assertEqual(True, fetched_domain['enabled'])
@@ -124,6 +124,6 @@
     @test.attr(type='smoke')
     @test.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
     def test_default_domain_exists(self):
-        domain = self.client.get_domain(self.domain_id)['domain']
+        domain = self.client.show_domain(self.domain_id)['domain']
 
         self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index 33819a8..9eb3149 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -44,3 +44,29 @@
         # Domain name should not be empty
         self.assertRaises(lib_exc.BadRequest, self.client.create_domain,
                           name='')
+
+    @test.attr(type=['negative'])
+    @test.idempotent_id('37b1bbf2-d664-4785-9a11-333438586eae')
+    def test_create_domain_with_name_length_over_64(self):
+        # Domain name length should not be greater than 64 characters
+        d_name = 'a' * 65
+        self.assertRaises(lib_exc.BadRequest, self.client.create_domain,
+                          d_name)
+
+    @test.attr(type=['negative'])
+    @test.idempotent_id('43781c07-764f-4cf2-a405-953c1916f605')
+    def test_delete_non_existent_domain(self):
+        # An attempt to delete a non-existent domain should fail
+        self.assertRaises(lib_exc.NotFound, self.client.delete_domain,
+                          data_utils.rand_uuid_hex())
+
+    @test.attr(type=['negative'])
+    @test.idempotent_id('e6f9e4a2-4f36-4be8-bdbc-4e199ae29427')
+    def test_domain_create_duplicate(self):
+        domain_name = data_utils.rand_name('domain-dup')
+        domain = self.client.create_domain(domain_name)['domain']
+        domain_id = domain['id']
+        self.addCleanup(self.delete_domain, domain_id)
+        # Domain name should be unique
+        self.assertRaises(
+            lib_exc.Conflict, self.client.create_domain, domain_name)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index e44a96b..2538829 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -33,9 +33,9 @@
         s_name = data_utils.rand_name('service')
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
-        cls.service_data =\
-            cls.service_client.create_service(s_name, s_type,
-                                              description=s_description)
+        cls.service_data = (
+            cls.services_client.create_service(name=s_name, type=s_type,
+                                               description=s_description))
         cls.service_data = cls.service_data['service']
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
@@ -45,8 +45,10 @@
             region = data_utils.rand_name('region')
             url = data_utils.rand_url()
             interface = 'public'
-            endpoint = (cls.client.create_endpoint(cls.service_id, interface,
-                        url, region=region, enabled=True))['endpoint']
+            endpoint = cls.client.create_endpoint(service_id=cls.service_id,
+                                                  interface=interface,
+                                                  url=url, region=region,
+                                                  enabled=True)['endpoint']
             cls.setup_endpoints.append(endpoint)
 
     @classmethod
@@ -54,7 +56,7 @@
         for e in cls.setup_endpoints:
             cls.client.delete_endpoint(e['id'])
         for s in cls.service_ids:
-            cls.service_client.delete_service(s)
+            cls.services_client.delete_service(s)
         super(EndPointsTestJSON, cls).resource_cleanup()
 
     @test.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
@@ -69,22 +71,38 @@
                          ', '.join(str(e) for e in missing_endpoints))
 
     @test.idempotent_id('0e2446d2-c1fd-461b-a729-b9e73e3e3b37')
-    def test_create_list_delete_endpoint(self):
+    def test_create_list_show_delete_endpoint(self):
         region = data_utils.rand_name('region')
         url = data_utils.rand_url()
         interface = 'public'
-        endpoint = (self.client.create_endpoint(self.service_id, interface,
-                    url, region=region, enabled=True)['endpoint'])
+        endpoint = self.client.create_endpoint(service_id=self.service_id,
+                                               interface=interface,
+                                               url=url, region=region,
+                                               enabled=True)['endpoint']
+
         # Asserting Create Endpoint response body
         self.assertIn('id', endpoint)
         self.assertEqual(region, endpoint['region'])
         self.assertEqual(url, endpoint['url'])
+
         # Checking if created endpoint is present in the list of endpoints
         fetched_endpoints = self.client.list_endpoints()['endpoints']
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertIn(endpoint['id'], fetched_endpoints_id)
+
+        # Show endpoint
+        fetched_endpoint = (
+            self.client.show_endpoint(endpoint['id'])['endpoint'])
+        # Asserting if the attributes of endpoint are the same
+        self.assertEqual(self.service_id, fetched_endpoint['service_id'])
+        self.assertEqual(interface, fetched_endpoint['interface'])
+        self.assertEqual(url, fetched_endpoint['url'])
+        self.assertEqual(region, fetched_endpoint['region'])
+        self.assertEqual(True, fetched_endpoint['enabled'])
+
         # Deleting the endpoint created in this method
         self.client.delete_endpoint(endpoint['id'])
+
         # Checking whether endpoint is deleted successfully
         fetched_endpoints = self.client.list_endpoints()['endpoints']
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
@@ -98,30 +116,30 @@
         region1 = data_utils.rand_name('region')
         url1 = data_utils.rand_url()
         interface1 = 'public'
-        endpoint_for_update =\
-            self.client.create_endpoint(self.service_id, interface1,
-                                        url1, region=region1,
-                                        enabled=True)['endpoint']
+        endpoint_for_update = (
+            self.client.create_endpoint(service_id=self.service_id,
+                                        interface=interface1,
+                                        url=url1, region=region1,
+                                        enabled=True)['endpoint'])
         self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
         # Creating service so as to update endpoint with new service ID
         s_name = data_utils.rand_name('service')
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
-        service2 =\
-            self.service_client.create_service(s_name, s_type,
-                                               description=s_description)
+        service2 = (
+            self.services_client.create_service(name=s_name, type=s_type,
+                                                description=s_description))
         service2 = service2['service']
         self.service_ids.append(service2['id'])
         # Updating endpoint with new values
         region2 = data_utils.rand_name('region')
         url2 = data_utils.rand_url()
         interface2 = 'internal'
-        endpoint = \
-            self.client.update_endpoint(endpoint_for_update['id'],
-                                        service_id=service2['id'],
-                                        interface=interface2, url=url2,
-                                        region=region2,
-                                        enabled=False)['endpoint']
+        endpoint = self.client.update_endpoint(endpoint_for_update['id'],
+                                               service_id=service2['id'],
+                                               interface=interface2,
+                                               url=url2, region=region2,
+                                               enabled=False)['endpoint']
         # Asserting if the attributes of endpoint are updated
         self.assertEqual(service2['id'], endpoint['service_id'])
         self.assertEqual(interface2, endpoint['interface'])
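A minimal sketch of the keyword-argument endpoint calls used above, assuming client is the v3 endpoints client and service_id refers to an existing service (the function name is illustrative only):

    from tempest.common.utils import data_utils

    def endpoint_create_show_delete(client, service_id):
        region = data_utils.rand_name('region')
        url = data_utils.rand_url()
        # create_endpoint is now called with keyword arguments only.
        endpoint = client.create_endpoint(service_id=service_id,
                                          interface='public',
                                          url=url, region=region,
                                          enabled=True)['endpoint']
        # The single-endpoint GET is exposed as show_endpoint.
        fetched = client.show_endpoint(endpoint['id'])['endpoint']
        assert fetched['url'] == url
        client.delete_endpoint(endpoint['id'])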
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 8cf853b..372254f 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -37,8 +37,8 @@
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
         cls.service_data = (
-            cls.service_client.create_service(s_name, s_type,
-                                              description=s_description)
+            cls.services_client.create_service(name=s_name, type=s_type,
+                                               description=s_description)
             ['service'])
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
@@ -46,7 +46,7 @@
     @classmethod
     def resource_cleanup(cls):
         for s in cls.service_ids:
-            cls.service_client.delete_service(s)
+            cls.services_client.delete_service(s)
         super(EndpointsNegativeTestJSON, cls).resource_cleanup()
 
     @test.attr(type=['negative'])
@@ -57,8 +57,8 @@
         url = data_utils.rand_url()
         region = data_utils.rand_name('region')
         self.assertRaises(lib_exc.BadRequest, self.client.create_endpoint,
-                          self.service_id, interface, url, region=region,
-                          force_enabled='False')
+                          service_id=self.service_id, interface=interface,
+                          url=url, region=region, enabled='False')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('9c43181e-0627-484a-8c79-923e8a59598b')
@@ -68,8 +68,8 @@
         url = data_utils.rand_url()
         region = data_utils.rand_name('region')
         self.assertRaises(lib_exc.BadRequest, self.client.create_endpoint,
-                          self.service_id, interface, url, region=region,
-                          force_enabled='True')
+                          service_id=self.service_id, interface=interface,
+                          url=url, region=region, enabled='True')
 
     def _assert_update_raises_bad_request(self, enabled):
 
@@ -78,13 +78,14 @@
         url1 = data_utils.rand_url()
         interface1 = 'public'
         endpoint_for_update = (
-            self.client.create_endpoint(self.service_id, interface1,
-                                        url1, region=region1,
-                                        enabled=True))['endpoint']
+            self.client.create_endpoint(service_id=self.service_id,
+                                        interface=interface1,
+                                        url=url1, region=region1,
+                                        enabled=True)['endpoint'])
         self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
 
         self.assertRaises(lib_exc.BadRequest, self.client.update_endpoint,
-                          endpoint_for_update['id'], force_enabled=enabled)
+                          endpoint_for_update['id'], enabled=enabled)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('65e41f32-5eb7-498f-a92a-a6ccacf7439a')
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 5ce6354..03b8b29 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -24,65 +24,80 @@
     def test_group_create_update_get(self):
         name = data_utils.rand_name('Group')
         description = data_utils.rand_name('Description')
-        group = self.client.create_group(name,
-                                         description=description)['group']
-        self.addCleanup(self.client.delete_group, group['id'])
+        group = self.groups_client.create_group(
+            name=name, description=description)['group']
+        self.addCleanup(self.groups_client.delete_group, group['id'])
         self.assertEqual(group['name'], name)
         self.assertEqual(group['description'], description)
 
         new_name = data_utils.rand_name('UpdateGroup')
         new_desc = data_utils.rand_name('UpdateDescription')
-        updated_group = self.client.update_group(group['id'],
-                                                 name=new_name,
-                                                 description=new_desc)['group']
+        updated_group = self.groups_client.update_group(
+            group['id'], name=new_name, description=new_desc)['group']
         self.assertEqual(updated_group['name'], new_name)
         self.assertEqual(updated_group['description'], new_desc)
 
-        new_group = self.client.get_group(group['id'])['group']
+        new_group = self.groups_client.show_group(group['id'])['group']
         self.assertEqual(group['id'], new_group['id'])
         self.assertEqual(new_name, new_group['name'])
         self.assertEqual(new_desc, new_group['description'])
 
+    @test.idempotent_id('b66eb441-b08a-4a6d-81ab-fef71baeb26c')
+    def test_group_update_with_few_fields(self):
+        name = data_utils.rand_name('Group')
+        old_description = data_utils.rand_name('Description')
+        group = self.groups_client.create_group(
+            name=name, description=old_description)['group']
+        self.addCleanup(self.groups_client.delete_group, group['id'])
+
+        new_name = data_utils.rand_name('UpdateGroup')
+        updated_group = self.groups_client.update_group(
+            group['id'], name=new_name)['group']
+        self.assertEqual(new_name, updated_group['name'])
+        # Verify that 'description' is not being updated or deleted.
+        self.assertEqual(old_description, updated_group['description'])
+
     @test.attr(type='smoke')
     @test.idempotent_id('1598521a-2f36-4606-8df9-30772bd51339')
     def test_group_users_add_list_delete(self):
         name = data_utils.rand_name('Group')
-        group = self.client.create_group(name)['group']
-        self.addCleanup(self.client.delete_group, group['id'])
+        group = self.groups_client.create_group(name=name)['group']
+        self.addCleanup(self.groups_client.delete_group, group['id'])
         # add user into group
         users = []
         for i in range(3):
             name = data_utils.rand_name('User')
-            user = self.client.create_user(name)['user']
+            password = data_utils.rand_password()
+            user = self.client.create_user(name, password)['user']
             users.append(user)
             self.addCleanup(self.client.delete_user, user['id'])
-            self.client.add_group_user(group['id'], user['id'])
+            self.groups_client.add_group_user(group['id'], user['id'])
 
         # list users in group
-        group_users = self.client.list_group_users(group['id'])['users']
+        group_users = self.groups_client.list_group_users(group['id'])['users']
         self.assertEqual(sorted(users), sorted(group_users))
-        # delete user in group
+        # check and delete user in group
         for user in users:
-            self.client.delete_group_user(group['id'],
-                                          user['id'])
-        group_users = self.client.list_group_users(group['id'])['users']
+            self.groups_client.check_group_user_existence(
+                group['id'], user['id'])
+            self.groups_client.delete_group_user(group['id'], user['id'])
+        group_users = self.groups_client.list_group_users(group['id'])['users']
         self.assertEqual(len(group_users), 0)
 
     @test.idempotent_id('64573281-d26a-4a52-b899-503cb0f4e4ec')
     def test_list_user_groups(self):
         # create a user
         user = self.client.create_user(
-            data_utils.rand_name('User'),
-            password=data_utils.rand_name('Pass'))['user']
+            data_utils.rand_name('User'), data_utils.rand_password())['user']
         self.addCleanup(self.client.delete_user, user['id'])
         # create two groups, and add user into them
         groups = []
         for i in range(2):
             name = data_utils.rand_name('Group')
-            group = self.client.create_group(name)['group']
+            group = self.groups_client.create_group(name=name)['group']
             groups.append(group)
-            self.addCleanup(self.client.delete_group, group['id'])
-            self.client.add_group_user(group['id'], user['id'])
+            self.addCleanup(self.groups_client.delete_group, group['id'])
+            self.groups_client.add_group_user(group['id'], user['id'])
         # list groups which user belongs to
         user_groups = self.client.list_user_groups(user['id'])['groups']
         self.assertEqual(sorted(groups), sorted(user_groups))
@@ -96,12 +111,12 @@
         for _ in range(3):
             name = data_utils.rand_name('Group')
             description = data_utils.rand_name('Description')
-            group = self.client.create_group(name,
-                                             description=description)['group']
-            self.addCleanup(self.client.delete_group, group['id'])
+            group = self.groups_client.create_group(
+                name=name, description=description)['group']
+            self.addCleanup(self.groups_client.delete_group, group['id'])
             group_ids.append(group['id'])
         # List and Verify Groups
-        body = self.client.list_groups()['groups']
+        body = self.groups_client.list_groups()['groups']
         for g in body:
             fetched_ids.append(g['id'])
         missing_groups = [g for g in group_ids if g not in fetched_ids]
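A minimal sketch of the group-membership lifecycle that now lives on the dedicated groups client, assuming client is the v3 identity/users client and groups_client the groups service client (the function name is illustrative only):

    from tempest.common.utils import data_utils

    def group_membership_cycle(client, groups_client):
        group = groups_client.create_group(
            name=data_utils.rand_name('Group'))['group']
        user = client.create_user(data_utils.rand_name('User'),
                                  data_utils.rand_password())['user']
        groups_client.add_group_user(group['id'], user['id'])
        # check_group_user_existence is expected to raise if the user is
        # not a member of the group.
        groups_client.check_group_user_existence(group['id'], user['id'])
        members = groups_client.list_group_users(group['id'])['users']
        groups_client.delete_group_user(group['id'], user['id'])
        client.delete_user(user['id'])
        groups_client.delete_group(group['id'])
        return members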
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 5185fea..aaed467 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -44,8 +44,8 @@
         list_projects = self.client.list_projects()['projects']
 
         for p in self.project_ids:
-            get_project = self.client.get_project(p)['project']
-            self.assertIn(get_project, list_projects)
+            show_project = self.client.show_project(p)['project']
+            self.assertIn(show_project, list_projects)
 
     @test.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
     def test_list_projects_with_domains(self):
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 320b479..4921c00 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -25,7 +25,7 @@
         # assert the response based on expected and not_expected
         # expected: user expected in the list response
         # not_expected: user, which should not be present in list response
-        body = self.client.get_users(params)['users']
+        body = self.client.list_users(params)['users']
         self.assertIn(expected[key], map(lambda x: x[key], body))
         self.assertNotIn(not_expected[key],
                          map(lambda x: x[key], body))
@@ -34,7 +34,7 @@
     def resource_setup(cls):
         super(UsersV3TestJSON, cls).resource_setup()
         alt_user = data_utils.rand_name('test_user')
-        alt_password = data_utils.rand_name('pass')
+        alt_password = data_utils.rand_password()
         cls.alt_email = alt_user + '@testmail.tm'
         cls.data.setup_test_domain()
         # Create user with Domain
@@ -42,13 +42,13 @@
         cls.domain_enabled_user = cls.client.create_user(
             u1_name, password=alt_password,
             email=cls.alt_email, domain_id=cls.data.domain['id'])['user']
-        cls.data.v3_users.append(cls.domain_enabled_user)
+        cls.data.users.append(cls.domain_enabled_user)
         # Create default not enabled user
         u2_name = data_utils.rand_name('test_user')
         cls.non_domain_enabled_user = cls.client.create_user(
             u2_name, password=alt_password,
             email=cls.alt_email, enabled=False)['user']
-        cls.data.v3_users.append(cls.non_domain_enabled_user)
+        cls.data.users.append(cls.non_domain_enabled_user)
 
     @test.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
     def test_list_user_domains(self):
@@ -77,9 +77,9 @@
     @test.idempotent_id('b30d4651-a2ea-4666-8551-0c0e49692635')
     def test_list_users(self):
         # List users
-        body = self.client.get_users()['users']
+        body = self.client.list_users()['users']
         fetched_ids = [u['id'] for u in body]
-        missing_users = [u['id'] for u in self.data.v3_users
+        missing_users = [u['id'] for u in self.data.users
                          if u['id'] not in fetched_ids]
         self.assertEqual(0, len(missing_users),
                          "Failed to find user %s in fetched list" %
@@ -88,8 +88,8 @@
     @test.idempotent_id('b4baa3ae-ac00-4b4e-9e27-80deaad7771f')
     def test_get_user(self):
         # Get a user detail
-        user = self.client.get_user(self.data.v3_users[0]['id'])['user']
-        self.assertEqual(self.data.v3_users[0]['id'], user['id'])
-        self.assertEqual(self.data.v3_users[0]['name'], user['name'])
+        user = self.client.show_user(self.data.users[0]['id'])['user']
+        self.assertEqual(self.data.users[0]['id'], user['id'])
+        self.assertEqual(self.data.users[0]['name'], user['name'])
         self.assertEqual(self.alt_email, user['email'])
         self.assertEqual(self.data.domain['id'], user['domain_id'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index d079fec..3b5e5d4 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -21,7 +21,7 @@
 class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
 
     def _delete_policy(self, policy_id):
-        self.policy_client.delete_policy(policy_id)
+        self.policies_client.delete_policy(policy_id)
 
     @test.idempotent_id('1a0ad286-2d06-4123-ab0d-728893a76201')
     def test_list_policies(self):
@@ -31,13 +31,13 @@
         for _ in range(3):
             blob = data_utils.rand_name('BlobName')
             policy_type = data_utils.rand_name('PolicyType')
-            policy = self.policy_client.create_policy(blob,
-                                                      policy_type)['policy']
+            policy = self.policies_client.create_policy(
+                blob=blob, type=policy_type)['policy']
             # Delete the Policy at the end of this method
             self.addCleanup(self._delete_policy, policy['id'])
             policy_ids.append(policy['id'])
         # List and Verify Policies
-        body = self.policy_client.list_policies()['policies']
+        body = self.policies_client.list_policies()['policies']
         for p in body:
             fetched_ids.append(p['id'])
         missing_pols = [p for p in policy_ids if p not in fetched_ids]
@@ -49,7 +49,8 @@
         # Test to update policy
         blob = data_utils.rand_name('BlobName')
         policy_type = data_utils.rand_name('PolicyType')
-        policy = self.policy_client.create_policy(blob, policy_type)['policy']
+        policy = self.policies_client.create_policy(blob=blob,
+                                                    type=policy_type)['policy']
         self.addCleanup(self._delete_policy, policy['id'])
         self.assertIn('id', policy)
         self.assertIn('type', policy)
@@ -59,11 +60,12 @@
         self.assertEqual(policy_type, policy['type'])
         # Update policy
         update_type = data_utils.rand_name('UpdatedPolicyType')
-        data = self.policy_client.update_policy(
+        data = self.policies_client.update_policy(
             policy['id'], type=update_type)['policy']
         self.assertIn('type', data)
         # Assertion for updated value with fetched value
-        fetched_policy = self.policy_client.get_policy(policy['id'])['policy']
+        fetched_policy = self.policies_client.show_policy(
+            policy['id'])['policy']
         self.assertIn('id', fetched_policy)
         self.assertIn('blob', fetched_policy)
         self.assertIn('type', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index f014307..2f4cc51 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -32,7 +32,7 @@
         desc1 = project['description']
         self.assertEqual(desc1, project_desc, 'Description should have '
                          'been sent in response for create')
-        body = self.client.get_project(project_id)['project']
+        body = self.client.show_project(project_id)['project']
         desc2 = body['description']
         self.assertEqual(desc2, project_desc, 'Description does not appear '
                          'to be set')
@@ -48,7 +48,7 @@
         project_id = project['id']
         self.assertEqual(project_name, project['name'])
         self.assertEqual(self.data.domain['id'], project['domain_id'])
-        body = self.client.get_project(project_id)['project']
+        body = self.client.show_project(project_id)['project']
         self.assertEqual(project_name, body['name'])
         self.assertEqual(self.data.domain['id'], body['domain_id'])
 
@@ -62,7 +62,7 @@
         project_id = project['id']
         en1 = project['enabled']
         self.assertTrue(en1, 'Enable should be True in response')
-        body = self.client.get_project(project_id)['project']
+        body = self.client.show_project(project_id)['project']
         en2 = body['enabled']
         self.assertTrue(en2, 'Enable should be True in lookup')
 
@@ -76,7 +76,7 @@
         en1 = project['enabled']
         self.assertEqual('false', str(en1).lower(),
                          'Enable should be False in response')
-        body = self.client.get_project(project['id'])['project']
+        body = self.client.show_project(project['id'])['project']
         en2 = body['enabled']
         self.assertEqual('false', str(en2).lower(),
                          'Enable should be False in lookup')
@@ -96,7 +96,7 @@
         resp2_name = body['name']
         self.assertNotEqual(resp1_name, resp2_name)
 
-        body = self.client.get_project(project['id'])['project']
+        body = self.client.show_project(project['id'])['project']
         resp3_name = body['name']
 
         self.assertNotEqual(resp1_name, resp3_name)
@@ -119,7 +119,7 @@
         resp2_desc = body['description']
         self.assertNotEqual(resp1_desc, resp2_desc)
 
-        body = self.client.get_project(project['id'])['project']
+        body = self.client.show_project(project['id'])['project']
         resp3_desc = body['description']
 
         self.assertNotEqual(resp1_desc, resp3_desc)
@@ -142,7 +142,7 @@
         resp2_en = body['enabled']
         self.assertNotEqual(resp1_en, resp2_en)
 
-        body = self.client.get_project(project['id'])['project']
+        body = self.client.show_project(project['id'])['project']
         resp3_en = body['enabled']
 
         self.assertNotEqual(resp1_en, resp3_en)
@@ -161,7 +161,7 @@
         u_name = data_utils.rand_name('user')
         u_desc = u_name + 'description'
         u_email = u_name + '@testmail.tm'
-        u_password = data_utils.rand_name('pass')
+        u_password = data_utils.rand_password()
         user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
             email=u_email, project_id=project['id'])['user']
@@ -169,7 +169,7 @@
         self.addCleanup(self.client.delete_user, user['id'])
 
         # Get User To validate the user details
-        new_user_get = self.client.get_user(user['id'])['user']
+        new_user_get = self.client.show_user(user['id'])['user']
         # Assert response body of GET
         self.assertEqual(u_name, new_user_get['name'])
         self.assertEqual(u_desc, new_user_get['description'])
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index e96e0f5..8bba3cb 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -25,7 +25,7 @@
     @classmethod
     def setup_clients(cls):
         super(RegionsTestJSON, cls).setup_clients()
-        cls.client = cls.region_client
+        cls.client = cls.regions_client
 
     @classmethod
     def resource_setup(cls):
@@ -33,7 +33,8 @@
         cls.setup_regions = list()
         for i in range(2):
             r_description = data_utils.rand_name('description')
-            region = cls.client.create_region(r_description)['region']
+            region = cls.client.create_region(
+                description=r_description)['region']
             cls.setup_regions.append(region)
 
     @classmethod
@@ -45,13 +46,13 @@
     def _delete_region(self, region_id):
         self.client.delete_region(region_id)
         self.assertRaises(lib_exc.NotFound,
-                          self.client.get_region, region_id)
+                          self.client.show_region, region_id)
 
     @test.idempotent_id('56186092-82e4-43f2-b954-91013218ba42')
     def test_create_update_get_delete_region(self):
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
-            r_description,
+            description=r_description,
             parent_region_id=self.setup_regions[0]['id'])['region']
         self.addCleanup(self._delete_region, region['id'])
         self.assertEqual(r_description, region['description'])
@@ -67,7 +68,7 @@
         self.assertEqual(self.setup_regions[1]['id'],
                          region['parent_region_id'])
         # Get the details of region
-        region = self.client.get_region(region['id'])['region']
+        region = self.client.show_region(region['id'])['region']
         self.assertEqual(r_alt_description, region['description'])
         self.assertEqual(self.setup_regions[1]['id'],
                          region['parent_region_id'])
@@ -79,7 +80,7 @@
         r_region_id = data_utils.rand_uuid()
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
-            r_description, unique_region_id=r_region_id)['region']
+            region_id=r_region_id, description=r_description)['region']
         self.addCleanup(self._delete_region, region['id'])
         # Asserting Create Region with specific id response body
         self.assertEqual(r_region_id, region['id'])
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index ffc991a..f1f06ee 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -25,13 +25,13 @@
         super(RolesV3TestJSON, cls).resource_setup()
         for _ in range(3):
             role_name = data_utils.rand_name(name='role')
-            role = cls.client.create_role(role_name)['role']
-            cls.data.v3_roles.append(role)
+            role = cls.client.create_role(name=role_name)['role']
+            cls.data.roles.append(role)
         cls.fetched_role_ids = list()
         u_name = data_utils.rand_name('user')
         u_desc = '%s description' % u_name
         u_email = '%s@testmail.tm' % u_name
-        cls.u_password = data_utils.rand_name('pass')
+        cls.u_password = data_utils.rand_password()
         cls.domain = cls.client.create_domain(
             data_utils.rand_name('domain'),
             description=data_utils.rand_name('domain-desc'))['domain']
@@ -39,20 +39,20 @@
             data_utils.rand_name('project'),
             description=data_utils.rand_name('project-desc'),
             domain_id=cls.domain['id'])['project']
-        cls.group_body = cls.client.create_group(
-            data_utils.rand_name('Group'), project_id=cls.project['id'],
+        cls.group_body = cls.groups_client.create_group(
+            name=data_utils.rand_name('Group'), project_id=cls.project['id'],
             domain_id=cls.domain['id'])['group']
         cls.user_body = cls.client.create_user(
             u_name, description=u_desc, password=cls.u_password,
             email=u_email, project_id=cls.project['id'],
             domain_id=cls.domain['id'])['user']
         cls.role = cls.client.create_role(
-            data_utils.rand_name('Role'))['role']
+            name=data_utils.rand_name('Role'))['role']
 
     @classmethod
     def resource_cleanup(cls):
         cls.client.delete_role(cls.role['id'])
-        cls.client.delete_group(cls.group_body['id'])
+        cls.groups_client.delete_group(cls.group_body['id'])
         cls.client.delete_user(cls.user_body['id'])
         cls.client.delete_project(cls.project['id'])
         # NOTE(harika-vakadi): It is necessary to disable the domain
@@ -69,19 +69,20 @@
     @test.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
     def test_role_create_update_get_list(self):
         r_name = data_utils.rand_name('Role')
-        role = self.client.create_role(r_name)['role']
+        role = self.client.create_role(name=r_name)['role']
         self.addCleanup(self.client.delete_role, role['id'])
         self.assertIn('name', role)
         self.assertEqual(role['name'], r_name)
 
         new_name = data_utils.rand_name('NewRole')
-        updated_role = self.client.update_role(new_name, role['id'])['role']
+        updated_role = self.client.update_role(role['id'],
+                                               name=new_name)['role']
         self.assertIn('name', updated_role)
         self.assertIn('id', updated_role)
         self.assertIn('links', updated_role)
         self.assertNotEqual(r_name, updated_role['name'])
 
-        new_role = self.client.get_role(role['id'])['role']
+        new_role = self.client.show_role(role['id'])['role']
         self.assertEqual(new_name, new_role['name'])
         self.assertEqual(updated_role['id'], new_role['id'])
 
@@ -102,7 +103,10 @@
         self._list_assertions(roles, self.fetched_role_ids,
                               self.role['id'])
 
-        self.client.revoke_role_from_user_on_project(
+        self.client.check_user_role_existence_on_project(
+            self.project['id'], self.user_body['id'], self.role['id'])
+
+        self.client.delete_role_from_user_on_project(
             self.project['id'], self.user_body['id'], self.role['id'])
 
     @test.idempotent_id('6c9a2940-3625-43a3-ac02-5dcec62ef3bd')
@@ -119,7 +123,10 @@
         self._list_assertions(roles, self.fetched_role_ids,
                               self.role['id'])
 
-        self.client.revoke_role_from_user_on_domain(
+        self.client.check_user_role_existence_on_domain(
+            self.domain['id'], self.user_body['id'], self.role['id'])
+
+        self.client.delete_role_from_user_on_domain(
             self.domain['id'], self.user_body['id'], self.role['id'])
 
     @test.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
@@ -137,8 +144,9 @@
         self._list_assertions(roles, self.fetched_role_ids,
                               self.role['id'])
         # Add user to group, and ensure user has role on project
-        self.client.add_group_user(self.group_body['id'], self.user_body['id'])
-        self.addCleanup(self.client.delete_group_user,
+        self.groups_client.add_group_user(self.group_body['id'],
+                                          self.user_body['id'])
+        self.addCleanup(self.groups_client.delete_group_user,
                         self.group_body['id'], self.user_body['id'])
         body = self.token.auth(user_id=self.user_body['id'],
                                password=self.u_password,
@@ -148,8 +156,12 @@
         roles = body['token']['roles']
         self.assertEqual(len(roles), 1)
         self.assertEqual(roles[0]['id'], self.role['id'])
+
+        self.client.check_role_from_group_on_project_existence(
+            self.project['id'], self.group_body['id'], self.role['id'])
+
         # Revoke role to group on project
-        self.client.revoke_role_from_group_on_project(
+        self.client.delete_role_from_group_on_project(
             self.project['id'], self.group_body['id'], self.role['id'])
 
     @test.idempotent_id('4bf8a70b-e785-413a-ad53-9f91ce02faa7')
@@ -166,12 +178,15 @@
         self._list_assertions(roles, self.fetched_role_ids,
                               self.role['id'])
 
-        self.client.revoke_role_from_group_on_domain(
+        self.client.check_role_from_group_on_domain_existence(
+            self.domain['id'], self.group_body['id'], self.role['id'])
+
+        self.client.delete_role_from_group_on_domain(
             self.domain['id'], self.group_body['id'], self.role['id'])
 
     @test.idempotent_id('f5654bcc-08c4-4f71-88fe-05d64e06de94')
     def test_list_roles(self):
         # Return a list of all roles
         body = self.client.list_roles()['roles']
-        found = [role for role in body if role in self.data.v3_roles]
-        self.assertEqual(len(found), len(self.data.v3_roles))
+        found = [role for role in body if role in self.data.roles]
+        self.assertEqual(len(found), len(self.data.roles))
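A minimal sketch of the check-then-revoke pattern introduced for role assignments, assuming client is the v3 identity/roles client and the IDs refer to an existing project, user and role (the function name is illustrative only):

    def verify_and_revoke_project_role(client, project_id, user_id, role_id):
        # The check_* call is expected to raise if the assignment is missing.
        client.check_user_role_existence_on_project(project_id, user_id,
                                                    role_id)
        # The revoke_role_from_* helpers are now named delete_role_from_*.
        client.delete_role_from_user_on_project(project_id, user_id, role_id)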
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index d920f64..c6e3df4 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -24,9 +24,9 @@
 
     def _del_service(self, service_id):
         # Used for deleting the services created in this class
-        self.service_client.delete_service(service_id)
+        self.services_client.delete_service(service_id)
         # Checking whether service is deleted successfully
-        self.assertRaises(lib_exc.NotFound, self.service_client.get_service,
+        self.assertRaises(lib_exc.NotFound, self.services_client.show_service,
                           service_id)
 
     @test.attr(type='smoke')
@@ -36,8 +36,8 @@
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
         desc = data_utils.rand_name('description')
-        create_service = self.service_client.create_service(
-            serv_type, name=name, description=desc)['service']
+        create_service = self.services_client.create_service(
+            type=serv_type, name=name, description=desc)['service']
         self.addCleanup(self._del_service, create_service['id'])
         self.assertIsNotNone(create_service['id'])
 
@@ -49,14 +49,14 @@
         s_id = create_service['id']
         resp1_desc = create_service['description']
         s_desc2 = data_utils.rand_name('desc2')
-        update_service = self.service_client.update_service(
+        update_service = self.services_client.update_service(
             s_id, description=s_desc2)['service']
         resp2_desc = update_service['description']
 
         self.assertNotEqual(resp1_desc, resp2_desc)
 
         # Get service
-        fetched_service = self.service_client.get_service(s_id)['service']
+        fetched_service = self.services_client.show_service(s_id)['service']
         resp3_desc = fetched_service['description']
 
         self.assertEqual(resp2_desc, resp3_desc)
@@ -67,9 +67,9 @@
         # Create a service only with name and type
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
-        service = self.service_client.create_service(
-            serv_type, name=name)['service']
-        self.addCleanup(self.service_client.delete_service, service['id'])
+        service = self.services_client.create_service(
+            type=serv_type, name=name)['service']
+        self.addCleanup(self.services_client.delete_service, service['id'])
         self.assertIn('id', service)
         expected_data = {'name': name, 'type': serv_type}
         self.assertDictContainsSubset(expected_data, service)
@@ -81,14 +81,14 @@
         for _ in range(3):
             name = data_utils.rand_name('service')
             serv_type = data_utils.rand_name('type')
-            create_service = self.service_client.create_service(
-                serv_type, name=name)['service']
-            self.addCleanup(self.service_client.delete_service,
+            create_service = self.services_client.create_service(
+                type=serv_type, name=name)['service']
+            self.addCleanup(self.services_client.delete_service,
                             create_service['id'])
             service_ids.append(create_service['id'])
 
         # List and Verify Services
-        services = self.service_client.list_services()['services']
+        services = self.services_client.list_services()['services']
         fetched_ids = [service['id'] for service in services]
         found = [s for s in fetched_ids if s in service_ids]
         self.assertEqual(len(found), len(service_ids))
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index b5f86da..b1446cf 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -29,7 +29,7 @@
         u_name = data_utils.rand_name('user')
         u_desc = '%s-description' % u_name
         u_email = '%s@testmail.tm' % u_name
-        u_password = data_utils.rand_name('pass')
+        u_password = data_utils.rand_password()
         user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
             email=u_email)['user']
@@ -39,13 +39,13 @@
                                password=u_password).response
         subject_token = resp['x-subject-token']
         # Perform GET Token
-        token_details = self.client.get_token(subject_token)['token']
+        token_details = self.client.show_token(subject_token)['token']
         self.assertEqual(resp['x-subject-token'], subject_token)
         self.assertEqual(token_details['user']['id'], user['id'])
         self.assertEqual(token_details['user']['name'], u_name)
         # Perform Delete Token
         self.client.delete_token(subject_token)
-        self.assertRaises(lib_exc.NotFound, self.client.get_token,
+        self.assertRaises(lib_exc.NotFound, self.client.show_token,
                           subject_token)
 
     @test.idempotent_id('565fa210-1da1-4563-999b-f7b5b67cf112')
@@ -60,7 +60,7 @@
 
         # Create a user.
         user_name = data_utils.rand_name(name='user')
-        user_password = data_utils.rand_name(name='pass')
+        user_password = data_utils.rand_password()
         user = self.client.create_user(user_name,
                                        password=user_password)['user']
         self.addCleanup(self.client.delete_user, user['id'])
@@ -76,7 +76,7 @@
 
         # Create a role
         role_name = data_utils.rand_name(name='role')
-        role = self.client.create_role(role_name)['role']
+        role = self.client.create_role(name=role_name)['role']
         self.addCleanup(self.client.delete_role, role['id'])
 
         # Grant the user the role on both projects.
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index fac8826..2ffc596 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -18,7 +18,7 @@
 
 from tempest.api.identity import base
 from tempest import clients
-from tempest.common import cred_provider
+from tempest.common import credentials_factory as common_creds
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import test
@@ -55,7 +55,7 @@
         self.trustor_username = data_utils.rand_name('user')
         u_desc = self.trustor_username + 'description'
         u_email = self.trustor_username + '@testmail.xx'
-        self.trustor_password = data_utils.rand_name('pass')
+        self.trustor_password = data_utils.rand_password()
         user = self.client.create_user(
             self.trustor_username,
             description=u_desc,
@@ -69,10 +69,10 @@
         self.delegated_role = data_utils.rand_name('DelegatedRole')
         self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
 
-        role = self.client.create_role(self.delegated_role)['role']
+        role = self.client.create_role(name=self.delegated_role)['role']
         self.delegated_role_id = role['id']
 
-        role = self.client.create_role(self.not_delegated_role)['role']
+        role = self.client.create_role(name=self.not_delegated_role)['role']
         self.not_delegated_role_id = role['id']
 
         # Assign roles to trustor
@@ -89,7 +89,7 @@
         self.assertIsNotNone(self.trustee_user_id)
 
         # Initialize a new client with the trustor credentials
-        creds = cred_provider.get_credentials(
+        creds = common_creds.get_credentials(
             identity_version='v3',
             username=self.trustor_username,
             password=self.trustor_password,
@@ -115,7 +115,7 @@
             trustor_user_id=self.trustor_user_id,
             trustee_user_id=self.trustee_user_id,
             project_id=self.trustor_project_id,
-            role_names=[self.delegated_role],
+            roles=[{'name': self.delegated_role}],
             impersonation=impersonate,
             expires_at=expires)['trust']
         self.trust_id = trust_create['id']
@@ -139,8 +139,8 @@
             self.assertEqual(self.delegated_role, trust['roles'][0]['name'])
             self.assertEqual(1, len(trust['roles']))
 
-    def get_trust(self):
-        trust_get = self.trustor_client.get_trust(self.trust_id)['trust']
+    def show_trust(self):
+        trust_get = self.trustor_client.show_trust(self.trust_id)['trust']
         return trust_get
 
     def validate_role(self, role):
@@ -155,12 +155,12 @@
 
     def check_trust_roles(self):
         # Check we find the delegated role
-        roles_get = self.trustor_client.get_trust_roles(
+        roles_get = self.trustor_client.list_trust_roles(
             self.trust_id)['roles']
         self.assertEqual(1, len(roles_get))
         self.validate_role(roles_get[0])
 
-        role_get = self.trustor_client.get_trust_role(
+        role_get = self.trustor_client.show_trust_role(
             self.trust_id, self.delegated_role_id)['role']
         self.validate_role(role_get)
 
@@ -169,7 +169,7 @@
 
         # And that we don't find not_delegated_role
         self.assertRaises(lib_exc.NotFound,
-                          self.trustor_client.get_trust_role,
+                          self.trustor_client.show_trust_role,
                           self.trust_id,
                           self.not_delegated_role_id)
 
@@ -181,7 +181,7 @@
     def delete_trust(self):
         self.trustor_client.delete_trust(self.trust_id)
         self.assertRaises(lib_exc.NotFound,
-                          self.trustor_client.get_trust,
+                          self.trustor_client.show_trust,
                           self.trust_id)
         self.trust_id = None
 
@@ -200,7 +200,7 @@
         trust = self.create_trust()
         self.validate_trust(trust)
 
-        trust_get = self.get_trust()
+        trust_get = self.show_trust()
         self.validate_trust(trust_get)
 
         self.check_trust_roles()
@@ -212,7 +212,7 @@
         trust = self.create_trust(impersonate=False)
         self.validate_trust(trust, impersonate=False)
 
-        trust_get = self.get_trust()
+        trust_get = self.show_trust()
         self.validate_trust(trust_get, impersonate=False)
 
         self.check_trust_roles()
@@ -236,7 +236,7 @@
         trust = self.create_trust(expires=expires_str)
         self.validate_trust(trust, expires=expires_str)
 
-        trust_get = self.get_trust()
+        trust_get = self.show_trust()
 
         self.validate_trust(trust_get, expires=expires_str)
 
@@ -255,7 +255,7 @@
     @test.idempotent_id('6268b345-87ca-47c0-9ce3-37792b43403a')
     def test_get_trusts_query(self):
         self.create_trust()
-        trusts_get = self.trustor_client.get_trusts(
+        trusts_get = self.trustor_client.list_trusts(
             trustor_user_id=self.trustor_user_id)['trusts']
         self.assertEqual(1, len(trusts_get))
         self.validate_trust(trusts_get[0], summary=True)
@@ -264,7 +264,7 @@
     @test.idempotent_id('4773ebd5-ecbf-4255-b8d8-b63e6f72b65d')
     def test_get_trusts_all(self):
         self.create_trust()
-        trusts_get = self.client.get_trusts()['trusts']
+        trusts_get = self.client.list_trusts()['trusts']
         trusts = [t for t in trusts_get
                   if t['id'] == self.trust_id]
         self.assertEqual(1, len(trusts))
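A minimal sketch of the updated trust calls, assuming trustor_client was built from the trustor's credentials as above and the IDs and role name already exist (the function name is illustrative only):

    def create_and_inspect_trust(trustor_client, trustor_user_id,
                                 trustee_user_id, project_id, role_name):
        trust = trustor_client.create_trust(
            trustor_user_id=trustor_user_id,
            trustee_user_id=trustee_user_id,
            project_id=project_id,
            # roles is now a list of role dicts instead of role_names.
            roles=[{'name': role_name}],
            impersonation=True,
            expires_at=None)['trust']
        # get_trust/get_trusts are exposed as show_trust/list_trusts.
        fetched = trustor_client.show_trust(trust['id'])['trust']
        listed = trustor_client.list_trusts(
            trustor_user_id=trustor_user_id)['trusts']
        trustor_client.delete_trust(trust['id'])
        return fetched, listed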
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 8fac0b3..7c0c223 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import test
@@ -27,7 +29,7 @@
         u_name = data_utils.rand_name('user')
         u_desc = u_name + 'description'
         u_email = u_name + '@testmail.tm'
-        u_password = data_utils.rand_name('pass')
+        u_password = data_utils.rand_password()
         user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
             email=u_email, enabled=False)['user']
@@ -54,7 +56,7 @@
         self.assertEqual(u_email2, update_user['email'])
         self.assertEqual(False, update_user['enabled'])
         # GET by id after the update
-        new_user_get = self.client.get_user(user['id'])['user']
+        new_user_get = self.client.show_user(user['id'])['user']
         # Assert response body of GET after the update
         self.assertEqual(u_name2, new_user_get['name'])
         self.assertEqual(u_description2, new_user_get['description'])
@@ -67,20 +69,28 @@
     def test_update_user_password(self):
         # Creating User to check password update
         u_name = data_utils.rand_name('user')
-        original_password = data_utils.rand_name('pass')
+        original_password = data_utils.rand_password()
         user = self.client.create_user(
             u_name, password=original_password)['user']
         # Delete the User at the end of all test methods
         self.addCleanup(self.client.delete_user, user['id'])
         # Update user with new password
-        new_password = data_utils.rand_name('pass1')
-        self.client.update_user_password(user['id'], new_password,
-                                         original_password)
+        new_password = data_utils.rand_password()
+        self.client.update_user_password(user['id'], password=new_password,
+                                         original_password=original_password)
+        # TODO(lbragstad): Sleeping after the response status has been checked
+        # and the body loaded as JSON allows requests to fail-fast. The sleep
+        # is necessary because keystone will err on the side of security and
+        # invalidate tokens within a small margin of error (within the same
+        # wall clock second) after a revocation event is issued (such as a
+        # password change). Remove this once keystone and Fernet support
+        # sub-second precision, see bug 1517697 for more details.
+        time.sleep(1)
         resp = self.token.auth(user_id=user['id'],
                                password=new_password).response
         subject_token = resp['x-subject-token']
         # Perform GET Token to verify and confirm password is updated
-        token_details = self.client.get_token(subject_token)['token']
+        token_details = self.client.show_token(subject_token)['token']
         self.assertEqual(resp['x-subject-token'], subject_token)
         self.assertEqual(token_details['user']['id'], user['id'])
         self.assertEqual(token_details['user']['name'], u_name)
@@ -99,7 +109,7 @@
         u_name = data_utils.rand_name('user')
         u_desc = u_name + 'description'
         u_email = u_name + '@testmail.tm'
-        u_password = data_utils.rand_name('pass')
+        u_password = data_utils.rand_password()
         user_body = self.client.create_user(
             u_name, description=u_desc, password=u_password,
             email=u_email, enabled=False, project_id=u_project['id'])['user']
@@ -107,18 +117,18 @@
         self.addCleanup(self.client.delete_user, user_body['id'])
         # Creating Role
         role_body = self.client.create_role(
-            data_utils.rand_name('role'))['role']
+            name=data_utils.rand_name('role'))['role']
         # Delete the Role at the end of this method
         self.addCleanup(self.client.delete_role, role_body['id'])
 
-        user = self.client.get_user(user_body['id'])['user']
-        role = self.client.get_role(role_body['id'])['role']
+        user = self.client.show_user(user_body['id'])['user']
+        role = self.client.show_role(role_body['id'])['role']
         for i in range(2):
             # Creating project so as to assign role
             project_body = self.client.create_project(
                 data_utils.rand_name('project'),
                 description=data_utils.rand_name('project-desc'))['project']
-            project = self.client.get_project(project_body['id'])['project']
+            project = self.client.show_project(project_body['id'])['project']
             # Delete the Project at the end of this method
             self.addCleanup(self.client.delete_project, project_body['id'])
             # Assigning roles to user on project
@@ -141,6 +151,6 @@
     @test.idempotent_id('c10dcd90-461d-4b16-8e23-4eb836c00644')
     def test_get_user(self):
         # Get a user detail
-        self.data.setup_test_v3_user()
-        user = self.client.get_user(self.data.v3_user['id'])['user']
-        self.assertEqual(self.data.v3_user['id'], user['id'])
+        self.data.setup_test_user()
+        user = self.client.show_user(self.data.user['id'])['user']
+        self.assertEqual(self.data.user['id'], user['id'])
diff --git a/tempest/api/identity/admin/v3/test_users_negative.py b/tempest/api/identity/admin/v3/test_users_negative.py
new file mode 100644
index 0000000..39c89a5
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_users_negative.py
@@ -0,0 +1,46 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest_lib import exceptions as lib_exc
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class UsersNegativeTest(base.BaseIdentityV3AdminTest):
+
+    @test.attr(type=['negative'])
+    @test.idempotent_id('e75f006c-89cc-477b-874d-588e4eab4b17')
+    def test_create_user_for_non_existent_domain(self):
+        # Attempting to create a user in a non-existent domain should fail
+        u_name = data_utils.rand_name('user')
+        u_email = u_name + '@testmail.tm'
+        u_password = data_utils.rand_password()
+        self.assertRaises(lib_exc.NotFound, self.client.create_user,
+                          u_name, u_password,
+                          email=u_email,
+                          domain_id=data_utils.rand_uuid_hex())
+
+    @test.attr(type=['negative'])
+    @test.idempotent_id('b3c9fccc-4134-46f5-b600-1da6fb0a3b1f')
+    def test_authentication_for_disabled_user(self):
+        # Attempting to authenticate as a disabled user should fail
+        self.data.setup_test_user()
+        self.disable_user(self.data.user['name'])
+        self.assertRaises(lib_exc.Unauthorized, self.token.auth,
+                          username=self.data.user['name'],
+                          password=self.data.user_password,
+                          user_domain_id='default')
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 95826b0..d31569b 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -16,7 +16,6 @@
 from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 
-from tempest.common import cred_provider
 from tempest.common.utils import data_utils
 from tempest import config
 import tempest.test
@@ -30,16 +29,16 @@
     @classmethod
     def disable_user(cls, user_name):
         user = cls.get_user_by_name(user_name)
-        cls.client.enable_disable_user(user['id'], False)
+        cls.users_client.enable_disable_user(user['id'], enabled=False)
 
     @classmethod
     def disable_tenant(cls, tenant_name):
         tenant = cls.get_tenant_by_name(tenant_name)
-        cls.client.update_tenant(tenant['id'], enabled=False)
+        cls.tenants_client.update_tenant(tenant['id'], enabled=False)
 
     @classmethod
     def get_user_by_name(cls, name):
-        users = cls.client.get_users()['users']
+        users = cls.users_client.list_users()['users']
         user = [u for u in users if u['name'] == name]
         if len(user) > 0:
             return user[0]
@@ -47,7 +46,7 @@
     @classmethod
     def get_tenant_by_name(cls, name):
         try:
-            tenants = cls.client.list_tenants()['tenants']
+            tenants = cls.tenants_client.list_tenants()['tenants']
         except AttributeError:
             tenants = cls.client.list_projects()['projects']
         tenant = [t for t in tenants if t['name'] == name]
@@ -56,7 +55,7 @@
 
     @classmethod
     def get_role_by_name(cls, name):
-        roles = cls.client.list_roles()['roles']
+        roles = cls.roles_client.list_roles()['roles']
         role = [r for r in roles if r['name'] == name]
         if len(role) > 0:
             return role[0]
@@ -75,6 +74,8 @@
         super(BaseIdentityV2Test, cls).setup_clients()
         cls.non_admin_client = cls.os.identity_public_client
         cls.non_admin_token_client = cls.os.token_client
+        cls.non_admin_tenants_client = cls.os.tenants_public_client
+        cls.non_admin_users_client = cls.os.users_public_client
 
     @classmethod
     def resource_setup(cls):
@@ -95,11 +96,20 @@
         cls.client = cls.os_adm.identity_client
         cls.non_admin_client = cls.os.identity_client
         cls.token_client = cls.os_adm.token_client
+        cls.tenants_client = cls.os_adm.tenants_client
+        cls.non_admin_tenants_client = cls.os.tenants_client
+        cls.roles_client = cls.os_adm.roles_client
+        cls.non_admin_roles_client = cls.os.roles_client
+        cls.users_client = cls.os_adm.users_client
+        cls.non_admin_users_client = cls.os.users_client
+        cls.services_client = cls.os_adm.services_v2_client
+        cls.endpoints_client = cls.os_adm.endpoints_v2_client
 
     @classmethod
     def resource_setup(cls):
         super(BaseIdentityV2AdminTest, cls).resource_setup()
-        cls.data = DataGenerator(cls.client)
+        cls.data = DataGeneratorV2(cls.client, cls.tenants_client,
+                                   cls.users_client, cls.roles_client)
 
     @classmethod
     def resource_cleanup(cls):
@@ -120,11 +130,6 @@
         super(BaseIdentityV3Test, cls).setup_clients()
         cls.non_admin_client = cls.os.identity_v3_client
         cls.non_admin_token = cls.os.token_v3_client
-        cls.non_admin_endpoints_client = cls.os.endpoints_client
-        cls.non_admin_region_client = cls.os.region_client
-        cls.non_admin_service_client = cls.os.service_client
-        cls.non_admin_policy_client = cls.os.policy_client
-        cls.non_admin_creds_client = cls.os.credentials_client
 
     @classmethod
     def resource_cleanup(cls):
@@ -141,11 +146,16 @@
         cls.client = cls.os_adm.identity_v3_client
         cls.token = cls.os_adm.token_v3_client
         cls.endpoints_client = cls.os_adm.endpoints_client
-        cls.region_client = cls.os_adm.region_client
-        cls.data = DataGenerator(cls.client)
-        cls.service_client = cls.os_adm.service_client
-        cls.policy_client = cls.os_adm.policy_client
+        cls.regions_client = cls.os_adm.regions_client
+        cls.services_client = cls.os_adm.identity_services_client
+        cls.policies_client = cls.os_adm.policies_client
         cls.creds_client = cls.os_adm.credentials_client
+        cls.groups_client = cls.os_adm.groups_client
+
+    @classmethod
+    def resource_setup(cls):
+        super(BaseIdentityV3AdminTest, cls).resource_setup()
+        cls.data = DataGeneratorV3(cls.client)
 
     @classmethod
     def resource_cleanup(cls):
@@ -154,7 +164,7 @@
 
     @classmethod
     def get_user_by_name(cls, name):
-        users = cls.client.get_users()['users']
+        users = cls.client.list_users()['users']
         user = [u for u in users if u['name'] == name]
         if len(user) > 0:
             return user[0]
@@ -173,6 +183,11 @@
         if len(role) > 0:
             return role[0]
 
+    @classmethod
+    def disable_user(cls, user_name):
+        user = cls.get_user_by_name(user_name)
+        cls.client.update_user(user['id'], user_name, enabled=False)
+
     def delete_domain(self, domain_id):
         # NOTE(mpavlase) It is necessary to disable the domain before deleting
         # otherwise it raises Forbidden exception
@@ -180,121 +195,98 @@
         self.client.delete_domain(domain_id)
 
 
-class DataGenerator(object):
+class BaseDataGenerator(object):
 
-        def __init__(self, client):
-            self.client = client
-            self.users = []
-            self.tenants = []
-            self.roles = []
-            self.role_name = None
-            self.v3_users = []
-            self.projects = []
-            self.v3_roles = []
-            self.domains = []
+    def __init__(self, client, projects_client=None,
+                 users_client=None, roles_client=None):
+        self.client = client
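+        # Fall back to the main identity client when dedicated project,
+        # user or role clients are not supplied (as the v3 generator does).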
+        self.projects_client = projects_client or client
+        self.users_client = users_client or client
+        self.roles_client = roles_client or client
 
-        @property
-        def test_credentials(self):
-            return cred_provider.get_credentials(username=self.test_user,
-                                                 user_id=self.user['id'],
-                                                 password=self.test_password,
-                                                 tenant_name=self.test_tenant,
-                                                 tenant_id=self.tenant['id'])
+        self.user_password = None
+        self.user = None
+        self.tenant = None
+        self.project = None
+        self.role = None
+        self.domain = None
 
-        def setup_test_user(self):
-            """Set up a test user."""
-            self.setup_test_tenant()
-            self.test_user = data_utils.rand_name('test_user')
-            self.test_password = data_utils.rand_name('pass')
-            self.test_email = self.test_user + '@testmail.tm'
-            self.user = self.client.create_user(self.test_user,
-                                                self.test_password,
-                                                self.tenant['id'],
-                                                self.test_email)['user']
-            self.users.append(self.user)
+        self.users = []
+        self.tenants = []
+        self.projects = []
+        self.roles = []
+        self.domains = []
 
-        def setup_test_tenant(self):
-            """Set up a test tenant."""
-            self.test_tenant = data_utils.rand_name('test_tenant')
-            self.test_description = data_utils.rand_name('desc')
-            self.tenant = self.client.create_tenant(
-                name=self.test_tenant,
-                description=self.test_description)['tenant']
-            self.tenants.append(self.tenant)
+    def _create_test_user(self, **kwargs):
+        username = data_utils.rand_name('test_user')
+        self.user_password = data_utils.rand_password()
+        self.user = self.users_client.create_user(
+            username, password=self.user_password,
+            email=username + '@testmail.tm', **kwargs)['user']
+        self.users.append(self.user)
 
-        def setup_test_role(self):
-            """Set up a test role."""
-            self.test_role = data_utils.rand_name('role')
-            self.role = self.client.create_role(self.test_role)['role']
-            self.roles.append(self.role)
+    def setup_test_role(self):
+        """Set up a test role."""
+        self.role = self.roles_client.create_role(
+            name=data_utils.rand_name('test_role'))['role']
+        self.roles.append(self.role)
 
-        def setup_test_v3_user(self):
-            """Set up a test v3 user."""
-            self.setup_test_project()
-            self.test_user = data_utils.rand_name('test_user')
-            self.test_password = data_utils.rand_name('pass')
-            self.test_email = self.test_user + '@testmail.tm'
-            self.v3_user = self.client.create_user(
-                self.test_user,
-                password=self.test_password,
-                project_id=self.project['id'],
-                email=self.test_email)['user']
-            self.v3_users.append(self.v3_user)
+    @staticmethod
+    def _try_wrapper(func, item, **kwargs):
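+        # Best-effort cleanup: a NotFound means the resource is already
+        # gone, and any other failure is logged but not re-raised.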
+        try:
+            func(item['id'], **kwargs)
+        except lib_exc.NotFound:
+            pass
+        except Exception:
+            LOG.exception("Unexpected exception occurred in %s deletion, "
+                          "but it is ignored here." % item['id'])
 
-        def setup_test_project(self):
-            """Set up a test project."""
-            self.test_project = data_utils.rand_name('test_project')
-            self.test_description = data_utils.rand_name('desc')
-            self.project = self.client.create_project(
-                name=self.test_project,
-                description=self.test_description)['project']
-            self.projects.append(self.project)
+    def teardown_all(self):
+        for user in self.users:
+            self._try_wrapper(self.users_client.delete_user, user)
+        for tenant in self.tenants:
+            self._try_wrapper(self.projects_client.delete_tenant, tenant)
+        for project in self.projects:
+            self._try_wrapper(self.projects_client.delete_project, project)
+        for role in self.roles:
+            self._try_wrapper(self.roles_client.delete_role, role)
+        for domain in self.domains:
+            self._try_wrapper(self.client.update_domain, domain, enabled=False)
+            self._try_wrapper(self.client.delete_domain, domain)
 
-        def setup_test_v3_role(self):
-            """Set up a test v3 role."""
-            self.test_role = data_utils.rand_name('role')
-            self.v3_role = self.client.create_role(self.test_role)['role']
-            self.v3_roles.append(self.v3_role)
 
-        def setup_test_domain(self):
-            """Set up a test domain."""
-            self.test_domain = data_utils.rand_name('test_domain')
-            self.test_description = data_utils.rand_name('desc')
-            self.domain = self.client.create_domain(
-                name=self.test_domain,
-                description=self.test_description)['domain']
-            self.domains.append(self.domain)
+class DataGeneratorV2(BaseDataGenerator):
 
-        @staticmethod
-        def _try_wrapper(func, item, **kwargs):
-            try:
-                if kwargs:
-                    func(item['id'], **kwargs)
-                else:
-                    func(item['id'])
-            except lib_exc.NotFound:
-                pass
-            except Exception:
-                LOG.exception("Unexpected exception occurred in %s deletion."
-                              " But ignored here." % item['id'])
+    def setup_test_user(self):
+        """Set up a test user."""
+        self.setup_test_tenant()
+        self._create_test_user(tenant_id=self.tenant['id'])
 
-        def teardown_all(self):
-            # NOTE(masayukig): v3 client doesn't have v2 method.
-            # (e.g. delete_tenant) So we need to check resources existence
-            # before using client methods.
-            for user in self.users:
-                self._try_wrapper(self.client.delete_user, user)
-            for tenant in self.tenants:
-                self._try_wrapper(self.client.delete_tenant, tenant)
-            for role in self.roles:
-                self._try_wrapper(self.client.delete_role, role)
-            for v3_user in self.v3_users:
-                self._try_wrapper(self.client.delete_user, v3_user)
-            for v3_project in self.projects:
-                self._try_wrapper(self.client.delete_project, v3_project)
-            for v3_role in self.v3_roles:
-                self._try_wrapper(self.client.delete_role, v3_role)
-            for domain in self.domains:
-                self._try_wrapper(self.client.update_domain, domain,
-                                  enabled=False)
-                self._try_wrapper(self.client.delete_domain, domain)
+    def setup_test_tenant(self):
+        """Set up a test tenant."""
+        self.tenant = self.projects_client.create_tenant(
+            name=data_utils.rand_name('test_tenant'),
+            description=data_utils.rand_name('desc'))['tenant']
+        self.tenants.append(self.tenant)
+
+
+class DataGeneratorV3(BaseDataGenerator):
+
+    def setup_test_user(self):
+        """Set up a test user."""
+        self.setup_test_project()
+        self._create_test_user(project_id=self.project['id'])
+
+    def setup_test_project(self):
+        """Set up a test project."""
+        self.project = self.projects_client.create_project(
+            name=data_utils.rand_name('test_project'),
+            description=data_utils.rand_name('desc'))['project']
+        self.projects.append(self.project)
+
+    def setup_test_domain(self):
+        """Set up a test domain."""
+        self.domain = self.client.create_domain(
+            name=data_utils.rand_name('test_domain'),
+            description=data_utils.rand_name('desc'))['domain']
+        self.domains.append(self.domain)
diff --git a/tempest/api/identity/v2/test_api_discovery.py b/tempest/api/identity/v2/test_api_discovery.py
index 57c78ef..ca807a4 100644
--- a/tempest/api/identity/v2/test_api_discovery.py
+++ b/tempest/api/identity/v2/test_api_discovery.py
@@ -23,7 +23,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('ea889a68-a15f-4166-bfb1-c12456eae853')
     def test_api_version_resources(self):
-        descr = self.non_admin_client.get_api_description()['version']
+        descr = self.non_admin_client.show_api_description()['version']
         expected_resources = ('id', 'links', 'media-types', 'status',
                               'updated')
 
@@ -34,7 +34,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('007a0be0-78fe-4fdb-bbee-e9216cc17bb2')
     def test_api_media_types(self):
-        descr = self.non_admin_client.get_api_description()['version']
+        descr = self.non_admin_client.show_api_description()['version']
         # Get MIME type bases and descriptions
         media_types = [(media_type['base'], media_type['type']) for
                        media_type in descr['media-types']]
@@ -49,7 +49,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('77fd6be0-8801-48e6-b9bf-38cdd2f253ec')
     def test_api_version_statuses(self):
-        descr = self.non_admin_client.get_api_description()['version']
+        descr = self.non_admin_client.show_api_description()['version']
         status = descr['status'].lower()
         supported_statuses = ['current', 'stable', 'experimental',
                               'supported', 'deprecated']
diff --git a/tempest/api/identity/v2/test_ec2_credentials.py b/tempest/api/identity/v2/test_ec2_credentials.py
index 763d8de..bd49326 100644
--- a/tempest/api/identity/v2/test_ec2_credentials.py
+++ b/tempest/api/identity/v2/test_ec2_credentials.py
@@ -36,12 +36,12 @@
     @test.idempotent_id('b580fab9-7ae9-46e8-8138-417260cb6f9f')
     def test_create_ec2_credentials(self):
         """Create user ec2 credentials."""
-        resp = self.non_admin_client.create_user_ec2_credentials(
+        resp = self.non_admin_users_client.create_user_ec2_credentials(
             self.creds.credentials.user_id,
-            self.creds.credentials.tenant_id)["credential"]
+            tenant_id=self.creds.credentials.tenant_id)["credential"]
         access = resp['access']
         self.addCleanup(
-            self.non_admin_client.delete_user_ec2_credentials,
+            self.non_admin_users_client.delete_user_ec2_credentials,
             self.creds.credentials.user_id, access)
         self.assertNotEmpty(resp['access'])
         self.assertNotEmpty(resp['secret'])
@@ -54,24 +54,24 @@
         created_creds = []
         fetched_creds = []
         # create first ec2 credentials
-        creds1 = self.non_admin_client.create_user_ec2_credentials(
+        creds1 = self.non_admin_users_client.create_user_ec2_credentials(
             self.creds.credentials.user_id,
-            self.creds.credentials.tenant_id)["credential"]
+            tenant_id=self.creds.credentials.tenant_id)["credential"]
         created_creds.append(creds1['access'])
         # create second ec2 credentials
-        creds2 = self.non_admin_client.create_user_ec2_credentials(
+        creds2 = self.non_admin_users_client.create_user_ec2_credentials(
             self.creds.credentials.user_id,
-            self.creds.credentials.tenant_id)["credential"]
+            tenant_id=self.creds.credentials.tenant_id)["credential"]
         created_creds.append(creds2['access'])
         # add credentials to be cleaned up
         self.addCleanup(
-            self.non_admin_client.delete_user_ec2_credentials,
+            self.non_admin_users_client.delete_user_ec2_credentials,
             self.creds.credentials.user_id, creds1['access'])
         self.addCleanup(
-            self.non_admin_client.delete_user_ec2_credentials,
+            self.non_admin_users_client.delete_user_ec2_credentials,
             self.creds.credentials.user_id, creds2['access'])
         # get the list of user ec2 credentials
-        resp = self.non_admin_client.list_user_ec2_credentials(
+        resp = self.non_admin_users_client.list_user_ec2_credentials(
             self.creds.credentials.user_id)["credentials"]
         fetched_creds = [cred['access'] for cred in resp]
         # created credentials should be in a fetched list
@@ -84,14 +84,14 @@
     @test.idempotent_id('cb284075-b613-440d-83ca-fe0b33b3c2b8')
     def test_show_ec2_credentials(self):
         """Get the definite user ec2 credentials."""
-        resp = self.non_admin_client.create_user_ec2_credentials(
+        resp = self.non_admin_users_client.create_user_ec2_credentials(
             self.creds.credentials.user_id,
-            self.creds.credentials.tenant_id)["credential"]
+            tenant_id=self.creds.credentials.tenant_id)["credential"]
         self.addCleanup(
-            self.non_admin_client.delete_user_ec2_credentials,
+            self.non_admin_users_client.delete_user_ec2_credentials,
             self.creds.credentials.user_id, resp['access'])
 
-        ec2_creds = self.non_admin_client.show_user_ec2_credentials(
+        ec2_creds = self.non_admin_users_client.show_user_ec2_credentials(
             self.creds.credentials.user_id, resp['access']
         )["credential"]
         for key in ['access', 'secret', 'user_id', 'tenant_id']:
@@ -100,14 +100,14 @@
     @test.idempotent_id('6aba0d4c-b76b-4e46-aa42-add79bc1551d')
     def test_delete_ec2_credentials(self):
         """Delete user ec2 credentials."""
-        resp = self.non_admin_client.create_user_ec2_credentials(
+        resp = self.non_admin_users_client.create_user_ec2_credentials(
             self.creds.credentials.user_id,
-            self.creds.credentials.tenant_id)["credential"]
+            tenant_id=self.creds.credentials.tenant_id)["credential"]
         access = resp['access']
-        self.non_admin_client.delete_user_ec2_credentials(
+        self.non_admin_users_client.delete_user_ec2_credentials(
             self.creds.credentials.user_id, access)
         self.assertRaises(
             lib_exc.NotFound,
-            self.non_admin_client.show_user_ec2_credentials,
+            self.non_admin_users_client.show_user_ec2_credentials,
             self.creds.credentials.user_id,
             access)
diff --git a/tempest/api/identity/v2/test_tenants.py b/tempest/api/identity/v2/test_tenants.py
index 1fcff8d..4e31557 100644
--- a/tempest/api/identity/v2/test_tenants.py
+++ b/tempest/api/identity/v2/test_tenants.py
@@ -26,7 +26,7 @@
     @test.idempotent_id('ecae2459-243d-4ba1-ad02-65f15dc82b78')
     def test_list_tenants_returns_only_authorized_tenants(self):
         alt_tenant_name = self.alt_manager.credentials.credentials.tenant_name
-        resp = self.non_admin_client.list_tenants()
+        resp = self.non_admin_tenants_client.list_tenants()
 
         # check that user can see only that tenants that he presents in so user
         # can successfully authenticate using his credentials and tenant name
diff --git a/tempest/api/identity/v2/test_users.py b/tempest/api/identity/v2/test_users.py
index 3b89b66..a59a1a0 100644
--- a/tempest/api/identity/v2/test_users.py
+++ b/tempest/api/identity/v2/test_users.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 import copy
+import time
 
 from tempest_lib.common.utils import data_utils
 from tempest_lib import exceptions
@@ -40,8 +41,9 @@
         # we need new non-admin Identity Client with new credentials, since
         # current non_admin_client token will be revoked after updating
         # password
-        self.non_admin_client_for_cleanup = copy.copy(self.non_admin_client)
-        self.non_admin_client_for_cleanup.auth_provider = (
+        self.non_admin_users_client_for_cleanup = copy.copy(
+            self.non_admin_users_client)
+        self.non_admin_users_client_for_cleanup.auth_provider = (
             manager.get_auth_provider(self.new_creds))
         user_id = self.creds.credentials.user_id
         old_pass = self.creds.credentials.password
@@ -49,17 +51,20 @@
 
         # to change password back. important for allow_tenant_isolation = false
         self.addCleanup(
-            self.non_admin_client_for_cleanup.update_user_own_password,
-            user_id=user_id,
-            new_pass=old_pass,
-            old_pass=new_pass)
-
+            self.non_admin_users_client_for_cleanup.update_user_own_password,
+            user_id, original_password=new_pass, password=old_pass)
         # user updates own password
-        resp = self.non_admin_client.update_user_own_password(
-            user_id=user_id, new_pass=new_pass, old_pass=old_pass)['access']
+        self.non_admin_users_client.update_user_own_password(
+            user_id, password=new_pass, original_password=old_pass)
+        # TODO(lbragstad): Sleeping after the response status has been checked
+        # and the body loaded as JSON allows requests to fail-fast. The sleep
+        # is necessary because keystone will err on the side of security and
+        # invalidate tokens within a small margin of error (within the same
+        # wall clock second) after a revocation event is issued (such as a
+        # password change). Remove this once keystone and Fernet support
+        # sub-second precision.
+        time.sleep(1)
 
-        # check authorization with new token
-        self.non_admin_token_client.auth_token(resp['token']['id'])
         # check authorization with new password
         self.non_admin_token_client.auth(self.username,
                                          new_pass,
@@ -68,7 +73,7 @@
         # authorize with old token should lead to Unauthorized
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_token_client.auth_token,
-                          self.non_admin_client.token)
+                          self.non_admin_users_client.token)
 
         # authorize with old password should lead to Unauthorized
         self.assertRaises(exceptions.Unauthorized,
diff --git a/tempest/api/identity/v3/test_api_discovery.py b/tempest/api/identity/v3/test_api_discovery.py
index e0207a9..74e9ec1 100644
--- a/tempest/api/identity/v3/test_api_discovery.py
+++ b/tempest/api/identity/v3/test_api_discovery.py
@@ -23,7 +23,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('b9232f5e-d9e5-4d97-b96c-28d3db4de1bd')
     def test_api_version_resources(self):
-        descr = self.non_admin_client.get_api_description()['version']
+        descr = self.non_admin_client.show_api_description()['version']
         expected_resources = ('id', 'links', 'media-types', 'status',
                               'updated')
 
@@ -34,7 +34,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('657c1970-4722-4189-8831-7325f3bc4265')
     def test_api_media_types(self):
-        descr = self.non_admin_client.get_api_description()['version']
+        descr = self.non_admin_client.show_api_description()['version']
         # Get MIME type bases and descriptions
         media_types = [(media_type['base'], media_type['type']) for
                        media_type in descr['media-types']]
@@ -49,7 +49,7 @@
     @test.attr(type='smoke')
     @test.idempotent_id('8879a470-abfb-47bb-bb8d-5a7fd279ad1e')
     def test_api_version_statuses(self):
-        descr = self.non_admin_client.get_api_description()['version']
+        descr = self.non_admin_client.show_api_description()['version']
         status = descr['status'].lower()
         supported_statuses = ['current', 'stable', 'experimental',
                               'supported', 'deprecated']
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index a1f664f..93814d3 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 import copy
+import time
 
 from tempest_lib.common.utils import data_utils
 from tempest_lib import exceptions
@@ -49,13 +50,22 @@
         # to change password back. important for allow_tenant_isolation = false
         self.addCleanup(
             self.non_admin_client_for_cleanup.update_user_password,
-            user_id=user_id,
+            user_id,
             password=old_pass,
             original_password=new_pass)
 
         # user updates own password
         self.non_admin_client.update_user_password(
-            user_id=user_id, password=new_pass, original_password=old_pass)
+            user_id, password=new_pass, original_password=old_pass)
+
+        # TODO(lbragstad): Sleeping after the response status has been checked
+        # and the body loaded as JSON allows requests to fail-fast. The sleep
+        # is necessary because keystone will err on the side of security and
+        # invalidate tokens within a small margin of error (within the same
+        # wall clock second) after a revocation event is issued (such as a
+        # password change). Remove this once keystone and Fernet support
+        # sub-second precision.
+        time.sleep(1)
 
         # check authorization with new password
         self.non_admin_token.auth(user_id=self.user_id, password=new_pass)
diff --git a/tempest/api/image/admin/v2/test_images.py b/tempest/api/image/admin/v2/test_images.py
index 09877ba..b171da3 100644
--- a/tempest/api/image/admin/v2/test_images.py
+++ b/tempest/api/image/admin/v2/test_images.py
@@ -26,10 +26,8 @@
 
 
 class BasicAdminOperationsImagesTest(base.BaseV2ImageAdminTest):
+    """Here we test admin operations of images"""
 
-    """
-    Here we test admin operations of images
-    """
     @testtools.skipUnless(CONF.image_feature_enabled.deactivate_image,
                           'deactivate-image is not available.')
     @test.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
@@ -51,12 +49,12 @@
         body = self.client.show_image(image_id)
         self.assertEqual("deactivated", body['status'])
         # non-admin user unable to download deactivated image
-        self.assertRaises(lib_exc.Forbidden, self.client.load_image_file,
+        self.assertRaises(lib_exc.Forbidden, self.client.show_image_file,
                           image_id)
         # reactivate image
         self.admin_client.reactivate_image(image_id)
         body = self.client.show_image(image_id)
         self.assertEqual("active", body['status'])
         # non-admin user able to download image after reactivation by admin
-        body = self.client.load_image_file(image_id)
+        body = self.client.show_image_file(image_id)
         self.assertEqual(content, body.data)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index da0ce83..ade7b67 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from six import moves
 from tempest_lib import exceptions as lib_exc
 
@@ -22,8 +21,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class BaseImageTest(tempest.test.BaseTestCase):
     """Base test class for Image API tests."""
@@ -62,16 +59,12 @@
     @classmethod
     def create_image(cls, **kwargs):
         """Wrapper that returns a test image."""
-        name = data_utils.rand_name(cls.__name__ + "-instance")
 
-        if 'name' in kwargs:
-            name = kwargs.pop('name')
+        if 'name' not in kwargs:
+            name = data_utils.rand_name(cls.__name__ + "-instance")
+            kwargs['name'] = name
 
-        container_format = kwargs.pop('container_format')
-        disk_format = kwargs.pop('disk_format')
-
-        image = cls.client.create_image(name, container_format,
-                                        disk_format, **kwargs)
+        image = cls.client.create_image(**kwargs)
         # Image objects returned by the v1 client have the image
         # data inside a dict that is keyed against 'image'.
         if 'image' in image:
@@ -156,7 +149,7 @@
 
     def _create_image(self):
         name = data_utils.rand_name('image')
-        image = self.os_img_client.create_image(name,
+        image = self.os_img_client.create_image(name=name,
                                                 container_format='bare',
                                                 disk_format='raw')
         image_id = image['id']
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index d4dbfcd..1a84d06 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -18,11 +18,27 @@
 from tempest.api.image import base
 from tempest.common.utils import data_utils
 from tempest import config
+from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
 
 
+def get_container_and_disk_format():
+    a_formats = ['ami', 'ari', 'aki']
+
+    container_format = CONF.image.container_formats[0]
+    disk_format = CONF.image.disk_formats[0]
+
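+    # Creating ami/ari/aki images here assumes the container and disk
+    # formats match, so a differing configuration is rejected as invalid.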
+    if container_format in a_formats and container_format != disk_format:
+        msg = ("The container format and the disk format don't match. "
+               "Contaiter format: %(container)s, Disk format: %(disk)s." %
+               {'container': container_format, 'disk': disk_format})
+        raise exceptions.InvalidConfiguration(message=msg)
+
+    return container_format, disk_format
+
+
 class CreateRegisterImagesTest(base.BaseV1ImageTest):
     """Here we test the registration and creation of images."""
 
@@ -30,9 +46,10 @@
     def test_register_then_upload(self):
         # Register, then upload an image
         properties = {'prop1': 'val1'}
+        container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Name',
-                                 container_format='bare',
-                                 disk_format='raw',
+                                 container_format=container_format,
+                                 disk_format=disk_format,
                                  is_public=False,
                                  properties=properties)
         self.assertIn('id', body)
@@ -52,9 +69,10 @@
     @test.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
     def test_register_remote_image(self):
         # Register a new remote image
+        container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Remote Image',
-                                 container_format='bare',
-                                 disk_format='raw', is_public=False,
+                                 container_format=container_format,
+                                 disk_format=disk_format, is_public=False,
                                  location=CONF.image.http_image,
                                  properties={'key1': 'value1',
                                              'key2': 'value2'})
@@ -68,9 +86,10 @@
 
     @test.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
     def test_register_http_image(self):
+        container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Http Image',
-                                 container_format='bare',
-                                 disk_format='raw', is_public=False,
+                                 container_format=container_format,
+                                 disk_format=disk_format, is_public=False,
                                  copy_from=CONF.image.http_image)
         self.assertIn('id', body)
         image_id = body.get('id')
@@ -82,10 +101,11 @@
     @test.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
     def test_register_image_with_min_ram(self):
         # Register an image with min ram
+        container_format, disk_format = get_container_and_disk_format()
         properties = {'prop1': 'val1'}
         body = self.create_image(name='New_image_with_min_ram',
-                                 container_format='bare',
-                                 disk_format='raw',
+                                 container_format=container_format,
+                                 disk_format=disk_format,
                                  is_public=False,
                                  min_ram=40,
                                  properties=properties)
@@ -100,28 +120,54 @@
 
 
 class ListImagesTest(base.BaseV1ImageTest):
+    """Here we test the listing of image information"""
 
-    """
-    Here we test the listing of image information
-    """
+    @classmethod
+    def skip_checks(cls):
+        super(ListImagesTest, cls).skip_checks()
+        if (len(CONF.image.container_formats) < 2
+           or len(CONF.image.disk_formats) < 2):
+            skip_msg = ("%s skipped as multiple container formats "
+                        "or disk formats are not available." % cls.__name__)
+            raise cls.skipException(skip_msg)
 
     @classmethod
     def resource_setup(cls):
         super(ListImagesTest, cls).resource_setup()
         # We add a few images here to test the listing functionality of
         # the images API
-        img1 = cls._create_remote_image('one', 'bare', 'raw')
-        img2 = cls._create_remote_image('two', 'ami', 'ami')
-        img3 = cls._create_remote_image('dup', 'bare', 'raw')
-        img4 = cls._create_remote_image('dup', 'bare', 'raw')
-        img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
-        img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
-        img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
-        img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
+        a_formats = ['ami', 'ari', 'aki']
+
+        (cls.container_format,
+         cls.container_format_alt) = CONF.image.container_formats[:2]
+        cls.disk_format, cls.disk_format_alt = CONF.image.disk_formats[:2]
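+        # ami/ari/aki container formats are expected to use the matching
+        # disk format, so override the configured disk format in that case.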
+        if cls.container_format in a_formats:
+            cls.disk_format = cls.container_format
+        if cls.container_format_alt in a_formats:
+            cls.disk_format_alt = cls.container_format_alt
+
+        img1 = cls._create_remote_image('one', cls.container_format,
+                                        cls.disk_format)
+        img2 = cls._create_remote_image('two', cls.container_format_alt,
+                                        cls.disk_format_alt)
+        img3 = cls._create_remote_image('dup', cls.container_format,
+                                        cls.disk_format)
+        img4 = cls._create_remote_image('dup', cls.container_format,
+                                        cls.disk_format)
+        img5 = cls._create_standard_image('1', cls.container_format_alt,
+                                          cls.disk_format_alt, 42)
+        img6 = cls._create_standard_image('2', cls.container_format_alt,
+                                          cls.disk_format_alt, 142)
+        img7 = cls._create_standard_image('33', cls.container_format,
+                                          cls.disk_format, 142)
+        img8 = cls._create_standard_image('33', cls.container_format,
+                                          cls.disk_format, 142)
         cls.created_set = set(cls.created_images)
-        # 5x bare, 3x ami
-        cls.bare_set = set((img1, img3, img4, img7, img8))
-        cls.ami_set = set((img2, img5, img6))
+        # same container format
+        cls.same_container_format_set = set((img1, img3, img4, img7, img8))
+        # same disk format
+        cls.same_disk_format_set = set((img2, img5, img6))
+
         # 1x with size 42
         cls.size42_set = set((img5,))
         # 3x with size 142
@@ -131,10 +177,8 @@
 
     @classmethod
     def _create_remote_image(cls, name, container_format, disk_format):
-        """
-        Create a new remote image and return the ID of the newly-registered
-        image
-        """
+        """Create a new remote image and return newly-registered image-id"""
+
         name = 'New Remote Image %s' % name
         location = CONF.image.http_image
         image = cls.create_image(name=name,
@@ -148,9 +192,9 @@
     @classmethod
     def _create_standard_image(cls, name, container_format,
                                disk_format, size):
-        """
-        Create a new standard image and return the ID of the newly-registered
-        image. Note that the size of the new image is a random number between
+        """Create a new standard image and return newly-registered image-id
+
+        Note that the size of the new image is a random number between
         1024 and 4096
         """
         image_file = moves.cStringIO(data_utils.random_bytes(size))
@@ -172,22 +216,25 @@
 
     @test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
     def test_index_disk_format(self):
-        images_list = self.client.list_images(disk_format='ami')['images']
+        images_list = self.client.list_images(
+            disk_format=self.disk_format_alt)['images']
         for image in images_list:
-            self.assertEqual(image['disk_format'], 'ami')
+            self.assertEqual(image['disk_format'], self.disk_format_alt)
         result_set = set(map(lambda x: x['id'], images_list))
-        self.assertTrue(self.ami_set <= result_set)
-        self.assertFalse(self.created_set - self.ami_set <= result_set)
+        self.assertTrue(self.same_disk_format_set <= result_set)
+        self.assertFalse(self.created_set - self.same_disk_format_set
+                         <= result_set)
 
     @test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
     def test_index_container_format(self):
-        images_list = (self.client.list_images(container_format='bare')
-                       ['images'])
+        images_list = self.client.list_images(
+            container_format=self.container_format)['images']
         for image in images_list:
-            self.assertEqual(image['container_format'], 'bare')
+            self.assertEqual(image['container_format'], self.container_format)
         result_set = set(map(lambda x: x['id'], images_list))
-        self.assertTrue(self.bare_set <= result_set)
-        self.assertFalse(self.created_set - self.bare_set <= result_set)
+        self.assertTrue(self.same_container_format_set <= result_set)
+        self.assertFalse(self.created_set - self.same_container_format_set
+                         <= result_set)
 
     @test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
     def test_index_max_size(self):
@@ -236,15 +283,15 @@
     @classmethod
     def resource_setup(cls):
         super(UpdateImageMetaTest, cls).resource_setup()
-        cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
+        container_format, disk_format = get_container_and_disk_format()
+        cls.image_id = cls._create_standard_image('1', container_format,
+                                                  disk_format, 42)
 
     @classmethod
     def _create_standard_image(cls, name, container_format,
                                disk_format, size):
-        """
-        Create a new standard image and return the ID of the newly-registered
-        image.
-        """
+        """Create a new standard image and return newly-registered image-id"""
+
         image_file = moves.cStringIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         image = cls.create_image(name=name,
diff --git a/tempest/api/image/v1/test_images_negative.py b/tempest/api/image/v1/test_images_negative.py
index 3d94408..f16b80e 100644
--- a/tempest/api/image/v1/test_images_negative.py
+++ b/tempest/api/image/v1/test_images_negative.py
@@ -27,13 +27,17 @@
     def test_register_with_invalid_container_format(self):
         # Negative tests for invalid data supplied to POST /images
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          'test', 'wrong', 'vhd')
+                          name='test',
+                          container_format='wrong',
+                          disk_format='vhd',)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('993face5-921d-4e84-aabf-c1bba4234a67')
     def test_register_with_invalid_disk_format(self):
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          'test', 'bare', 'wrong')
+                          name='test',
+                          container_format='bare',
+                          disk_format='wrong',)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('bb016f15-0820-4f27-a92d-09b2f67d2488')
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index bacf211..04582c6 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -29,17 +29,15 @@
 
 
 class BasicOperationsImagesTest(base.BaseV2ImageTest):
-    """
-    Here we test the basic operations of images
-    """
+    """Here we test the basic operations of images"""
 
     @test.attr(type='smoke')
     @test.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
     def test_register_upload_get_image_file(self):
+        """Here we test these functionalities
 
-        """
-        Here we test these functionalities - Register image,
-        upload the image file, get image and get image file api's
+        Register image, upload the image file, get image and get image
+        file APIs
         """
 
         uuid = '00000000-1111-2222-3333-444455556666'
@@ -74,7 +72,7 @@
         self.assertEqual(1024, body.get('size'))
 
         # Now try get image file
-        body = self.client.load_image_file(image_id)
+        body = self.client.show_image_file(image_id)
         self.assertEqual(file_content, body.data)
 
     @test.attr(type='smoke')
@@ -135,9 +133,7 @@
 
 
 class ListImagesTest(base.BaseV2ImageTest):
-    """
-    Here we test the listing of image information
-    """
+    """Here we test the listing of image information"""
 
     @classmethod
     def resource_setup(cls):
@@ -151,16 +147,16 @@
                      for disk_fmt in disk_fmts]
 
         for (container_fmt, disk_fmt) in all_pairs[:6]:
-            LOG.debug("Creating a image"
+            LOG.debug("Creating an image"
                       "(Container format: %s, Disk format: %s).",
                       container_fmt, disk_fmt)
             cls._create_standard_image(container_fmt, disk_fmt)
 
     @classmethod
     def _create_standard_image(cls, container_format, disk_format):
-        """
-        Create a new standard image and return the ID of the newly-registered
-        image. Note that the size of the new image is a random number between
+        """Create a new standard image and return the newly-registered image-id
+
+        Note that the size of the new image is a random number between
         1024 and 4096
         """
         size = random.randint(1024, 4096)
@@ -176,9 +172,8 @@
         return image_id
 
     def _list_by_param_value_and_assert(self, params):
-        """
-        Perform list action with given params and validates result.
-        """
+        """Perform list action with given params and validates result."""
+
         images_list = self.client.list_images(params=params)['images']
         # Validating params of fetched images
         for image in images_list:
diff --git a/tempest/api/image/v2/test_images_member.py b/tempest/api/image/v2/test_images_member.py
index d89803d..bb73318 100644
--- a/tempest/api/image/v2/test_images_member.py
+++ b/tempest/api/image/v2/test_images_member.py
@@ -19,15 +19,15 @@
     @test.idempotent_id('5934c6ea-27dc-4d6e-9421-eeb5e045494a')
     def test_image_share_accept(self):
         image_id = self._create_image()
-        member = self.os_img_client.add_image_member(image_id,
-                                                     self.alt_tenant_id)
+        member = self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.assertEqual(member['member_id'], self.alt_tenant_id)
         self.assertEqual(member['image_id'], image_id)
         self.assertEqual(member['status'], 'pending')
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
         self.alt_img_client.update_image_member(image_id,
                                                 self.alt_tenant_id,
-                                                {'status': 'accepted'})
+                                                status='accepted')
         self.assertIn(image_id, self._list_image_ids_as_alt())
         body = self.os_img_client.list_image_members(image_id)
         members = body['members']
@@ -40,25 +40,25 @@
     @test.idempotent_id('d9e83e5f-3524-4b38-a900-22abcb26e90e')
     def test_image_share_reject(self):
         image_id = self._create_image()
-        member = self.os_img_client.add_image_member(image_id,
-                                                     self.alt_tenant_id)
+        member = self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.assertEqual(member['member_id'], self.alt_tenant_id)
         self.assertEqual(member['image_id'], image_id)
         self.assertEqual(member['status'], 'pending')
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
         self.alt_img_client.update_image_member(image_id,
                                                 self.alt_tenant_id,
-                                                {'status': 'rejected'})
+                                                status='rejected')
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
 
     @test.idempotent_id('a6ee18b9-4378-465e-9ad9-9a6de58a3287')
     def test_get_image_member(self):
         image_id = self._create_image()
-        self.os_img_client.add_image_member(image_id,
-                                            self.alt_tenant_id)
+        self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.alt_img_client.update_image_member(image_id,
                                                 self.alt_tenant_id,
-                                                {'status': 'accepted'})
+                                                status='accepted')
 
         self.assertIn(image_id, self._list_image_ids_as_alt())
         member = self.os_img_client.show_image_member(image_id,
@@ -70,14 +70,14 @@
     @test.idempotent_id('72989bc7-2268-48ed-af22-8821e835c914')
     def test_remove_image_member(self):
         image_id = self._create_image()
-        self.os_img_client.add_image_member(image_id,
-                                            self.alt_tenant_id)
+        self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.alt_img_client.update_image_member(image_id,
                                                 self.alt_tenant_id,
-                                                {'status': 'accepted'})
+                                                status='accepted')
 
         self.assertIn(image_id, self._list_image_ids_as_alt())
-        self.os_img_client.remove_image_member(image_id, self.alt_tenant_id)
+        self.os_img_client.delete_image_member(image_id, self.alt_tenant_id)
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
 
     @test.idempotent_id('634dcc3f-f6e2-4409-b8fd-354a0bb25d83')
@@ -93,15 +93,15 @@
     @test.idempotent_id('cb961424-3f68-4d21-8e36-30ad66fb6bfb')
     def test_get_private_image(self):
         image_id = self._create_image()
-        member = self.os_img_client.add_image_member(image_id,
-                                                     self.alt_tenant_id)
+        member = self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.assertEqual(member['member_id'], self.alt_tenant_id)
         self.assertEqual(member['image_id'], image_id)
         self.assertEqual(member['status'], 'pending')
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
         self.alt_img_client.update_image_member(image_id,
                                                 self.alt_tenant_id,
-                                                {'status': 'accepted'})
+                                                status='accepted')
         self.assertIn(image_id, self._list_image_ids_as_alt())
-        self.os_img_client.remove_image_member(image_id, self.alt_tenant_id)
+        self.os_img_client.delete_image_member(image_id, self.alt_tenant_id)
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
diff --git a/tempest/api/image/v2/test_images_member_negative.py b/tempest/api/image/v2/test_images_member_negative.py
index ae8913c..eb90719 100644
--- a/tempest/api/image/v2/test_images_member_negative.py
+++ b/tempest/api/image/v2/test_images_member_negative.py
@@ -22,23 +22,23 @@
     @test.idempotent_id('b79efb37-820d-4cf0-b54c-308b00cf842c')
     def test_image_share_invalid_status(self):
         image_id = self._create_image()
-        member = self.os_img_client.add_image_member(image_id,
-                                                     self.alt_tenant_id)
+        member = self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.assertEqual(member['status'], 'pending')
         self.assertRaises(lib_exc.BadRequest,
                           self.alt_img_client.update_image_member,
                           image_id, self.alt_tenant_id,
-                          {'status': 'notavalidstatus'})
+                          status='notavalidstatus')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('27002f74-109e-4a37-acd0-f91cd4597967')
     def test_image_share_owner_cannot_accept(self):
         image_id = self._create_image()
-        member = self.os_img_client.add_image_member(image_id,
-                                                     self.alt_tenant_id)
+        member = self.os_img_client.create_image_member(
+            image_id, member=self.alt_tenant_id)
         self.assertEqual(member['status'], 'pending')
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
         self.assertRaises(lib_exc.Forbidden,
                           self.os_img_client.update_image_member,
-                          image_id, self.alt_tenant_id, {'status': 'accepted'})
+                          image_id, self.alt_tenant_id, status='accepted')
         self.assertNotIn(image_id, self._list_image_ids_as_alt())
diff --git a/tempest/api/image/v2/test_images_metadefs_namespaces.py b/tempest/api/image/v2/test_images_metadefs_namespaces.py
index 21247b1..efb7b8b 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespaces.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespaces.py
@@ -20,9 +20,8 @@
 
 
 class MetadataNamespacesTest(base.BaseV2ImageTest):
-    """
-    Here we will test the Metadata definition Namespaces basic functionality.
-    """
+    """Test the Metadata definition Namespaces basic functionality"""
+
     @test.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
     def test_basic_metadata_definition_namespaces(self):
         # get the available resource types and use one resource_type
@@ -30,42 +29,42 @@
         resource_name = body['resource_types'][0]['name']
         name = [{'name': resource_name}]
         namespace_name = data_utils.rand_name('namespace')
-        # create the metadef namespaces
-        body = self.client.create_namespaces(namespace=namespace_name,
-                                             visibility='public',
-                                             description='Tempest',
-                                             display_name=namespace_name,
-                                             resource_type_associations=name,
-                                             protected=True)
+        # create the metadef namespace
+        body = self.client.create_namespace(namespace=namespace_name,
+                                            visibility='public',
+                                            description='Tempest',
+                                            display_name=namespace_name,
+                                            resource_type_associations=name,
+                                            protected=True)
         self.addCleanup(self._cleanup_namespace, namespace_name)
-        # get namespaces details
-        body = self.client.show_namespaces(namespace_name)
+        # get namespace details
+        body = self.client.show_namespace(namespace_name)
         self.assertEqual(namespace_name, body['namespace'])
         self.assertEqual('public', body['visibility'])
         # unable to delete protected namespace
-        self.assertRaises(lib_exc.Forbidden, self.client.delete_namespaces,
+        self.assertRaises(lib_exc.Forbidden, self.client.delete_namespace,
                           namespace_name)
         # update the visibility to private and protected to False
-        body = self.client.update_namespaces(namespace=namespace_name,
-                                             description='Tempest',
-                                             visibility='private',
-                                             display_name=namespace_name,
-                                             protected=False)
+        body = self.client.update_namespace(namespace=namespace_name,
+                                            description='Tempest',
+                                            visibility='private',
+                                            display_name=namespace_name,
+                                            protected=False)
         self.assertEqual('private', body['visibility'])
         self.assertEqual(False, body['protected'])
         # now able to delete the non-protected namespace
-        self.client.delete_namespaces(namespace_name)
+        self.client.delete_namespace(namespace_name)
 
     def _cleanup_namespace(self, namespace_name):
         # this is used to cleanup the resources
         try:
-            body = self.client.show_namespaces(namespace_name)
+            body = self.client.show_namespace(namespace_name)
             self.assertEqual(namespace_name, body['namespace'])
-            body = self.client.update_namespaces(namespace=namespace_name,
-                                                 description='Tempest',
-                                                 visibility='private',
-                                                 display_name=namespace_name,
-                                                 protected=False)
-            self.client.delete_namespaces(namespace_name)
+            body = self.client.update_namespace(namespace=namespace_name,
+                                                description='Tempest',
+                                                visibility='private',
+                                                display_name=namespace_name,
+                                                protected=False)
+            self.client.delete_namespace(namespace_name)
         except lib_exc.NotFound:
             pass
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index c5c5e8b..485942e 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -24,8 +24,7 @@
 
 class ImagesNegativeTest(base.BaseV2ImageTest):
 
-    """
-    here we have -ve tests for show_image and delete_image api
+    """here we have -ve tests for show_image and delete_image api
 
     Tests
         ** get non-existent image
@@ -91,10 +90,12 @@
     def test_register_with_invalid_container_format(self):
         # Negative tests for invalid data supplied to POST /images
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          'test', 'wrong', 'vhd')
+                          name='test', container_format='wrong',
+                          disk_format='vhd')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
     def test_register_with_invalid_disk_format(self):
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          'test', 'bare', 'wrong')
+                          name='test', container_format='bare',
+                          disk_format='wrong')
diff --git a/tempest/api/messaging/base.py b/tempest/api/messaging/base.py
index 64a7fd5..a324c37 100644
--- a/tempest/api/messaging/base.py
+++ b/tempest/api/messaging/base.py
@@ -13,21 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from oslo_log import log as logging
-
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import test
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class BaseMessagingTest(test.BaseTestCase):
 
-    """
-    Base class for the Messaging tests that use the Tempest Zaqar REST client
+    """Base class for the Messaging (Zaqar) tests
 
     It is assumed that the following option is defined in the
     [service_available] section of etc/tempest.conf
@@ -147,7 +142,7 @@
     @classmethod
     def release_claim(cls, claim_uri):
         """Wrapper utility that deletes a claim."""
-        resp, body = cls.client.release_claim(claim_uri)
+        resp, body = cls.client.delete_claim(claim_uri)
 
         return resp, body
 
diff --git a/tempest/api/messaging/test_claims.py b/tempest/api/messaging/test_claims.py
index e54bed1..99edde1 100644
--- a/tempest/api/messaging/test_claims.py
+++ b/tempest/api/messaging/test_claims.py
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
-
 from six.moves.urllib import parse as urlparse
 from tempest_lib import decorators
 
@@ -24,7 +22,6 @@
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
 
@@ -115,7 +112,7 @@
         claim_uri = resp['location']
 
         # Release Claim
-        self.client.release_claim(claim_uri)
+        self.client.delete_claim(claim_uri)
 
         # Delete Claimed message
         # This will implicitly verify that the claim is deleted.
diff --git a/tempest/api/messaging/test_messages.py b/tempest/api/messaging/test_messages.py
index efbbf56..7f4182a 100644
--- a/tempest/api/messaging/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 
 from tempest.api.messaging import base
 from tempest.common.utils import data_utils
@@ -21,7 +20,6 @@
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
 
diff --git a/tempest/api/messaging/test_queues.py b/tempest/api/messaging/test_queues.py
index df49663..dcb5450 100644
--- a/tempest/api/messaging/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 
 from six import moves
 from tempest_lib import exceptions as lib_exc
@@ -24,9 +23,6 @@
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
-
-
 class TestQueues(base.BaseMessagingTest):
 
     @test.attr(type='smoke')
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 128398b..61f8e15 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -29,13 +29,13 @@
     @classmethod
     def resource_setup(cls):
         super(AgentManagementTestJSON, cls).resource_setup()
-        body = cls.admin_client.list_agents()
+        body = cls.admin_agents_client.list_agents()
         agents = body['agents']
         cls.agent = agents[0]
 
     @test.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4')
     def test_list_agent(self):
-        body = self.admin_client.list_agents()
+        body = self.admin_agents_client.list_agents()
         agents = body['agents']
         # Heartbeats must be excluded from comparison
         self.agent.pop('heartbeat_timestamp', None)
@@ -47,12 +47,12 @@
 
     @test.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
     def test_list_agents_non_admin(self):
-        body = self.client.list_agents()
+        body = self.agents_client.list_agents()
         self.assertEqual(len(body["agents"]), 0)
 
     @test.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
     def test_show_agent(self):
-        body = self.admin_client.show_agent(self.agent['id'])
+        body = self.admin_agents_client.show_agent(self.agent['id'])
         agent = body['agent']
         self.assertEqual(agent['id'], self.agent['id'])
 
@@ -62,8 +62,8 @@
         # Try to update the 'admin_state_up' to the original
         # one to avoid the negative effect.
         agent_status = {'admin_state_up': origin_status}
-        body = self.admin_client.update_agent(agent_id=self.agent['id'],
-                                              agent_info=agent_status)
+        body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
+                                                     agent=agent_status)
         updated_status = body['agent']['admin_state_up']
         self.assertEqual(origin_status, updated_status)
 
@@ -72,17 +72,16 @@
         self.useFixture(fixtures.LockFixture('agent_description'))
         description = 'description for update agent.'
         agent_description = {'description': description}
-        body = self.admin_client.update_agent(agent_id=self.agent['id'],
-                                              agent_info=agent_description)
+        body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
+                                                     agent=agent_description)
         self.addCleanup(self._restore_agent)
         updated_description = body['agent']['description']
         self.assertEqual(updated_description, description)
 
     def _restore_agent(self):
-        """
-        Restore the agent description after update test.
-        """
+        """Restore the agent description after update test"""
+
         description = self.agent['description'] or ''
         origin_agent = {'description': description}
-        self.admin_client.update_agent(agent_id=self.agent['id'],
-                                       agent_info=origin_agent)
+        self.admin_agents_client.update_agent(agent_id=self.agent['id'],
+                                              agent=origin_agent)
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 86b4973..fcb6fce 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -51,7 +51,7 @@
 
     def _check_network_in_dhcp_agent(self, network_id, agent):
         network_ids = []
-        body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
+        body = self.admin_agents_client.list_networks_hosted_by_one_dhcp_agent(
             agent['id'])
         networks = body['networks']
         for network in networks:
@@ -61,11 +61,11 @@
     @test.idempotent_id('a0856713-6549-470c-a656-e97c8df9a14d')
     def test_add_remove_network_from_dhcp_agent(self):
         # The agent is now bound to the network, we can free the port
-        self.client.delete_port(self.port['id'])
+        self.ports_client.delete_port(self.port['id'])
         self.ports.remove(self.port)
         agent = dict()
         agent['agent_type'] = None
-        body = self.admin_client.list_agents()
+        body = self.admin_agents_client.list_agents()
         agents = body['agents']
         for a in agents:
             if a['agent_type'] == 'DHCP agent':
@@ -84,14 +84,14 @@
             self._remove_network_from_dhcp_agent(network_id, agent)
 
     def _remove_network_from_dhcp_agent(self, network_id, agent):
-        self.admin_client.remove_network_from_dhcp_agent(
+        self.admin_agents_client.delete_network_from_dhcp_agent(
             agent_id=agent['id'],
             network_id=network_id)
         self.assertFalse(self._check_network_in_dhcp_agent(
             network_id, agent))
 
     def _add_dhcp_agent_to_network(self, network_id, agent):
-        self.admin_client.add_dhcp_agent_to_network(agent['id'],
-                                                    network_id)
+        self.admin_agents_client.add_dhcp_agent_to_network(
+            agent['id'], network_id=network_id)
         self.assertTrue(self._check_network_in_dhcp_agent(
             network_id, agent))
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
index ac53587..a32bfbc 100644
--- a/tempest/api/network/admin/test_external_network_extension.py
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -91,12 +91,9 @@
 
     @test.idempotent_id('82068503-2cf2-4ed4-b3be-ecb89432e4bb')
     def test_delete_external_networks_with_floating_ip(self):
-        """Verifies external network can be deleted while still holding
-        (unassociated) floating IPs
+        # Verifies external network can be deleted while still holding
+        # (unassociated) floating IPs
 
-        """
-        # Set cls.client to admin to use base.create_subnet()
-        client = self.admin_client
         body = self.admin_networks_client.create_network(
             **{'router:external': True})
         external_network = body['network']
@@ -106,19 +103,19 @@
         subnet = self.create_subnet(
             external_network, client=self.admin_subnets_client,
             enable_dhcp=False)
-        body = client.create_floatingip(
+        body = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=external_network['id'])
         created_floating_ip = body['floatingip']
         self.addCleanup(self._try_delete_resource,
-                        client.delete_floatingip,
+                        self.admin_floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
-        floatingip_list = client.list_floatingips(
+        floatingip_list = self.admin_floating_ips_client.list_floatingips(
             network=external_network['id'])
         self.assertIn(created_floating_ip['id'],
                       (f['id'] for f in floatingip_list['floatingips']))
         self.admin_networks_client.delete_network(external_network['id'])
         # Verifies floating ip is deleted
-        floatingip_list = client.list_floatingips()
+        floatingip_list = self.admin_floating_ips_client.list_floatingips()
         self.assertNotIn(created_floating_ip['id'],
                          (f['id'] for f in floatingip_list['floatingips']))
         # Verifies subnet is deleted
diff --git a/tempest/api/network/admin/test_external_networks_negative.py b/tempest/api/network/admin/test_external_networks_negative.py
index c2fa0dd..d031108 100644
--- a/tempest/api/network/admin/test_external_networks_negative.py
+++ b/tempest/api/network/admin/test_external_networks_negative.py
@@ -27,19 +27,16 @@
     @test.attr(type=['negative'])
     @test.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
     def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
-        """
-        External networks can be used to create both floating-ip as well
-        as instance-ip. So, creating an instance-ip with a value of a
-        pre-created floating-ip should be denied.
-        """
+        # NOTE: External networks can be used to create both a floating-ip
+        # and an instance-ip. So, creating an instance-ip with the value of
+        # a pre-created floating-ip should be denied.
 
         # create a floating ip
-        client = self.admin_client
-        body = client.create_floatingip(
+        body = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=CONF.network.public_network_id)
         created_floating_ip = body['floatingip']
         self.addCleanup(self._try_delete_resource,
-                        client.delete_floatingip,
+                        self.admin_floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
         floating_ip_address = created_floating_ip['floating_ip_address']
         self.assertIsNotNone(floating_ip_address)
@@ -49,6 +46,6 @@
 
         # create a port which will internally create an instance-ip
         self.assertRaises(lib_exc.Conflict,
-                          client.create_port,
+                          self.admin_ports_client.create_port,
                           network_id=CONF.network.public_network_id,
                           fixed_ips=fixed_ips)
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index dfe7307..6ad374b 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -29,6 +29,7 @@
     def setup_clients(cls):
         super(FloatingIPAdminTestJSON, cls).setup_clients()
         cls.alt_client = cls.alt_manager.network_client
+        cls.alt_floating_ips_client = cls.alt_manager.floating_ips_client
 
     @classmethod
     def resource_setup(cls):
@@ -45,18 +46,18 @@
     @test.idempotent_id('64f2100b-5471-4ded-b46c-ddeeeb4f231b')
     def test_list_floating_ips_from_admin_and_nonadmin(self):
         # Create floating ip from admin user
-        floating_ip_admin = self.admin_client.create_floatingip(
+        floating_ip_admin = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
-        self.addCleanup(self.admin_client.delete_floatingip,
+        self.addCleanup(self.admin_floating_ips_client.delete_floatingip,
                         floating_ip_admin['floatingip']['id'])
         # Create floating ip from alt user
-        body = self.alt_client.create_floatingip(
+        body = self.alt_floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
         floating_ip_alt = body['floatingip']
-        self.addCleanup(self.alt_client.delete_floatingip,
+        self.addCleanup(self.alt_floating_ips_client.delete_floatingip,
                         floating_ip_alt['id'])
         # List floating ips from admin
-        body = self.admin_client.list_floatingips()
+        body = self.admin_floating_ips_client.list_floatingips()
         floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
         # Check that admin sees all floating ips
         self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
@@ -64,7 +65,7 @@
                       floating_ip_ids_admin)
         self.assertIn(floating_ip_alt['id'], floating_ip_ids_admin)
         # List floating ips from nonadmin
-        body = self.client.list_floatingips()
+        body = self.floating_ips_client.list_floatingips()
         floating_ip_ids = [f['id'] for f in body['floatingips']]
         # Check that nonadmin user doesn't see floating ip created from admin
         # and floating ip that is created in another tenant (alt user)
@@ -76,12 +77,12 @@
     @test.idempotent_id('32727cc3-abe2-4485-a16e-48f2d54c14f2')
     def test_create_list_show_floating_ip_with_tenant_id_by_admin(self):
         # Creates a floating IP
-        body = self.admin_client.create_floatingip(
+        body = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
             tenant_id=self.network['tenant_id'],
             port_id=self.port['id'])
         created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
+        self.addCleanup(self.floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['tenant_id'])
@@ -93,7 +94,7 @@
         self.assertEqual(created_floating_ip['fixed_ip_address'],
                          port[0]['ip_address'])
         # Verifies the details of a floating_ip
-        floating_ip = self.admin_client.show_floatingip(
+        floating_ip = self.admin_floating_ips_client.show_floatingip(
             created_floating_ip['id'])
         shown_floating_ip = floating_ip['floatingip']
         self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
@@ -105,6 +106,6 @@
                          created_floating_ip['floating_ip_address'])
         self.assertEqual(shown_floating_ip['port_id'], self.port['id'])
         # Verify the floating ip exists in the list of all floating_ips
-        floating_ips = self.admin_client.list_floatingips()
+        floating_ips = self.admin_floating_ips_client.list_floatingips()
         floatingip_id_list = [f['id'] for f in floating_ips['floatingips']]
         self.assertIn(created_floating_ip['id'], floatingip_id_list)
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index d5556b8..78d6aea 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -14,9 +14,11 @@
 
 from tempest.api.network import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
 AGENT_TYPE = 'L3 agent'
 AGENT_MODES = (
     'legacy',
@@ -51,7 +53,7 @@
     @classmethod
     def resource_setup(cls):
         super(L3AgentSchedulerTestJSON, cls).resource_setup()
-        body = cls.admin_client.list_agents()
+        body = cls.admin_agents_client.list_agents()
         agents = body['agents']
         for agent in agents:
             # TODO(armax): falling back on default _agent_mode can be
@@ -78,26 +80,39 @@
                 cls.network = cls.create_network()
                 cls.subnet = cls.create_subnet(cls.network)
                 cls.port = cls.create_port(cls.network)
-                cls.client.add_router_interface_with_port_id(
-                    cls.router['id'], cls.port['id'])
+                cls.client.add_router_interface(cls.router['id'],
+                                                port_id=cls.port['id'])
+                # NOTE: Sometimes we have seen this test fail with dvr in
+                # multinode tests, since the dhcp port is not created before
+                # the test gets executed and so the router is not scheduled
+                # on the given agent. By adding the external gateway info to
+                # the router, the router should be properly scheduled in the
+                # dvr_snat node.
+                # This is a temporary workaround to prevent a race condition.
+                external_gateway_info = {
+                    'network_id': CONF.network.public_network_id,
+                    'enable_snat': True}
+                cls.admin_client.update_router_with_snat_gw_info(
+                    cls.router['id'],
+                    external_gateway_info=external_gateway_info)
 
     @classmethod
     def resource_cleanup(cls):
         if cls.is_dvr_router:
-            cls.client.remove_router_interface_with_port_id(
-                cls.router['id'], cls.port['id'])
+            cls.client.remove_router_interface(cls.router['id'],
+                                               port_id=cls.port['id'])
         super(L3AgentSchedulerTestJSON, cls).resource_cleanup()
 
     @test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
     def test_list_routers_on_l3_agent(self):
-        self.admin_client.list_routers_on_l3_agent(self.agent['id'])
+        self.admin_agents_client.list_routers_on_l3_agent(self.agent['id'])
 
     @test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
     def test_add_list_remove_router_on_l3_agent(self):
         l3_agent_ids = list()
-        self.admin_client.add_router_to_l3_agent(
+        self.admin_agents_client.create_router_on_l3_agent(
             self.agent['id'],
-            self.router['id'])
+            router_id=self.router['id'])
         body = (
             self.admin_client.list_l3_agents_hosting_router(self.router['id']))
         for agent in body['agents']:
@@ -105,7 +120,7 @@
             self.assertIn('agent_type', agent)
             self.assertEqual('L3 agent', agent['agent_type'])
         self.assertIn(self.agent['id'], l3_agent_ids)
-        body = self.admin_client.remove_router_from_l3_agent(
+        body = self.admin_agents_client.delete_router_from_l3_agent(
             self.agent['id'],
             self.router['id'])
         # NOTE(afazekas): The deletion not asserted, because neutron
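
For readers unfamiliar with the workaround described in the NOTE above: setting the external gateway through update_router_with_snat_gw_info amounts, on the Neutron v2.0 API, to a router update whose body carries an external_gateway_info block. The sketch below is illustrative only; the network UUID placeholder and the surrounding HTTP/auth plumbing are assumptions, not part of the change.

# Illustrative sketch only (not part of the diff above).
public_network_id = '<public-network-uuid>'  # normally CONF.network.public_network_id
external_gateway_info = {
    'network_id': public_network_id,
    'enable_snat': True,
}
update_body = {'router': {'external_gateway_info': external_gateway_info}}
# The client then issues roughly: PUT /v2.0/routers/<router_id> with
# update_body as the JSON body, after which the router should be scheduled
# on the dvr_snat node as described in the NOTE.
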
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
new file mode 100644
index 0000000..47da08c
--- /dev/null
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -0,0 +1,69 @@
+# Copyright 2015 Cloudwatt
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.network import base
+from tempest import test
+from tempest_lib import exceptions as lib_exc
+
+
+class QuotasNegativeTest(base.BaseAdminNetworkTest):
+    """Tests the following operations in the Neutron API:
+
+        set network quota and exceed this quota
+
+    v2.0 of the API is assumed.
+    It is also assumed that the per-tenant quota extension API is configured
+    in /etc/neutron/neutron.conf as follows:
+
+        quota_driver = neutron.db.quota_db.DbQuotaDriver
+    """
+    force_tenant_isolation = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(QuotasNegativeTest, cls).skip_checks()
+        if not test.is_extension_enabled('quotas', 'network'):
+            msg = "quotas extension not enabled."
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setup_clients(cls):
+        super(QuotasNegativeTest, cls).setup_clients()
+        cls.identity_admin_client = cls.os_adm.identity_client
+
+    @test.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
+    def test_network_quota_exceeding(self):
+        # Set the network quota to two
+        self.admin_quotas_client.update_quotas(self.networks_client.tenant_id,
+                                               network=2)
+        self.addCleanup(self.admin_quotas_client.reset_quotas,
+                        self.networks_client.tenant_id)
+
+        # Create two networks
+        n1 = self.networks_client.create_network()
+        self.addCleanup(self.networks_client.delete_network,
+                        n1['network']['id'])
+        n2 = self.networks_client.create_network()
+        self.addCleanup(self.networks_client.delete_network,
+                        n2['network']['id'])
+
+        # Try to create a third network while the quota is two
+        with self.assertRaisesRegexp(
+                lib_exc.Conflict,
+                "An object with that identifier already exists\\n" +
+                "Details.*Quota exceeded for resources: \['network'\].*"):
+            n3 = self.networks_client.create_network()
+            self.addCleanup(self.networks_client.delete_network,
+                            n3['network']['id'])
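
As a rough guide to what the new negative quota test exercises at the API level: the tempest quota client calls map onto Neutron's per-tenant quota extension, and the third network create is expected to fail with a 409 Conflict (surfaced as lib_exc.Conflict above). The request paths below are the standard Neutron v2.0 endpoints; the tenant id value is a placeholder.

# Illustrative sketch only (not part of the new test file above).
tenant_id = '<tenant-uuid>'
quota_update_body = {'quota': {'network': 2}}
# update_quotas(tenant_id, network=2)  -> PUT    /v2.0/quotas/<tenant_id>
# reset_quotas(tenant_id)              -> DELETE /v2.0/quotas/<tenant_id>
# create_network()                     -> POST   /v2.0/networks
# The third POST /v2.0/networks fails with 409 Conflict once the quota of
# two networks is exhausted.
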
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index f5c5784..45d35cf 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -21,9 +21,7 @@
 
 
 class QuotasTest(base.BaseAdminNetworkTest):
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
+    """Tests the following operations in the Neutron API:
 
         list quotas for tenants who have non-default quota values
         show quotas for a specified tenant
@@ -59,14 +57,14 @@
         self.addCleanup(self.identity_utils.delete_project, project_id)
 
         # Change quotas for tenant
-        quota_set = self.admin_client.update_quotas(project_id,
-                                                    **new_quotas)['quota']
-        self.addCleanup(self.admin_client.reset_quotas, project_id)
+        quota_set = self.admin_quotas_client.update_quotas(
+            project_id, **new_quotas)['quota']
+        self.addCleanup(self.admin_quotas_client.reset_quotas, project_id)
         for key, value in six.iteritems(new_quotas):
             self.assertEqual(value, quota_set[key])
 
         # Confirm our tenant is listed among tenants with non default quotas
-        non_default_quotas = self.admin_client.list_quotas()
+        non_default_quotas = self.admin_quotas_client.list_quotas()
         found = False
         for qs in non_default_quotas['quotas']:
             if qs['tenant_id'] == project_id:
@@ -74,14 +72,14 @@
         self.assertTrue(found)
 
         # Confirm from API quotas were changed as requested for tenant
-        quota_set = self.admin_client.show_quotas(project_id)
+        quota_set = self.admin_quotas_client.show_quotas(project_id)
         quota_set = quota_set['quota']
         for key, value in six.iteritems(new_quotas):
             self.assertEqual(value, quota_set[key])
 
         # Reset quotas to default and confirm
-        self.admin_client.reset_quotas(project_id)
-        non_default_quotas = self.admin_client.list_quotas()
+        self.admin_quotas_client.reset_quotas(project_id)
+        non_default_quotas = self.admin_quotas_client.list_quotas()
         for q in non_default_quotas['quotas']:
             self.assertNotEqual(project_id, q['tenant_id'])
 
diff --git a/tempest/api/network/admin/test_routers_dvr.py b/tempest/api/network/admin/test_routers_dvr.py
index 365698d..3e787af 100644
--- a/tempest/api/network/admin/test_routers_dvr.py
+++ b/tempest/api/network/admin/test_routers_dvr.py
@@ -42,7 +42,8 @@
 
     @test.idempotent_id('08a2a0a8-f1e4-4b34-8e30-e522e836c44e')
     def test_distributed_router_creation(self):
-        """
+        """Test distributed router creation
+
         Test uses administrative credentials to create a
         DVR (Distributed Virtual Routing) router using the
         distributed=True.
@@ -59,7 +60,8 @@
 
     @test.idempotent_id('8a0a72b4-7290-4677-afeb-b4ffe37bc352')
     def test_centralized_router_creation(self):
-        """
+        """Test centralized router creation
+
         Test uses administrative credentials to create a
         CVR (Centralized Virtual Routing) router using the
         distributed=False.
@@ -77,10 +79,11 @@
 
     @test.idempotent_id('acd43596-c1fb-439d-ada8-31ad48ae3c2e')
     def test_centralized_router_update_to_dvr(self):
-        """
+        """Test centralized router update
+
         Test uses administrative credentials to create a
         CVR (Centralized Virtual Routing) router using the
-        distributed=False.Then it will "update" the router
+        distributed=False. Then it will "update" the router
         distributed attribute to True
 
         Acceptance
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 17adfa5..f209f89 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 import netaddr
-from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 
 from tempest.common.utils import data_utils
@@ -24,13 +23,9 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class BaseNetworkTest(tempest.test.BaseTestCase):
-
-    """
-    Base class for the Neutron tests that use the Tempest Neutron REST client
+    """Base class for the Neutron tests
 
     Per the Neutron API Guide, API v1.x was removed from the source code tree
     (docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
@@ -73,8 +68,17 @@
     def setup_clients(cls):
         super(BaseNetworkTest, cls).setup_clients()
         cls.client = cls.os.network_client
+        cls.agents_client = cls.os.network_agents_client
+        cls.network_extensions_client = cls.os.network_extensions_client
         cls.networks_client = cls.os.networks_client
+        cls.subnetpools_client = cls.os.subnetpools_client
         cls.subnets_client = cls.os.subnets_client
+        cls.ports_client = cls.os.ports_client
+        cls.quotas_client = cls.os.network_quotas_client
+        cls.floating_ips_client = cls.os.floating_ips_client
+        cls.security_groups_client = cls.os.security_groups_client
+        cls.security_group_rules_client = (
+            cls.os.security_group_rules_client)
 
     @classmethod
     def resource_setup(cls):
@@ -93,22 +97,26 @@
         if CONF.service_available.neutron:
             # Clean up floating IPs
             for floating_ip in cls.floating_ips:
-                cls._try_delete_resource(cls.client.delete_floatingip,
-                                         floating_ip['id'])
+                cls._try_delete_resource(
+                    cls.floating_ips_client.delete_floatingip,
+                    floating_ip['id'])
 
             # Clean up metering label rules
+            # Not all classes in the hierarchy have the client class variable
+            if len(cls.metering_label_rules) > 0:
+                label_rules_client = cls.admin_metering_label_rules_client
             for metering_label_rule in cls.metering_label_rules:
                 cls._try_delete_resource(
-                    cls.admin_client.delete_metering_label_rule,
+                    label_rules_client.delete_metering_label_rule,
                     metering_label_rule['id'])
             # Clean up metering labels
             for metering_label in cls.metering_labels:
                 cls._try_delete_resource(
-                    cls.admin_client.delete_metering_label,
+                    cls.admin_metering_labels_client.delete_metering_label,
                     metering_label['id'])
             # Clean up ports
             for port in cls.ports:
-                cls._try_delete_resource(cls.client.delete_port,
+                cls._try_delete_resource(cls.ports_client.delete_port,
                                          port['id'])
             # Clean up routers
             for router in cls.routers:
@@ -201,8 +209,8 @@
     @classmethod
     def create_port(cls, network, **kwargs):
         """Wrapper utility that returns a test port."""
-        body = cls.client.create_port(network_id=network['id'],
-                                      **kwargs)
+        body = cls.ports_client.create_port(network_id=network['id'],
+                                            **kwargs)
         port = body['port']
         cls.ports.append(port)
         return port
@@ -210,8 +218,8 @@
     @classmethod
     def update_port(cls, port, **kwargs):
         """Wrapper utility that updates a test port."""
-        body = cls.client.update_port(port['id'],
-                                      **kwargs)
+        body = cls.ports_client.update_port(port['id'],
+                                            **kwargs)
         return body['port']
 
     @classmethod
@@ -233,7 +241,7 @@
     @classmethod
     def create_floatingip(cls, external_network_id):
         """Wrapper utility that returns a test floating IP."""
-        body = cls.client.create_floatingip(
+        body = cls.floating_ips_client.create_floatingip(
             floating_network_id=external_network_id)
         fip = body['floatingip']
         cls.floating_ips.append(fip)
@@ -242,8 +250,8 @@
     @classmethod
     def create_router_interface(cls, router_id, subnet_id):
         """Wrapper utility that returns a router interface."""
-        interface = cls.client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
+        interface = cls.client.add_router_interface(router_id,
+                                                    subnet_id=subnet_id)
         return interface
 
     @classmethod
@@ -252,8 +260,9 @@
         interfaces = body['ports']
         for i in interfaces:
             try:
-                cls.client.remove_router_interface_with_subnet_id(
-                    router['id'], i['fixed_ips'][0]['subnet_id'])
+                cls.client.remove_router_interface(
+                    router['id'],
+                    subnet_id=i['fixed_ips'][0]['subnet_id'])
             except lib_exc.NotFound:
                 pass
         cls.client.delete_router(router['id'])
@@ -267,13 +276,20 @@
     def setup_clients(cls):
         super(BaseAdminNetworkTest, cls).setup_clients()
         cls.admin_client = cls.os_adm.network_client
+        cls.admin_agents_client = cls.os_adm.network_agents_client
         cls.admin_networks_client = cls.os_adm.networks_client
         cls.admin_subnets_client = cls.os_adm.subnets_client
+        cls.admin_ports_client = cls.os_adm.ports_client
+        cls.admin_quotas_client = cls.os_adm.network_quotas_client
+        cls.admin_floating_ips_client = cls.os_adm.floating_ips_client
+        cls.admin_metering_labels_client = cls.os_adm.metering_labels_client
+        cls.admin_metering_label_rules_client = (
+            cls.os_adm.metering_label_rules_client)
 
     @classmethod
     def create_metering_label(cls, name, description):
         """Wrapper utility that returns a test metering label."""
-        body = cls.admin_client.create_metering_label(
+        body = cls.admin_metering_labels_client.create_metering_label(
             description=description,
             name=data_utils.rand_name("metering-label"))
         metering_label = body['metering_label']
@@ -284,7 +300,8 @@
     def create_metering_label_rule(cls, remote_ip_prefix, direction,
                                    metering_label_id):
         """Wrapper utility that returns a test metering label rule."""
-        body = cls.admin_client.create_metering_label_rule(
+        client = cls.admin_metering_label_rules_client
+        body = client.create_metering_label_rule(
             remote_ip_prefix=remote_ip_prefix, direction=direction,
             metering_label_id=metering_label_id)
         metering_label_rule = body['metering_label_rule']
diff --git a/tempest/api/network/base_routers.py b/tempest/api/network/base_routers.py
index 739e6f9..3495b76f 100644
--- a/tempest/api/network/base_routers.py
+++ b/tempest/api/network/base_routers.py
@@ -45,19 +45,19 @@
         self.assertNotIn(router_id, routers_list)
 
     def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        interface = self.client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
+        interface = self.client.add_router_interface(router_id,
+                                                     subnet_id=subnet_id)
         self.addCleanup(self._remove_router_interface_with_subnet_id,
                         router_id, subnet_id)
         self.assertEqual(subnet_id, interface['subnet_id'])
         return interface
 
     def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
-        body = self.client.remove_router_interface_with_subnet_id(
-            router_id, subnet_id)
+        body = self.client.remove_router_interface(router_id,
+                                                   subnet_id=subnet_id)
         self.assertEqual(subnet_id, body['subnet_id'])
 
     def _remove_router_interface_with_port_id(self, router_id, port_id):
-        body = self.client.remove_router_interface_with_port_id(router_id,
-                                                                port_id)
+        body = self.client.remove_router_interface(router_id,
+                                                   port_id=port_id)
         self.assertEqual(port_id, body['port_id'])
diff --git a/tempest/api/network/base_security_groups.py b/tempest/api/network/base_security_groups.py
index 1cef2cc..3ea3aea 100644
--- a/tempest/api/network/base_security_groups.py
+++ b/tempest/api/network/base_security_groups.py
@@ -22,27 +22,29 @@
     def _create_security_group(self):
         # Create a security group
         name = data_utils.rand_name('secgroup-')
-        group_create_body = self.client.create_security_group(name=name)
+        group_create_body = (
+            self.security_groups_client.create_security_group(name=name))
         self.addCleanup(self._delete_security_group,
                         group_create_body['security_group']['id'])
         self.assertEqual(group_create_body['security_group']['name'], name)
         return group_create_body, name
 
     def _delete_security_group(self, secgroup_id):
-        self.client.delete_security_group(secgroup_id)
+        self.security_groups_client.delete_security_group(secgroup_id)
         # Asserting that the security group is not found in the list
         # after deletion
-        list_body = self.client.list_security_groups()
+        list_body = self.security_groups_client.list_security_groups()
         secgroup_list = list()
         for secgroup in list_body['security_groups']:
             secgroup_list.append(secgroup['id'])
         self.assertNotIn(secgroup_id, secgroup_list)
 
     def _delete_security_group_rule(self, rule_id):
-        self.client.delete_security_group_rule(rule_id)
+        self.security_group_rules_client.delete_security_group_rule(rule_id)
         # Asserting that the security group is not found in the list
         # after deletion
-        list_body = self.client.list_security_group_rules()
+        list_body = (
+            self.security_group_rules_client.list_security_group_rules())
         rules_list = list()
         for rule in list_body['security_group_rules']:
             rules_list.append(rule['id'])
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 5d7f00e..394aec1 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -23,9 +23,9 @@
 
 
 class AllowedAddressPairTestJSON(base.BaseNetworkTest):
-    """
-    Tests the Neutron Allowed Address Pair API extension using the Tempest
-    ReST client. The following API operations are tested with this extension:
+    """Tests the Neutron Allowed Address Pair API extension
+
+    The following API operations are tested with this extension:
 
         create port
         list ports
@@ -60,14 +60,14 @@
         # Create port with allowed address pair attribute
         allowed_address_pairs = [{'ip_address': self.ip_address,
                                   'mac_address': self.mac_address}]
-        body = self.client.create_port(
+        body = self.ports_client.create_port(
             network_id=self.network['id'],
             allowed_address_pairs=allowed_address_pairs)
         port_id = body['port']['id']
-        self.addCleanup(self.client.delete_port, port_id)
+        self.addCleanup(self.ports_client.delete_port, port_id)
 
         # Confirm port was created with allowed address pair attribute
-        body = self.client.list_ports()
+        body = self.ports_client.list_ports()
         ports = body['ports']
         port = [p for p in ports if p['id'] == port_id]
         msg = 'Created port not found in list of ports returned by Neutron'
@@ -76,9 +76,9 @@
 
     def _update_port_with_address(self, address, mac_address=None, **kwargs):
         # Create a port without allowed address pair
-        body = self.client.create_port(network_id=self.network['id'])
+        body = self.ports_client.create_port(network_id=self.network['id'])
         port_id = body['port']['id']
-        self.addCleanup(self.client.delete_port, port_id)
+        self.addCleanup(self.ports_client.delete_port, port_id)
         if mac_address is None:
             mac_address = self.mac_address
 
@@ -87,7 +87,7 @@
                                   'mac_address': mac_address}]
         if kwargs:
             allowed_address_pairs.append(kwargs['allowed_address_pairs'])
-        body = self.client.update_port(
+        body = self.ports_client.update_port(
             port_id, allowed_address_pairs=allowed_address_pairs)
         allowed_address_pair = body['port']['allowed_address_pairs']
         self.assertEqual(allowed_address_pair, allowed_address_pairs)
@@ -106,9 +106,9 @@
     @test.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
     def test_update_port_with_multiple_ip_mac_address_pair(self):
         # Create an ip _address and mac_address through port create
-        resp = self.client.create_port(network_id=self.network['id'])
+        resp = self.ports_client.create_port(network_id=self.network['id'])
         newportid = resp['port']['id']
-        self.addCleanup(self.client.delete_port, newportid)
+        self.addCleanup(self.ports_client.delete_port, newportid)
         ipaddress = resp['port']['fixed_ips'][0]['ip_address']
         macaddress = resp['port']['mac_address']
 
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 631a38b..dbb0d14 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -63,17 +63,16 @@
         del things_list[index]
 
     def _clean_network(self):
-        body = self.client.list_ports()
+        body = self.ports_client.list_ports()
         ports = body['ports']
         for port in ports:
             if (port['device_owner'].startswith('network:router_interface')
                 and port['device_id'] in [r['id'] for r in self.routers]):
-                self.client.remove_router_interface_with_port_id(
-                    port['device_id'], port['id']
-                )
+                self.client.remove_router_interface(port['device_id'],
+                                                    port_id=port['id'])
             else:
                 if port['id'] in [p['id'] for p in self.ports]:
-                    self.client.delete_port(port['id'])
+                    self.ports_client.delete_port(port['id'])
                     self._remove_from_list_by_index(self.ports, port)
         body = self.subnets_client.list_subnets()
         subnets = body['subnets']
@@ -99,10 +98,9 @@
 
     @test.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
     def test_dhcpv6_stateless_eui64(self):
-        """When subnets configured with RAs SLAAC (AOM=100) and DHCP stateless
-        (AOM=110) both for radvd and dnsmasq, port shall receive IP address
-        calculated from its MAC.
-        """
+        # NOTE: When subnets are configured with RA SLAAC (AOM=100) and
+        # DHCP stateless (AOM=110) for both radvd and dnsmasq, the port
+        # shall receive an IP address calculated from its MAC.
         for ra_mode, add_mode in (
                 ('slaac', 'slaac'),
                 ('dhcpv6-stateless', 'dhcpv6-stateless'),
@@ -118,17 +116,16 @@
 
     @test.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
     def test_dhcpv6_stateless_no_ra(self):
-        """When subnets configured with dnsmasq SLAAC and DHCP stateless
-        and there is no radvd, port shall receive IP address calculated
-        from its MAC and mask of subnet.
-        """
+        # NOTE: When subnets are configured with dnsmasq SLAAC and DHCP
+        # stateless and there is no radvd, the port shall receive an IP
+        # address calculated from its MAC and the subnet mask.
         for ra_mode, add_mode in (
                 (None, 'slaac'),
                 (None, 'dhcpv6-stateless'),
         ):
             kwargs = {'ipv6_ra_mode': ra_mode,
                       'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
+            kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
             real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
             self._clean_network()
             self.assertEqual(eui_ip, real_ip,
@@ -158,9 +155,8 @@
 
     @test.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
     def test_dhcpv6_stateless_no_ra_no_dhcp(self):
-        """If no radvd option and no dnsmasq option is configured
-        port shall receive IP from fixed IPs list of subnet.
-        """
+        # NOTE: If no radvd option and no dnsmasq option is configured,
+        # the port shall receive an IP from the subnet's fixed IPs list.
         real_ip, eui_ip = self._get_ips_from_subnet()
         self._clean_network()
         self.assertNotEqual(eui_ip, real_ip,
@@ -171,11 +167,10 @@
 
     @test.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
     def test_dhcpv6_two_subnets(self):
-        """When one IPv6 subnet configured with dnsmasq SLAAC or DHCP stateless
-        and other IPv6 is with DHCP stateful, port shall receive EUI-64 IP
-        addresses from first subnet and DHCP address from second one.
-        Order of subnet creating should be unimportant.
-        """
+        # NOTE: When one IPv6 subnet is configured with dnsmasq SLAAC or
+        # DHCP stateless and the other IPv6 subnet uses DHCP stateful, the
+        # port shall receive an EUI-64 IP address from the first subnet and
+        # a DHCP address from the second one. The order of subnet creation
+        # should be unimportant.
         for order in ("slaac_first", "dhcp_first"):
             for ra_mode, add_mode in (
                     ('slaac', 'slaac'),
@@ -203,9 +198,9 @@
                 real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
                                              for sub in [subnet_dhcp,
                                              subnet_slaac]]
-                self.client.delete_port(port['id'])
+                self.ports_client.delete_port(port['id'])
                 self.ports.pop()
-                body = self.client.list_ports()
+                body = self.ports_client.list_ports()
                 ports_id_list = [i['id'] for i in body['ports']]
                 self.assertNotIn(port['id'], ports_id_list)
                 self._clean_network()
@@ -221,11 +216,10 @@
 
     @test.idempotent_id('4256c61d-c538-41ea-9147-3c450c36669e')
     def test_dhcpv6_64_subnets(self):
-        """When one IPv6 subnet configured with dnsmasq SLAAC or DHCP stateless
-        and other IPv4 is with DHCP of IPv4, port shall receive EUI-64 IP
-        addresses from first subnet and IPv4 DHCP address from second one.
-        Order of subnet creating should be unimportant.
-        """
+        # NOTE: When one IPv6 subnet is configured with dnsmasq SLAAC or
+        # DHCP stateless and the other subnet is IPv4 with DHCP, the port
+        # shall receive an EUI-64 IP address from the first subnet and an
+        # IPv4 DHCP address from the second one. The order of subnet
+        # creation should be unimportant.
         for order in ("slaac_first", "dhcp_first"):
             for ra_mode, add_mode in (
                     ('slaac', 'slaac'),
@@ -265,9 +259,8 @@
 
     @test.idempotent_id('4ab211a0-276f-4552-9070-51e27f58fecf')
     def test_dhcp_stateful(self):
-        """With all options below, DHCPv6 shall allocate address
-        from subnet pool to port.
-        """
+        # NOTE: With all options below, DHCPv6 shall allocate address from
+        # subnet pool to port.
         for ra_mode, add_mode in (
                 ('dhcpv6-stateful', 'dhcpv6-stateful'),
                 ('dhcpv6-stateful', None),
@@ -275,7 +268,7 @@
         ):
             kwargs = {'ipv6_ra_mode': ra_mode,
                       'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
+            kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
             subnet = self.create_subnet(self.network, **kwargs)
             port = self.create_port(self.network)
             port_ip = next(iter(port['fixed_ips']), None)['ip_address']
@@ -287,10 +280,9 @@
 
     @test.idempotent_id('51a5e97f-f02e-4e4e-9a17-a69811d300e3')
     def test_dhcp_stateful_fixedips(self):
-        """With all options below, port shall be able to get
-        requested IP from fixed IP range not depending on
-        DHCP stateful (not SLAAC!) settings configured.
-        """
+        # NOTE: With all options below, port shall be able to get
+        # requested IP from fixed IP range not depending on
+        # DHCP stateful (not SLAAC!) settings configured.
         for ra_mode, add_mode in (
                 ('dhcpv6-stateful', 'dhcpv6-stateful'),
                 ('dhcpv6-stateful', None),
@@ -298,7 +290,7 @@
         ):
             kwargs = {'ipv6_ra_mode': ra_mode,
                       'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
+            kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
             subnet = self.create_subnet(self.network, **kwargs)
             ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
                                        subnet["allocation_pools"][0]["end"])
@@ -316,9 +308,8 @@
 
     @test.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af')
     def test_dhcp_stateful_fixedips_outrange(self):
-        """When port gets IP address from fixed IP range it
-        shall be checked if it's from subnets range.
-        """
+        # NOTE: When the port gets an IP address from the fixed IP range,
+        # it shall be checked that the address is within the subnet's range.
         kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
                   'ipv6_address_mode': 'dhcpv6-stateful'}
         subnet = self.create_subnet(self.network, **kwargs)
@@ -334,9 +325,8 @@
 
     @test.idempotent_id('57b8302b-cba9-4fbb-8835-9168df029051')
     def test_dhcp_stateful_fixedips_duplicate(self):
-        """When port gets IP address from fixed IP range it
-        shall be checked if it's not duplicate.
-        """
+        # NOTE: When the port gets an IP address from the fixed IP range,
+        # it shall be checked that the address is not a duplicate.
         kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
                   'ipv6_address_mode': 'dhcpv6-stateful'}
         subnet = self.create_subnet(self.network, **kwargs)
@@ -362,21 +352,20 @@
             admin_state_up=True)
         port = self.create_router_interface(router['id'],
                                             subnet['id'])
-        body = self.client.show_port(port['port_id'])
+        body = self.ports_client.show_port(port['port_id'])
         return subnet, body['port']
 
     @test.idempotent_id('e98f65db-68f4-4330-9fea-abd8c5192d4d')
     def test_dhcp_stateful_router(self):
-        """With all options below the router interface shall
-        receive DHCPv6 IP address from allocation pool.
-        """
+        # NOTE: With all options below the router interface shall
+        # receive DHCPv6 IP address from allocation pool.
         for ra_mode, add_mode in (
                 ('dhcpv6-stateful', 'dhcpv6-stateful'),
                 ('dhcpv6-stateful', None),
         ):
             kwargs = {'ipv6_ra_mode': ra_mode,
                       'ipv6_address_mode': add_mode}
-            kwargs = {k: v for k, v in six.iteritems(kwargs) if v}
+            kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
             subnet, port = self._create_subnet_router(kwargs)
             port_ip = next(iter(port['fixed_ips']), None)['ip_address']
             self._clean_network()
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index d6b03eb..d71d600 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -19,10 +19,7 @@
 
 
 class ExtensionsTestJSON(base.BaseNetworkTest):
-
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
+    """Tests the following operations in the Neutron API:
 
         List all available extensions
 
@@ -44,14 +41,15 @@
         expected_alias = [ext for ext in expected_alias if
                           test.is_extension_enabled(ext, 'network')]
         actual_alias = list()
-        extensions = self.client.list_extensions()
+        extensions = self.network_extensions_client.list_extensions()
         list_extensions = extensions['extensions']
         # Show and verify the details of the available extensions
         for ext in list_extensions:
             ext_name = ext['name']
             ext_alias = ext['alias']
             actual_alias.append(ext['alias'])
-            ext_details = self.client.show_extension(ext_alias)
+            ext_details = self.network_extensions_client.show_extension(
+                ext_alias)
             ext_details = ext_details['extension']
 
             self.assertIsNotNone(ext_details)
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index 87e3413..062bc69 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -19,9 +19,7 @@
 
 
 class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
-    """
-    Tests the following operations with the Extra DHCP Options Neutron API
-    extension:
+    """Tests the following operations with the Extra DHCP Options:
 
         port create
         port list
@@ -59,14 +57,14 @@
     @test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
     def test_create_list_port_with_extra_dhcp_options(self):
         # Create a port with Extra DHCP Options
-        body = self.client.create_port(
+        body = self.ports_client.create_port(
             network_id=self.network['id'],
             extra_dhcp_opts=self.extra_dhcp_opts)
         port_id = body['port']['id']
-        self.addCleanup(self.client.delete_port, port_id)
+        self.addCleanup(self.ports_client.delete_port, port_id)
 
         # Confirm port created has Extra DHCP Options
-        body = self.client.list_ports()
+        body = self.ports_client.list_ports()
         ports = body['ports']
         port = [p for p in ports if p['id'] == port_id]
         self.assertTrue(port)
@@ -76,12 +74,12 @@
     def test_update_show_port_with_extra_dhcp_options(self):
         # Update port with extra dhcp options
         name = data_utils.rand_name('new-port-name')
-        body = self.client.update_port(
+        body = self.ports_client.update_port(
             self.port['id'],
             name=name,
             extra_dhcp_opts=self.extra_dhcp_opts)
         # Confirm extra dhcp options were added to the port
-        body = self.client.show_port(self.port['id'])
+        body = self.ports_client.show_port(self.port['id'])
         self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)
 
     def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 4b4a4e2..ce9c4be 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -24,9 +24,7 @@
 
 
 class FloatingIPTestJSON(base.BaseNetworkTest):
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
+    """Tests the following operations in the Neutron API:
 
         Create a Floating IP
         Update a Floating IP
@@ -70,11 +68,11 @@
     @test.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e8718')
     def test_create_list_show_update_delete_floating_ip(self):
         # Creates a floating IP
-        body = self.client.create_floatingip(
+        body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
             port_id=self.ports[0]['id'])
         created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
+        self.addCleanup(self.floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['tenant_id'])
@@ -85,7 +83,8 @@
         self.assertIn(created_floating_ip['fixed_ip_address'],
                       [ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
         # Verifies the details of a floating_ip
-        floating_ip = self.client.show_floatingip(created_floating_ip['id'])
+        floating_ip = self.floating_ips_client.show_floatingip(
+            created_floating_ip['id'])
         shown_floating_ip = floating_ip['floatingip']
         self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
         self.assertEqual(shown_floating_ip['floating_network_id'],
@@ -97,13 +96,13 @@
         self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])
 
         # Verify the floating ip exists in the list of all floating_ips
-        floating_ips = self.client.list_floatingips()
+        floating_ips = self.floating_ips_client.list_floatingips()
         floatingip_id_list = list()
         for f in floating_ips['floatingips']:
             floatingip_id_list.append(f['id'])
         self.assertIn(created_floating_ip['id'], floatingip_id_list)
         # Associate floating IP to the other port
-        floating_ip = self.client.update_floatingip(
+        floating_ip = self.floating_ips_client.update_floatingip(
             created_floating_ip['id'],
             port_id=self.ports[1]['id'])
         updated_floating_ip = floating_ip['floatingip']
@@ -113,7 +112,7 @@
         self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
 
         # Disassociate floating IP from the port
-        floating_ip = self.client.update_floatingip(
+        floating_ip = self.floating_ips_client.update_floatingip(
             created_floating_ip['id'],
             port_id=None)
         updated_floating_ip = floating_ip['floatingip']
@@ -124,21 +123,22 @@
     @test.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
     def test_floating_ip_delete_port(self):
         # Create a floating IP
-        body = self.client.create_floatingip(
+        body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
         created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
+        self.addCleanup(self.floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
         # Create a port
-        port = self.client.create_port(network_id=self.network['id'])
+        port = self.ports_client.create_port(network_id=self.network['id'])
         created_port = port['port']
-        floating_ip = self.client.update_floatingip(
+        floating_ip = self.floating_ips_client.update_floatingip(
             created_floating_ip['id'],
             port_id=created_port['id'])
         # Delete port
-        self.client.delete_port(created_port['id'])
+        self.ports_client.delete_port(created_port['id'])
         # Verifies the details of the floating_ip
-        floating_ip = self.client.show_floatingip(created_floating_ip['id'])
+        floating_ip = self.floating_ips_client.show_floatingip(
+            created_floating_ip['id'])
         shown_floating_ip = floating_ip['floatingip']
         # Confirm the fields are back to None
         self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
@@ -149,11 +149,11 @@
     @test.idempotent_id('1bb2f731-fe5a-4b8c-8409-799ade1bed4d')
     def test_floating_ip_update_different_router(self):
         # Associate a floating IP to a port on a router
-        body = self.client.create_floatingip(
+        body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
             port_id=self.ports[1]['id'])
         created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
+        self.addCleanup(self.floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
         self.assertEqual(created_floating_ip['router_id'], self.router['id'])
         network2 = self.create_network()
@@ -163,7 +163,7 @@
         self.create_router_interface(router2['id'], subnet2['id'])
         port_other_router = self.create_port(network2)
         # Associate floating IP to the other port on another router
-        floating_ip = self.client.update_floatingip(
+        floating_ip = self.floating_ips_client.update_floatingip(
             created_floating_ip['id'],
             port_id=port_other_router['id'])
         updated_floating_ip = floating_ip['floatingip']
@@ -175,17 +175,17 @@
     @test.attr(type='smoke')
     @test.idempotent_id('36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5')
     def test_create_floating_ip_specifying_a_fixed_ip_address(self):
-        body = self.client.create_floatingip(
+        body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
             port_id=self.ports[1]['id'],
             fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
         created_floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip,
+        self.addCleanup(self.floating_ips_client.delete_floatingip,
                         created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['id'])
         self.assertEqual(created_floating_ip['fixed_ip_address'],
                          self.ports[1]['fixed_ips'][0]['ip_address'])
-        floating_ip = self.client.update_floatingip(
+        floating_ip = self.floating_ips_client.update_floatingip(
             created_floating_ip['id'],
             port_id=None)
         self.assertIsNone(floating_ip['floatingip']['port_id'])
@@ -197,23 +197,24 @@
         list_ips = [str(ip) for ip in ips[-3:-1]]
         fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
         # Create port
-        body = self.client.create_port(network_id=self.network['id'],
-                                       fixed_ips=fixed_ips)
+        body = self.ports_client.create_port(network_id=self.network['id'],
+                                             fixed_ips=fixed_ips)
         port = body['port']
-        self.addCleanup(self.client.delete_port, port['id'])
+        self.addCleanup(self.ports_client.delete_port, port['id'])
         # Create floating ip
-        body = self.client.create_floatingip(
+        body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
             port_id=port['id'],
             fixed_ip_address=list_ips[0])
         floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
+        self.addCleanup(self.floating_ips_client.delete_floatingip,
+                        floating_ip['id'])
         self.assertIsNotNone(floating_ip['id'])
         self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
         # Update floating ip
-        body = self.client.update_floatingip(floating_ip['id'],
-                                             port_id=port['id'],
-                                             fixed_ip_address=list_ips[1])
+        body = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], port_id=port['id'],
+            fixed_ip_address=list_ips[1])
         update_floating_ip = body['floatingip']
         self.assertEqual(update_floating_ip['fixed_ip_address'],
                          list_ips[1])
diff --git a/tempest/api/network/test_floating_ips_negative.py b/tempest/api/network/test_floating_ips_negative.py
index e8624d8..f915615 100644
--- a/tempest/api/network/test_floating_ips_negative.py
+++ b/tempest/api/network/test_floating_ips_negative.py
@@ -25,8 +25,7 @@
 
 
 class FloatingIPNegativeTestJSON(base.BaseNetworkTest):
-    """
-    Test the following negative  operations for floating ips:
+    """Test the following negative operations for floating ips:
 
         Create floatingip with a port that is unreachable to external network
         Create floatingip in private network
@@ -54,17 +53,17 @@
     @test.attr(type=['negative'])
     @test.idempotent_id('22996ea8-4a81-4b27-b6e1-fa5df92fa5e8')
     def test_create_floatingip_with_port_ext_net_unreachable(self):
-        self.assertRaises(lib_exc.NotFound, self.client.create_floatingip,
-                          floating_network_id=self.ext_net_id,
-                          port_id=self.port['id'],
-                          fixed_ip_address=self.port['fixed_ips'][0]
-                                                    ['ip_address'])
+        self.assertRaises(
+            lib_exc.NotFound, self.floating_ips_client.create_floatingip,
+            floating_network_id=self.ext_net_id, port_id=self.port['id'],
+            fixed_ip_address=self.port['fixed_ips'][0]
+                                      ['ip_address'])
 
     @test.attr(type=['negative'])
     @test.idempotent_id('50b9aeb4-9f0b-48ee-aa31-fa955a48ff54')
     def test_create_floatingip_in_private_network(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_floatingip,
+                          self.floating_ips_client.create_floatingip,
                           floating_network_id=self.network['id'],
                           port_id=self.port['id'],
                           fixed_ip_address=self.port['fixed_ips'][0]
@@ -74,12 +73,13 @@
     @test.idempotent_id('6b3b8797-6d43-4191-985c-c48b773eb429')
     def test_associate_floatingip_port_ext_net_unreachable(self):
         # Create floating ip
-        body = self.client.create_floatingip(
+        body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
         floating_ip = body['floatingip']
-        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
+        self.addCleanup(
+            self.floating_ips_client.delete_floatingip, floating_ip['id'])
         # Associate floating IP to the other port
-        self.assertRaises(lib_exc.NotFound, self.client.update_floatingip,
-                          floating_ip['id'], port_id=self.port['id'],
-                          fixed_ip_address=self.port['fixed_ips'][0]
-                          ['ip_address'])
+        self.assertRaises(
+            lib_exc.NotFound, self.floating_ips_client.update_floatingip,
+            floating_ip['id'], port_id=self.port['id'],
+            fixed_ip_address=self.port['fixed_ips'][0]['ip_address'])
diff --git a/tempest/api/network/test_metering_extensions.py b/tempest/api/network/test_metering_extensions.py
index ee0dcb0..299700f 100644
--- a/tempest/api/network/test_metering_extensions.py
+++ b/tempest/api/network/test_metering_extensions.py
@@ -12,20 +12,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo_log import log as logging
-
 from tempest.api.network import base
 from tempest.common.utils import data_utils
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
-
-
 class MeteringTestJSON(base.BaseAdminNetworkTest):
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
+    """Tests the following operations in the Neutron API:
 
         List, Show, Create, Delete Metering labels
         List, Show, Create, Delete Metering labels rules
@@ -53,24 +46,25 @@
 
     def _delete_metering_label(self, metering_label_id):
         # Deletes a label and verifies if it is deleted or not
-        self.admin_client.delete_metering_label(metering_label_id)
+        self.admin_metering_labels_client.delete_metering_label(
+            metering_label_id)
         # Asserting that the label is not found in list after deletion
-        labels = self.admin_client.list_metering_labels(id=metering_label_id)
+        labels = self.admin_metering_labels_client.list_metering_labels(
+            id=metering_label_id)
         self.assertEqual(len(labels['metering_labels']), 0)
 
     def _delete_metering_label_rule(self, metering_label_rule_id):
+        client = self.admin_metering_label_rules_client
         # Deletes a rule and verifies if it is deleted or not
-        self.admin_client.delete_metering_label_rule(
-            metering_label_rule_id)
+        client.delete_metering_label_rule(metering_label_rule_id)
         # Asserting that the rule is not found in list after deletion
-        rules = (self.admin_client.list_metering_label_rules(
-                 id=metering_label_rule_id))
+        rules = client.list_metering_label_rules(id=metering_label_rule_id)
         self.assertEqual(len(rules['metering_label_rules']), 0)
 
     @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
     def test_list_metering_labels(self):
         # Verify label filtering
-        body = self.admin_client.list_metering_labels(id=33)
+        body = self.admin_metering_labels_client.list_metering_labels(id=33)
         metering_labels = body['metering_labels']
         self.assertEqual(0, len(metering_labels))
 
@@ -79,21 +73,22 @@
         # Creates a label
         name = data_utils.rand_name('metering-label-')
         description = "label created by tempest"
-        body = self.admin_client.create_metering_label(name=name,
-                                                       description=description)
+        body = self.admin_metering_labels_client.create_metering_label(
+            name=name, description=description)
         metering_label = body['metering_label']
         self.addCleanup(self._delete_metering_label,
                         metering_label['id'])
         # Assert whether created labels are found in labels list or fail
         # if created labels are not found in labels list
-        labels = (self.admin_client.list_metering_labels(
+        labels = (self.admin_metering_labels_client.list_metering_labels(
                   id=metering_label['id']))
         self.assertEqual(len(labels['metering_labels']), 1)
 
     @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
     def test_show_metering_label(self):
         # Verifies the details of a label
-        body = self.admin_client.show_metering_label(self.metering_label['id'])
+        body = self.admin_metering_labels_client.show_metering_label(
+            self.metering_label['id'])
         metering_label = body['metering_label']
         self.assertEqual(self.metering_label['id'], metering_label['id'])
         self.assertEqual(self.metering_label['tenant_id'],
@@ -104,8 +99,9 @@
 
     @test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
     def test_list_metering_label_rules(self):
+        client = self.admin_metering_label_rules_client
         # Verify rule filtering
-        body = self.admin_client.list_metering_label_rules(id=33)
+        body = client.list_metering_label_rules(id=33)
         metering_label_rules = body['metering_label_rules']
         self.assertEqual(0, len(metering_label_rules))
 
@@ -114,7 +110,8 @@
         # Creates a rule
         remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
                             else "fd03::/64")
-        body = (self.admin_client.create_metering_label_rule(
+        client = self.admin_metering_label_rules_client
+        body = (client.create_metering_label_rule(
                 remote_ip_prefix=remote_ip_prefix,
                 direction="ingress",
                 metering_label_id=self.metering_label['id']))
@@ -123,14 +120,14 @@
                         metering_label_rule['id'])
         # Assert whether created rules are found in rules list or fail
         # if created rules are not found in rules list
-        rules = (self.admin_client.list_metering_label_rules(
-                 id=metering_label_rule['id']))
+        rules = client.list_metering_label_rules(id=metering_label_rule['id'])
         self.assertEqual(len(rules['metering_label_rules']), 1)
 
     @test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
     def test_show_metering_label_rule(self):
         # Verifies the details of a rule
-        body = (self.admin_client.show_metering_label_rule(
+        client = self.admin_metering_label_rules_client
+        body = (client.show_metering_label_rule(
                 self.metering_label_rule['id']))
         metering_label_rule = body['metering_label_rule']
         self.assertEqual(self.metering_label_rule['id'],
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index c5b2080..1c446ef 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -28,9 +28,7 @@
 
 
 class NetworksTest(base.BaseNetworkTest):
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
+    """Tests the following operations in the Neutron API:
 
         create a network for a tenant
         list tenant's networks
@@ -95,9 +93,8 @@
 
     @classmethod
     def _create_subnet_with_last_subnet_block(cls, network, ip_version):
-        """Derive last subnet CIDR block from tenant CIDR and
-           create the subnet with that derived CIDR
-        """
+        # Derive last subnet CIDR block from tenant CIDR and
+        # create the subnet with that derived CIDR
         if ip_version == 4:
             cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
             mask_bits = CONF.network.tenant_network_mask_bits
@@ -133,9 +130,8 @@
         return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
 
     def subnet_dict(self, include_keys):
-        """Return a subnet dict which has include_keys and their corresponding
-           value from self._subnet_data
-        """
+        # Return a subnet dict which has include_keys and their corresponding
+        # values from self._subnet_data
         return dict((key, self._subnet_data[self._ip_version][key])
                     for key in include_keys)
 
@@ -405,9 +401,7 @@
 
 
 class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
-    """
-    Tests the following operations in the Neutron API using the REST client for
-    Neutron:
+    """Tests the following operations in the Neutron API:
 
         bulk network creation
         bulk subnet creation
@@ -444,9 +438,9 @@
 
     def _delete_ports(self, created_ports):
         for n in created_ports:
-            self.client.delete_port(n['id'])
+            self.ports_client.delete_port(n['id'])
         # Asserting that the ports are not found in the list after deletion
-        body = self.client.list_ports()
+        body = self.ports_client.list_ports()
         ports_list = [port['id'] for port in body['ports']]
         for n in created_ports:
             self.assertNotIn(n['id'], ports_list)
@@ -455,9 +449,9 @@
     @test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
     def test_bulk_create_delete_network(self):
         # Creates 2 networks in one request
-        network_names = [data_utils.rand_name('network-'),
-                         data_utils.rand_name('network-')]
-        body = self.client.create_bulk_network(network_names)
+        network_list = [{'name': data_utils.rand_name('network-')},
+                        {'name': data_utils.rand_name('network-')}]
+        body = self.client.create_bulk_network(networks=network_list)
         created_networks = body['networks']
         self.addCleanup(self._delete_networks, created_networks)
         # Asserting that the networks are found in the list after creation
@@ -492,7 +486,7 @@
             }
             subnets_list.append(p1)
         del subnets_list[1]['name']
-        body = self.client.create_bulk_subnet(subnets_list)
+        body = self.client.create_bulk_subnet(subnets=subnets_list)
         created_subnets = body['subnets']
         self.addCleanup(self._delete_subnets, created_subnets)
         # Asserting that the subnets are found in the list after creation
@@ -518,11 +512,11 @@
             }
             port_list.append(p1)
         del port_list[1]['name']
-        body = self.client.create_bulk_port(port_list)
+        body = self.client.create_bulk_port(ports=port_list)
         created_ports = body['ports']
         self.addCleanup(self._delete_ports, created_ports)
         # Asserting that the ports are found in the list after creation
-        body = self.client.list_ports()
+        body = self.ports_client.list_ports()
         ports_list = [port['id'] for port in body['ports']]
         for n in created_ports:
             self.assertIsNotNone(n['id'])
diff --git a/tempest/api/network/test_networks_negative.py b/tempest/api/network/test_networks_negative.py
index 4fe31cf..0ef96a6 100644
--- a/tempest/api/network/test_networks_negative.py
+++ b/tempest/api/network/test_networks_negative.py
@@ -41,7 +41,7 @@
     @test.idempotent_id('a954861d-cbfd-44e8-b0a9-7fab111f235d')
     def test_show_non_existent_port(self):
         non_exist_id = data_utils.rand_uuid()
-        self.assertRaises(lib_exc.NotFound, self.client.show_port,
+        self.assertRaises(lib_exc.NotFound, self.ports_client.show_port,
                           non_exist_id)
 
     @test.attr(type=['negative'])
@@ -79,13 +79,14 @@
     def test_create_port_on_non_existent_network(self):
         non_exist_net_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.client.create_port, network_id=non_exist_net_id)
+                          self.ports_client.create_port,
+                          network_id=non_exist_net_id)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('cf8eef21-4351-4f53-adcd-cc5cb1e76b92')
     def test_update_non_existent_port(self):
         non_exist_port_id = data_utils.rand_uuid()
-        self.assertRaises(lib_exc.NotFound, self.client.update_port,
+        self.assertRaises(lib_exc.NotFound, self.ports_client.update_port,
                           non_exist_port_id, name='new_name')
 
     @test.attr(type=['negative'])
@@ -93,4 +94,4 @@
     def test_delete_non_existent_port(self):
         non_exist_port_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.client.delete_port, non_exist_port_id)
+                          self.ports_client.delete_port, non_exist_port_id)
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index c2c7104..d7b220b 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -29,8 +29,7 @@
 
 
 class PortsTestJSON(sec_base.BaseSecGroupTest):
-    """
-    Test the following operations for ports:
+    """Test the following operations for ports:
 
         port create
         port delete
@@ -46,8 +45,8 @@
         cls.port = cls.create_port(cls.network)
 
     def _delete_port(self, port_id):
-        self.client.delete_port(port_id)
-        body = self.client.list_ports()
+        self.ports_client.delete_port(port_id)
+        body = self.ports_client.list_ports()
         ports_list = body['ports']
         self.assertFalse(port_id in [n['id'] for n in ports_list])
 
@@ -55,16 +54,16 @@
     @test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
     def test_create_update_delete_port(self):
         # Verify port creation
-        body = self.client.create_port(network_id=self.network['id'])
+        body = self.ports_client.create_port(network_id=self.network['id'])
         port = body['port']
         # Schedule port deletion with verification upon test completion
         self.addCleanup(self._delete_port, port['id'])
         self.assertTrue(port['admin_state_up'])
         # Verify port update
         new_name = "New_Port"
-        body = self.client.update_port(port['id'],
-                                       name=new_name,
-                                       admin_state_up=False)
+        body = self.ports_client.update_port(port['id'],
+                                             name=new_name,
+                                             admin_state_up=False)
         updated_port = body['port']
         self.assertEqual(updated_port['name'], new_name)
         self.assertFalse(updated_port['admin_state_up'])
@@ -76,7 +75,7 @@
         network2 = self.create_network(network_name=name)
         network_list = [network1['id'], network2['id']]
         port_list = [{'network_id': net_id} for net_id in network_list]
-        body = self.client.create_bulk_port(port_list)
+        body = self.client.create_bulk_port(ports=port_list)
         created_ports = body['ports']
         port1 = created_ports[0]
         port2 = created_ports[1]
@@ -116,8 +115,8 @@
                                     mask_bits=address.prefixlen,
                                     **allocation_pools)
         self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
-        body = self.client.create_port(network_id=net_id)
-        self.addCleanup(self.client.delete_port, body['port']['id'])
+        body = self.ports_client.create_port(network_id=net_id)
+        self.addCleanup(self.ports_client.delete_port, body['port']['id'])
         port = body['port']
         ip_address = port['fixed_ips'][0]['ip_address']
         start_ip_address = allocation_pools['allocation_pools'][0]['start']
@@ -129,7 +128,7 @@
     @test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
     def test_show_port(self):
         # Verify the details of port
-        body = self.client.show_port(self.port['id'])
+        body = self.ports_client.show_port(self.port['id'])
         port = body['port']
         self.assertIn('id', port)
         # TODO(Santosh)- This is a temporary workaround to compare create_port
@@ -143,8 +142,8 @@
     def test_show_port_fields(self):
         # Verify specific fields of a port
         fields = ['id', 'mac_address']
-        body = self.client.show_port(self.port['id'],
-                                     fields=fields)
+        body = self.ports_client.show_port(self.port['id'],
+                                           fields=fields)
         port = body['port']
         self.assertEqual(sorted(port.keys()), sorted(fields))
         for field_name in fields:
@@ -154,7 +153,7 @@
     @test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
     def test_list_ports(self):
         # Verify the port exists in the list of all ports
-        body = self.client.list_ports()
+        body = self.ports_client.list_ports()
         ports = [port['id'] for port in body['ports']
                  if port['id'] == self.port['id']]
         self.assertNotEmpty(ports, "Created port not found in the list")
@@ -166,14 +165,14 @@
         subnet = self.create_subnet(network)
         self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
         # Create two ports
-        port_1 = self.client.create_port(network_id=network['id'])
-        self.addCleanup(self.client.delete_port, port_1['port']['id'])
-        port_2 = self.client.create_port(network_id=network['id'])
-        self.addCleanup(self.client.delete_port, port_2['port']['id'])
+        port_1 = self.ports_client.create_port(network_id=network['id'])
+        self.addCleanup(self.ports_client.delete_port, port_1['port']['id'])
+        port_2 = self.ports_client.create_port(network_id=network['id'])
+        self.addCleanup(self.ports_client.delete_port, port_2['port']['id'])
         # List ports filtered by fixed_ips
         port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
         fixed_ips = 'ip_address=' + port_1_fixed_ip
-        port_list = self.client.list_ports(fixed_ips=fixed_ips)
+        port_list = self.ports_client.list_ports(fixed_ips=fixed_ips)
         # Check that we got the desired port
         ports = port_list['ports']
         tenant_ids = set([port['tenant_id'] for port in ports])
@@ -199,14 +198,14 @@
         self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
         router = self.create_router(data_utils.rand_name('router-'))
         self.addCleanup(self.client.delete_router, router['id'])
-        port = self.client.create_port(network_id=network['id'])
+        port = self.ports_client.create_port(network_id=network['id'])
         # Add router interface to port created above
-        self.client.add_router_interface_with_port_id(
-            router['id'], port['port']['id'])
-        self.addCleanup(self.client.remove_router_interface_with_port_id,
-                        router['id'], port['port']['id'])
+        self.client.add_router_interface(router['id'],
+                                         port_id=port['port']['id'])
+        self.addCleanup(self.client.remove_router_interface, router['id'],
+                        port_id=port['port']['id'])
         # List ports filtered by router_id
-        port_list = self.client.list_ports(device_id=router['id'])
+        port_list = self.ports_client.list_ports(device_id=router['id'])
         ports = port_list['ports']
         self.assertEqual(len(ports), 1)
         self.assertEqual(ports[0]['id'], port['port']['id'])
@@ -216,7 +215,7 @@
     def test_list_ports_fields(self):
         # Verify specific fields of ports
         fields = ['id', 'mac_address']
-        body = self.client.list_ports(fields=fields)
+        body = self.ports_client.list_ports(fields=fields)
         ports = body['ports']
         self.assertNotEmpty(ports, "Port list returned is empty")
         # Asserting the fields returned are correct
@@ -240,7 +239,7 @@
         # Create a port with multiple IP addresses
         port = self.create_port(network,
                                 fixed_ips=fixed_ips)
-        self.addCleanup(self.client.delete_port, port['id'])
+        self.addCleanup(self.ports_client.delete_port, port['id'])
         self.assertEqual(2, len(port['fixed_ips']))
         check_fixed_ips = [subnet_1['id'], subnet_2['id']]
         for item in port['fixed_ips']:
@@ -260,17 +259,19 @@
         fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
 
         security_groups_list = list()
+        sec_grps_client = self.security_groups_client
         for name in security_groups_names:
-            group_create_body = self.client.create_security_group(
+            group_create_body = sec_grps_client.create_security_group(
                 name=name)
-            self.addCleanup(self.client.delete_security_group,
+            self.addCleanup(self.security_groups_client.delete_security_group,
                             group_create_body['security_group']['id'])
             security_groups_list.append(group_create_body['security_group']
                                         ['id'])
         # Create a port
         sec_grp_name = data_utils.rand_name('secgroup')
-        security_group = self.client.create_security_group(name=sec_grp_name)
-        self.addCleanup(self.client.delete_security_group,
+        security_group = sec_grps_client.create_security_group(
+            name=sec_grp_name)
+        self.addCleanup(self.security_groups_client.delete_security_group,
                         security_group['security_group']['id'])
         post_body = {
             "name": data_utils.rand_name('port-'),
@@ -278,8 +279,8 @@
             "network_id": self.network['id'],
             "admin_state_up": True,
             "fixed_ips": fixed_ip_1}
-        body = self.client.create_port(**post_body)
-        self.addCleanup(self.client.delete_port, body['port']['id'])
+        body = self.ports_client.create_port(**post_body)
+        self.addCleanup(self.ports_client.delete_port, body['port']['id'])
         port = body['port']
 
         # Update the port with security groups
@@ -289,7 +290,7 @@
                        "admin_state_up": False,
                        "fixed_ips": fixed_ip_2,
                        "security_groups": security_groups_list}
-        body = self.client.update_port(port['id'], **update_body)
+        body = self.ports_client.update_port(port['id'], **update_body)
         port_show = body['port']
         # Verify the security groups and other attributes updated to port
         exclude_keys = set(port_show).symmetric_difference(update_body)
@@ -317,16 +318,16 @@
     @test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
     def test_create_show_delete_port_user_defined_mac(self):
         # Create a port for a legal mac
-        body = self.client.create_port(network_id=self.network['id'])
+        body = self.ports_client.create_port(network_id=self.network['id'])
         old_port = body['port']
         free_mac_address = old_port['mac_address']
-        self.client.delete_port(old_port['id'])
+        self.ports_client.delete_port(old_port['id'])
         # Create a new port with user defined mac
-        body = self.client.create_port(network_id=self.network['id'],
-                                       mac_address=free_mac_address)
-        self.addCleanup(self.client.delete_port, body['port']['id'])
+        body = self.ports_client.create_port(network_id=self.network['id'],
+                                             mac_address=free_mac_address)
+        self.addCleanup(self.ports_client.delete_port, body['port']['id'])
         port = body['port']
-        body = self.client.show_port(port['id'])
+        body = self.ports_client.show_port(port['id'])
         show_port = body['port']
         self.assertEqual(free_mac_address,
                          show_port['mac_address'])
@@ -339,7 +340,7 @@
         subnet = self.create_subnet(network)
         self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
         port = self.create_port(network, security_groups=[])
-        self.addCleanup(self.client.delete_port, port['id'])
+        self.addCleanup(self.ports_client.delete_port, port['id'])
         self.assertIsNotNone(port['security_groups'])
         self.assertEmpty(port['security_groups'])
 
@@ -361,9 +362,9 @@
     def test_create_port_binding_ext_attr(self):
         post_body = {"network_id": self.network['id'],
                      "binding:host_id": self.host_id}
-        body = self.admin_client.create_port(**post_body)
+        body = self.admin_ports_client.create_port(**post_body)
         port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
+        self.addCleanup(self.admin_ports_client.delete_port, port['id'])
         host_id = port['binding:host_id']
         self.assertIsNotNone(host_id)
         self.assertEqual(self.host_id, host_id)
@@ -371,11 +372,11 @@
     @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
     def test_update_port_binding_ext_attr(self):
         post_body = {"network_id": self.network['id']}
-        body = self.admin_client.create_port(**post_body)
+        body = self.admin_ports_client.create_port(**post_body)
         port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
+        self.addCleanup(self.admin_ports_client.delete_port, port['id'])
         update_body = {"binding:host_id": self.host_id}
-        body = self.admin_client.update_port(port['id'], **update_body)
+        body = self.admin_ports_client.update_port(port['id'], **update_body)
         updated_port = body['port']
         host_id = updated_port['binding:host_id']
         self.assertIsNotNone(host_id)
@@ -385,18 +386,18 @@
     def test_list_ports_binding_ext_attr(self):
         # Create a new port
         post_body = {"network_id": self.network['id']}
-        body = self.admin_client.create_port(**post_body)
+        body = self.admin_ports_client.create_port(**post_body)
         port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
+        self.addCleanup(self.admin_ports_client.delete_port, port['id'])
 
         # Update the port's binding attributes so that is now 'bound'
         # to a host
         update_body = {"binding:host_id": self.host_id}
-        self.admin_client.update_port(port['id'], **update_body)
+        self.admin_ports_client.update_port(port['id'], **update_body)
 
         # List all ports, ensure new port is part of list and its binding
         # attributes are set and accurate
-        body = self.admin_client.list_ports()
+        body = self.admin_ports_client.list_ports()
         ports_list = body['ports']
         pids_list = [p['id'] for p in ports_list]
         self.assertIn(port['id'], pids_list)
@@ -408,10 +409,11 @@
 
     @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
     def test_show_port_binding_ext_attr(self):
-        body = self.admin_client.create_port(network_id=self.network['id'])
+        body = self.admin_ports_client.create_port(
+            network_id=self.network['id'])
         port = body['port']
-        self.addCleanup(self.admin_client.delete_port, port['id'])
-        body = self.admin_client.show_port(port['id'])
+        self.addCleanup(self.admin_ports_client.delete_port, port['id'])
+        body = self.admin_ports_client.show_port(port['id'])
         show_port = body['port']
         self.assertEqual(port['binding:host_id'],
                          show_port['binding:host_id'])
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 29855e1..0b64be4 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -137,14 +137,14 @@
         subnet = self.create_subnet(network)
         router = self._create_router(data_utils.rand_name('router-'))
         # Add router interface with subnet id
-        interface = self.client.add_router_interface_with_subnet_id(
-            router['id'], subnet['id'])
+        interface = self.client.add_router_interface(router['id'],
+                                                     subnet_id=subnet['id'])
         self.addCleanup(self._remove_router_interface_with_subnet_id,
                         router['id'], subnet['id'])
         self.assertIn('subnet_id', interface.keys())
         self.assertIn('port_id', interface.keys())
         # Verify router id is equal to device id in port details
-        show_port_body = self.client.show_port(
+        show_port_body = self.ports_client.show_port(
             interface['port_id'])
         self.assertEqual(show_port_body['port']['device_id'],
                          router['id'])
@@ -155,17 +155,18 @@
         network = self.create_network()
         self.create_subnet(network)
         router = self._create_router(data_utils.rand_name('router-'))
-        port_body = self.client.create_port(
+        port_body = self.ports_client.create_port(
             network_id=network['id'])
         # add router interface to port created above
-        interface = self.client.add_router_interface_with_port_id(
-            router['id'], port_body['port']['id'])
+        interface = self.client.add_router_interface(
+            router['id'],
+            port_id=port_body['port']['id'])
         self.addCleanup(self._remove_router_interface_with_port_id,
                         router['id'], port_body['port']['id'])
         self.assertIn('subnet_id', interface.keys())
         self.assertIn('port_id', interface.keys())
         # Verify router id is equal to device id in port details
-        show_port_body = self.client.show_port(
+        show_port_body = self.ports_client.show_port(
             interface['port_id'])
         self.assertEqual(show_port_body['port']['device_id'],
                          router['id'])
@@ -181,7 +182,7 @@
             self.assertEqual(v, actual_ext_gw_info[k])
 
     def _verify_gateway_port(self, router_id):
-        list_body = self.admin_client.list_ports(
+        list_body = self.admin_ports_client.list_ports(
             network_id=CONF.network.public_network_id,
             device_id=router_id)
         self.assertEqual(len(list_body['ports']), 1)
@@ -245,7 +246,7 @@
         self.client.update_router(router['id'], external_gateway_info={})
         self._verify_router_gateway(router['id'])
         # No gateway port expected
-        list_body = self.admin_client.list_ports(
+        list_body = self.admin_ports_client.list_ports(
             network_id=CONF.network.public_network_id,
             device_id=router['id'])
         self.assertFalse(list_body['ports'])
@@ -301,7 +302,7 @@
 
         test_routes.sort(key=lambda x: x['destination'])
         extra_route = self.client.update_extra_routes(router['id'],
-                                                      test_routes)
+                                                      routes=test_routes)
         show_body = self.client.show_router(router['id'])
         # Assert the number of routes
         self.assertEqual(routes_num, len(extra_route['router']['routes']))
@@ -357,7 +358,7 @@
                                       interface02['port_id'])
 
     def _verify_router_interface(self, router_id, subnet_id, port_id):
-        show_port_body = self.client.show_port(port_id)
+        show_port_body = self.ports_client.show_port(port_id)
         interface_port = show_port_body['port']
         self.assertEqual(router_id, interface_port['device_id'])
         self.assertEqual(subnet_id,
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index 90da6fd..7b07d42 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -84,8 +84,8 @@
     @test.attr(type=['negative'])
     @test.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
     def test_router_remove_interface_in_use_returns_409(self):
-        self.client.add_router_interface_with_subnet_id(
-            self.router['id'], self.subnet['id'])
+        self.client.add_router_interface(self.router['id'],
+                                         subnet_id=self.subnet['id'])
         self.assertRaises(lib_exc.Conflict,
                           self.client.delete_router,
                           self.router['id'])
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index ccc5232..7d0765e 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -41,7 +41,8 @@
                                            remote_ip_prefix=None):
         # Create Security Group rule with the input params and validate
         # that SG rule is created with the same parameters.
-        rule_create_body = self.client.create_security_group_rule(
+        sec_group_rules_client = self.security_group_rules_client
+        rule_create_body = sec_group_rules_client.create_security_group_rule(
             security_group_id=sg_id,
             direction=direction,
             ethertype=ethertype,
@@ -71,7 +72,7 @@
     @test.idempotent_id('e30abd17-fef9-4739-8617-dc26da88e686')
     def test_list_security_groups(self):
         # Verify the that security group belonging to tenant exist in list
-        body = self.client.list_security_groups()
+        body = self.security_groups_client.list_security_groups()
         security_groups = body['security_groups']
         found = None
         for n in security_groups:
@@ -86,7 +87,7 @@
         group_create_body, name = self._create_security_group()
 
         # List security groups and verify if created group is there in response
-        list_body = self.client.list_security_groups()
+        list_body = self.security_groups_client.list_security_groups()
         secgroup_list = list()
         for secgroup in list_body['security_groups']:
             secgroup_list.append(secgroup['id'])
@@ -94,7 +95,7 @@
         # Update the security group
         new_name = data_utils.rand_name('security-')
         new_description = data_utils.rand_name('security-description')
-        update_body = self.client.update_security_group(
+        update_body = self.security_groups_client.update_security_group(
             group_create_body['security_group']['id'],
             name=new_name,
             description=new_description)
@@ -103,7 +104,7 @@
         self.assertEqual(update_body['security_group']['description'],
                          new_description)
         # Show details of the updated security group
-        show_body = self.client.show_security_group(
+        show_body = self.security_groups_client.show_security_group(
             group_create_body['security_group']['id'])
         self.assertEqual(show_body['security_group']['name'], new_name)
         self.assertEqual(show_body['security_group']['description'],
@@ -116,8 +117,9 @@
 
         # Create rules for each protocol
         protocols = ['tcp', 'udp', 'icmp']
+        client = self.security_group_rules_client
         for protocol in protocols:
-            rule_create_body = self.client.create_security_group_rule(
+            rule_create_body = client.create_security_group_rule(
                 security_group_id=group_create_body['security_group']['id'],
                 protocol=protocol,
                 direction='ingress',
@@ -125,7 +127,7 @@
             )
 
             # Show details of the created security rule
-            show_rule_body = self.client.show_security_group_rule(
+            show_rule_body = client.show_security_group_rule(
                 rule_create_body['security_group_rule']['id']
             )
             create_dict = rule_create_body['security_group_rule']
@@ -135,7 +137,8 @@
                                  "%s does not match." % key)
 
             # List rules and verify created rule is in response
-            rule_list_body = self.client.list_security_group_rules()
+            rule_list_body = (
+                self.security_group_rules_client.list_security_group_rules())
             rule_list = [rule['id']
                          for rule in rule_list_body['security_group_rules']]
             self.assertIn(rule_create_body['security_group_rule']['id'],
@@ -223,7 +226,8 @@
         direction = 'ingress'
         protocol = 17
         security_group_id = group_create_body['security_group']['id']
-        rule_create_body = self.client.create_security_group_rule(
+        client = self.security_group_rules_client
+        rule_create_body = client.create_security_group_rule(
             security_group_id=security_group_id,
             direction=direction,
             protocol=protocol
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index f80ea59..ff38e9e 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -38,23 +38,25 @@
     @test.idempotent_id('424fd5c3-9ddc-486a-b45f-39bf0c820fc6')
     def test_show_non_existent_security_group(self):
         non_exist_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
-                          non_exist_id)
+        self.assertRaises(
+            lib_exc.NotFound, self.security_groups_client.show_security_group,
+            non_exist_id)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('4c094c09-000b-4e41-8100-9617600c02a6')
     def test_show_non_existent_security_group_rule(self):
         non_exist_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.show_security_group_rule,
-                          non_exist_id)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.security_group_rules_client.show_security_group_rule,
+            non_exist_id)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('1f1bb89d-5664-4956-9fcd-83ee0fa603df')
     def test_delete_non_existent_security_group(self):
         non_exist_id = str(uuid.uuid4())
         self.assertRaises(lib_exc.NotFound,
-                          self.client.delete_security_group,
+                          self.security_groups_client.delete_security_group,
                           non_exist_id
                           )
 
@@ -66,7 +68,8 @@
         # Create rule with bad protocol name
         pname = 'bad_protocol_name'
         self.assertRaises(
-            lib_exc.BadRequest, self.client.create_security_group_rule,
+            lib_exc.BadRequest,
+            self.security_group_rules_client.create_security_group_rule,
             security_group_id=group_create_body['security_group']['id'],
             protocol=pname, direction='ingress', ethertype=self.ethertype)
 
@@ -79,7 +82,8 @@
         prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
         for remote_ip_prefix in prefix:
             self.assertRaises(
-                lib_exc.BadRequest, self.client.create_security_group_rule,
+                lib_exc.BadRequest,
+                self.security_group_rules_client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='tcp', direction='ingress', ethertype=self.ethertype,
                 remote_ip_prefix=remote_ip_prefix)
@@ -94,7 +98,8 @@
         group_ids = ['bad_group_id', non_exist_id]
         for remote_group_id in group_ids:
             self.assertRaises(
-                lib_exc.NotFound, self.client.create_security_group_rule,
+                lib_exc.NotFound,
+                self.security_group_rules_client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='tcp', direction='ingress', ethertype=self.ethertype,
                 remote_group_id=remote_group_id)
@@ -108,7 +113,8 @@
         # Create rule specifying both remote_ip_prefix and remote_group_id
         prefix = self._tenant_network_cidr
         self.assertRaises(
-            lib_exc.BadRequest, self.client.create_security_group_rule,
+            lib_exc.BadRequest,
+            self.security_group_rules_client.create_security_group_rule,
             security_group_id=sg1_body['security_group']['id'],
             protocol='tcp', direction='ingress',
             ethertype=self.ethertype, remote_ip_prefix=prefix,
@@ -122,7 +128,8 @@
         # Create rule with bad ethertype
         ethertype = 'bad_ethertype'
         self.assertRaises(
-            lib_exc.BadRequest, self.client.create_security_group_rule,
+            lib_exc.BadRequest,
+            self.security_group_rules_client.create_security_group_rule,
             security_group_id=group_create_body['security_group']['id'],
             protocol='udp', direction='ingress', ethertype=ethertype)
 
@@ -139,7 +146,8 @@
                   (-16, 65536, 'Invalid value for port')]
         for pmin, pmax, msg in states:
             ex = self.assertRaises(
-                lib_exc.BadRequest, self.client.create_security_group_rule,
+                lib_exc.BadRequest,
+                self.security_group_rules_client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='tcp', port_range_min=pmin, port_range_max=pmax,
                 direction='ingress', ethertype=self.ethertype)
@@ -151,7 +159,8 @@
                   (300, 1, 'Invalid value for ICMP type')]
         for pmin, pmax, msg in states:
             ex = self.assertRaises(
-                lib_exc.BadRequest, self.client.create_security_group_rule,
+                lib_exc.BadRequest,
+                self.security_group_rules_client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='icmp', port_range_min=pmin, port_range_max=pmax,
                 direction='ingress', ethertype=self.ethertype)
@@ -163,7 +172,7 @@
         # Create security group named 'default', it should be failed.
         name = 'default'
         self.assertRaises(lib_exc.Conflict,
-                          self.client.create_security_group,
+                          self.security_groups_client.create_security_group,
                           name=name)
 
     @test.attr(type=['negative'])
@@ -175,7 +184,7 @@
         min_port = 66
         max_port = 67
         # Create a rule with valid params
-        self.client.create_security_group_rule(
+        self.security_group_rules_client.create_security_group_rule(
             security_group_id=body['security_group']['id'],
             direction='ingress',
             ethertype=self.ethertype,
@@ -186,7 +195,8 @@
 
         # Try creating the same security group rule, it should fail
         self.assertRaises(
-            lib_exc.Conflict, self.client.create_security_group_rule,
+            lib_exc.Conflict,
+            self.security_group_rules_client.create_security_group_rule,
             security_group_id=body['security_group']['id'],
             protocol='tcp', direction='ingress', ethertype=self.ethertype,
             port_range_min=min_port, port_range_max=max_port)
@@ -196,10 +206,11 @@
     def test_create_security_group_rule_with_non_existent_security_group(self):
         # Create security group rules with a non-existent security group.
         non_existent_sg = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound,
-                          self.client.create_security_group_rule,
-                          security_group_id=non_existent_sg,
-                          direction='ingress', ethertype=self.ethertype)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.security_group_rules_client.create_security_group_rule,
+            security_group_id=non_existent_sg,
+            direction='ingress', ethertype=self.ethertype)
 
 
 class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
@@ -220,7 +231,7 @@
             self.assertRaisesRegexp(
                 lib_exc.BadRequest,
                 "Conflicting value ethertype",
-                self.client.create_security_group_rule,
+                self.security_group_rules_client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='tcp', direction='ingress',
                 ethertype=pair['ethertype'],
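
Every negative rule-creation call in the hunks above now goes through the dedicated security_group_rules_client instead of the generic self.client. A minimal sketch of that pattern, where the test instance, its client attribute and the group id are assumed to come from the usual test base class:

from tempest_lib import exceptions as lib_exc


def assert_bad_protocol_rejected(test, group_id):
    # Expect a 400 from neutron when the protocol name is invalid; the
    # security_group_rules_client attribute is assumed to be provided by the
    # test base class, as in the hunks above.
    test.assertRaises(
        lib_exc.BadRequest,
        test.security_group_rules_client.create_security_group_rule,
        security_group_id=group_id,
        protocol='bad_protocol', direction='ingress', ethertype='IPv4')
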
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index 09478ca..e5d0462 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -22,9 +22,7 @@
 
 
 class SubnetPoolsTestJSON(base.BaseNetworkTest):
-    """
-    Tests the following operations in the subnetpools API using the REST client
-    for Neutron:
+    """Tests the following operations in the subnetpools API:
 
         Create a subnet pool.
         Update a subnet pool.
@@ -52,27 +50,28 @@
         subnetpool_name = data_utils.rand_name('subnetpools')
         # create subnet pool
         prefix = CONF.network.default_network
-        body = self.client.create_subnetpools(name=subnetpool_name,
-                                              prefixes=prefix)
+        body = self.subnetpools_client.create_subnetpool(name=subnetpool_name,
+                                                         prefixes=prefix)
         subnetpool_id = body["subnetpool"]["id"]
         self.addCleanup(self._cleanup_subnetpools, subnetpool_id)
         self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
         # get detail about subnet pool
-        body = self.client.show_subnetpools(subnetpool_id)
+        body = self.subnetpools_client.show_subnetpool(subnetpool_id)
         self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
         # update the subnet pool
         subnetpool_name = data_utils.rand_name('subnetpools_update')
-        body = self.client.update_subnetpools(subnetpool_id,
-                                              name=subnetpool_name)
+        body = self.subnetpools_client.update_subnetpool(subnetpool_id,
+                                                         name=subnetpool_name)
         self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
         # delete subnet pool
-        body = self.client.delete_subnetpools(subnetpool_id)
-        self.assertRaises(lib_exc.NotFound, self.client.show_subnetpools,
+        body = self.subnetpools_client.delete_subnetpool(subnetpool_id)
+        self.assertRaises(lib_exc.NotFound,
+                          self.subnetpools_client.show_subnetpool,
                           subnetpool_id)
 
     def _cleanup_subnetpools(self, subnetpool_id):
         # this is used to cleanup the resources
         try:
-            self.client.delete_subnetpools(subnetpool_id)
+            self.subnetpools_client.delete_subnetpool(subnetpool_id)
         except lib_exc.NotFound:
             pass
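
The subnetpools client methods are renamed to the singular form here. A rough sketch of the full lifecycle under that naming, where the client object, pool name and prefix are illustrative assumptions:

def subnetpool_lifecycle(subnetpools_client):
    # Create, show, update and delete a pool using the singular method names.
    body = subnetpools_client.create_subnetpool(name='demo-pool',
                                                prefixes=['10.11.12.0/24'])
    pool_id = body['subnetpool']['id']
    subnetpools_client.show_subnetpool(pool_id)
    subnetpools_client.update_subnetpool(pool_id, name='demo-pool-updated')
    subnetpools_client.delete_subnetpool(pool_id)
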
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 41a7d65..2621581 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -90,10 +90,8 @@
                 pass
 
     def assertHeaders(self, resp, target, method):
-        """
-        Common method to check the existence and the format of common response
-        headers
-        """
+        """Check the existence and the format of response headers"""
+
         self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
                         target, method))
         self.assertThat(resp, custom_matchers.AreAllWellFormatted())
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 78707d8..0f6a330 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -92,8 +92,7 @@
     @test.idempotent_id('63f51f9f-5f1d-4fc6-b5be-d454d70949d6')
     @test.requires_ext(extension='account_quotas', service='object')
     def test_admin_modify_quota(self):
-        """Test that the ResellerAdmin is able to modify and remove the quota
-        on a user's account.
+        """Test ResellerAdmin can modify/remove the quota on a user's account
 
         Using the account client, the test modifies the quota
         successively to:
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 2bf331a..aee17d3 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -83,9 +83,7 @@
     @test.idempotent_id('d1dc5076-555e-4e6d-9697-28f1fe976324')
     @test.requires_ext(extension='account_quotas', service='object')
     def test_user_modify_quota(self):
-        """Test that a user is not able to modify or remove a quota on
-        its account.
-        """
+        """Test that a user cannot modify or remove a quota on its account."""
 
         # Not able to remove quota
         self.assertRaises(lib_exc.Forbidden,
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index d64efee..e8b035b 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -545,7 +545,7 @@
         self.assertTrue(resp['etag'].strip('\"').isalnum())
         self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
         self.assertNotEqual(len(resp['content-type']), 0)
-        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+        self.assertTrue(re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*",
                                  resp['x-trans-id']))
         self.assertNotEqual(len(resp['date']), 0)
         self.assertEqual(resp['accept-ranges'], 'bytes')
@@ -637,7 +637,7 @@
         self.assertTrue(resp['etag'].strip('\"').isalnum())
         self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
         self.assertNotEqual(len(resp['content-type']), 0)
-        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+        self.assertTrue(re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*",
                                  resp['x-trans-id']))
         self.assertNotEqual(len(resp['date']), 0)
         self.assertEqual(resp['accept-ranges'], 'bytes')
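
The relaxed x-trans-id assertion above keys on the shape of Swift's transaction id rather than requiring the whole header to be hex. A quick, self-contained illustration of what the new pattern accepts (the sample ids are made up):

import re

TRANS_ID_RE = re.compile("^tx[0-9a-f]{21}-[0-9a-f]{10}.*")

# "tx" + 21 hex chars + "-" + 10 hex chars, optionally followed by a suffix
# such as the one some proxy deployments append to their transaction ids.
assert TRANS_ID_RE.match("tx" + "a" * 21 + "-" + "0123456789")
assert TRANS_ID_RE.match("tx" + "b" * 21 + "-" + "abcdef0123" + "-demo-suffix")
assert not TRANS_ID_RE.match("tx123-456")  # too short to be a transaction id
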
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 4968835..f833bf3 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -12,7 +12,6 @@
 
 import os.path
 
-from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 import yaml
 
@@ -22,8 +21,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class BaseOrchestrationTest(tempest.test.BaseTestCase):
     """Base test case class for all Orchestration API tests."""
@@ -37,12 +34,6 @@
             raise cls.skipException("Heat support is required")
 
     @classmethod
-    def setup_credentials(cls):
-        super(BaseOrchestrationTest, cls).setup_credentials()
-        stack_owner_role = CONF.orchestration.stack_owner_role
-        cls.os = cls.get_client_manager(roles=[stack_owner_role])
-
-    @classmethod
     def setup_clients(cls):
         super(BaseOrchestrationTest, cls).setup_clients()
         cls.orchestration_client = cls.os.orchestration_client
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index 0416bc7..9d2b425 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -10,8 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import logging
-
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
 from tempest import config
@@ -19,7 +17,6 @@
 
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class StackEnvironmentTest(base.BaseOrchestrationTest):
diff --git a/tempest/api/orchestration/stacks/test_limits.py b/tempest/api/orchestration/stacks/test_limits.py
index bb5b89d..2acf97b 100644
--- a/tempest/api/orchestration/stacks/test_limits.py
+++ b/tempest/api/orchestration/stacks/test_limits.py
@@ -10,8 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import logging
-
 from tempest_lib import exceptions as lib_exc
 
 from tempest.api.orchestration import base
@@ -21,8 +19,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class TestServerStackLimits(base.BaseOrchestrationTest):
 
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 070150d..3c9dcb1 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -16,7 +16,6 @@
 import netaddr
 
 from tempest.api.orchestration import base
-from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
@@ -36,15 +35,11 @@
             raise cls.skipException("Neutron support is required")
 
     @classmethod
-    def setup_credentials(cls):
-        super(NeutronResourcesTestJSON, cls).setup_credentials()
-        cls.os = clients.Manager()
-
-    @classmethod
     def setup_clients(cls):
         super(NeutronResourcesTestJSON, cls).setup_clients()
         cls.network_client = cls.os.network_client
         cls.subnets_client = cls.os.subnets_client
+        cls.ports_client = cls.os.ports_client
 
     @classmethod
     def resource_setup(cls):
@@ -88,7 +83,7 @@
                 server_id = body['physical_resource_id']
                 LOG.debug('Console output for %s', server_id)
                 output = cls.servers_client.get_console_output(
-                    server_id, None)['output']
+                    server_id)['output']
                 LOG.debug(output)
             raise e
 
@@ -164,7 +159,7 @@
         router_id = self.test_resources.get('Router')['physical_resource_id']
         network_id = self.test_resources.get('Network')['physical_resource_id']
         subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
-        body = self.network_client.list_ports()
+        body = self.ports_client.list_ports()
         ports = body['ports']
         router_ports = filter(lambda port: port['device_id'] ==
                               router_id, ports)
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index e37587c..3be5bb6 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -10,8 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import logging
-
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
 from tempest import config
@@ -19,8 +17,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class StacksTestJSON(base.BaseOrchestrationTest):
 
@@ -91,8 +87,7 @@
 
     @test.idempotent_id('c951d55e-7cce-4c1f-83a0-bad735437fa6')
     def test_list_resources(self):
-        """Getting list of created resources for the stack should be possible.
-        """
+        """Get list of created resources for the stack should be possible."""
         resources = self.list_resources(self.stack_identifier)
         self.assertEqual({self.resource_name: self.resource_type}, resources)
 
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index b4d7fa0..0400e76 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -10,17 +10,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-import logging
-
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
-
-
 class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
     _tpl_type = 'yaml'
     _resource = 'resources'
diff --git a/tempest/api/orchestration/stacks/test_soft_conf.py b/tempest/api/orchestration/stacks/test_soft_conf.py
index 34d93e4..ab45929 100644
--- a/tempest/api/orchestration/stacks/test_soft_conf.py
+++ b/tempest/api/orchestration/stacks/test_soft_conf.py
@@ -10,7 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 
 from tempest.api.orchestration import base
@@ -18,7 +17,6 @@
 from tempest import config
 from tempest import test
 
-LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
 
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index f766b00..28463ab 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -10,16 +10,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
-
-
 class StacksTestJSON(base.BaseOrchestrationTest):
     empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
 
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index c0f1c4b..fea5e37 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -30,14 +30,6 @@
             raise cls.skipException("Swift support is required")
 
     @classmethod
-    def setup_credentials(cls):
-        super(SwiftResourcesTestJSON, cls).setup_credentials()
-        stack_owner_role = CONF.orchestration.stack_owner_role
-        operator_role = CONF.object_storage.operator_role
-        cls.os = cls.get_client_manager(
-            roles=[stack_owner_role, operator_role])
-
-    @classmethod
     def setup_clients(cls):
         super(SwiftResourcesTestJSON, cls).setup_clients()
         cls.account_client = cls.os.account_client
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index ae9a411..e51551b 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -10,8 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import logging
-
 from tempest_lib import exceptions as lib_exc
 
 from tempest.api.orchestration import base
@@ -21,7 +19,6 @@
 
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class CinderResourcesTest(base.BaseOrchestrationTest):
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 8f07614..ff06810 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -17,6 +17,7 @@
 
 from tempest.common import compute
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import exceptions
 import tempest.test
@@ -61,18 +62,9 @@
         cls.glance_v2_notifications = ['image.download', 'image.serve']
 
         cls.server_ids = []
-        cls.alarm_ids = []
         cls.image_ids = []
 
     @classmethod
-    def create_alarm(cls, **kwargs):
-        body = cls.telemetry_client.create_alarm(
-            name=data_utils.rand_name('telemetry_alarm'),
-            type='threshold', **kwargs)
-        cls.alarm_ids.append(body['alarm_id'])
-        return body
-
-    @classmethod
     def create_server(cls):
         tenant_network = cls.get_tenant_network()
         body, server = compute.create_test_server(
@@ -84,10 +76,11 @@
         return body
 
     @classmethod
-    def create_image(cls, client):
-        body = client.create_image(
-            data_utils.rand_name('image'), container_format='bare',
-            disk_format='raw', visibility='private')
+    def create_image(cls, client, **kwargs):
+        body = client.create_image(name=data_utils.rand_name('image'),
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   **kwargs)
         # TODO(jswarren) Move ['image'] up to initial body value assignment
         # once both v1 and v2 glance clients include the full response
         # object.
@@ -105,15 +98,20 @@
                 pass
 
     @classmethod
+    def wait_for_server_termination(cls, server_id):
+        waiters.wait_for_server_termination(cls.servers_client,
+                                            server_id)
+
+    @classmethod
     def resource_cleanup(cls):
-        cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
         cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
+        cls.cleanup_resources(cls.wait_for_server_termination, cls.server_ids)
         cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
         super(BaseTelemetryTest, cls).resource_cleanup()
 
     def await_samples(self, metric, query):
-        """
-        This method is to wait for sample to add it to database.
+        """This method is to wait for sample to add it to database.
+
         There are long time delays when using Postgresql (or Mysql)
         database as ceilometer backend
         """
@@ -153,3 +151,46 @@
         raise exceptions.TimeoutException(
             'Event with query:%s has not been added to the '
             'database within %d seconds' % (query, CONF.compute.build_timeout))
+
+
+class BaseAlarmingTest(tempest.test.BaseTestCase):
+    """Base test case class for all Alarming API tests."""
+
+    credentials = ['primary']
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseAlarmingTest, cls).skip_checks()
+        if not CONF.service_available.aodh:
+            raise cls.skipException("Aodh support is required")
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseAlarmingTest, cls).setup_clients()
+        cls.alarming_client = cls.os.alarming_client
+
+    @classmethod
+    def resource_setup(cls):
+        super(BaseAlarmingTest, cls).resource_setup()
+        cls.alarm_ids = []
+
+    @classmethod
+    def create_alarm(cls, **kwargs):
+        body = cls.alarming_client.create_alarm(
+            name=data_utils.rand_name('telemetry_alarm'),
+            type='threshold', **kwargs)
+        cls.alarm_ids.append(body['alarm_id'])
+        return body
+
+    @staticmethod
+    def cleanup_resources(method, list_of_ids):
+        for resource_id in list_of_ids:
+            try:
+                method(resource_id)
+            except lib_exc.NotFound:
+                pass
+
+    @classmethod
+    def resource_cleanup(cls):
+        cls.cleanup_resources(cls.alarming_client.delete_alarm, cls.alarm_ids)
+        super(BaseAlarmingTest, cls).resource_cleanup()
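
The alarm helpers move from the telemetry base class into the new BaseAlarmingTest above. A sketch of how a subclass would use them; the subclass itself is hypothetical and the threshold-rule values are borrowed from the negative tests below purely for illustration:

from tempest.api.telemetry import base


class ExampleAlarmTest(base.BaseAlarmingTest):  # hypothetical subclass

    @classmethod
    def resource_setup(cls):
        super(ExampleAlarmTest, cls).resource_setup()
        rule = {'comparison_operator': 'eq',
                'threshold': 100.0,
                'period': 90}
        # create_alarm() records the id in cls.alarm_ids, so resource_cleanup()
        # later deletes it via alarming_client.delete_alarm (NotFound ignored).
        cls.alarm = cls.create_alarm(threshold_rule=rule)
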
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_alarming_api.py
similarity index 80%
rename from tempest/api/telemetry/test_telemetry_alarming_api.py
rename to tempest/api/telemetry/test_alarming_api.py
index 6c84b98..daa0939 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_alarming_api.py
@@ -17,7 +17,7 @@
 from tempest import test
 
 
-class TelemetryAlarmingAPITestJSON(base.BaseTelemetryTest):
+class TelemetryAlarmingAPITestJSON(base.BaseAlarmingTest):
 
     @classmethod
     def resource_setup(cls):
@@ -32,7 +32,7 @@
     @test.idempotent_id('1c918e06-210b-41eb-bd45-14676dd77cd6')
     def test_alarm_list(self):
         # List alarms
-        alarm_list = self.telemetry_client.list_alarms()
+        alarm_list = self.alarming_client.list_alarms()
 
         # Verify created alarm in the list
         fetched_ids = [a['alarm_id'] for a in alarm_list]
@@ -46,7 +46,7 @@
     def test_create_update_get_delete_alarm(self):
         # Create an alarm
         alarm_name = data_utils.rand_name('telemetry_alarm')
-        body = self.telemetry_client.create_alarm(
+        body = self.alarming_client.create_alarm(
             name=alarm_name, type='threshold', threshold_rule=self.rule)
         self.assertEqual(alarm_name, body['name'])
         alarm_id = body['alarm_id']
@@ -57,7 +57,7 @@
                     'threshold': 70.0,
                     'period': 60}
         alarm_name_updated = data_utils.rand_name('telemetry-alarm-update')
-        body = self.telemetry_client.update_alarm(
+        body = self.alarming_client.update_alarm(
             alarm_id,
             threshold_rule=new_rule,
             name=alarm_name_updated,
@@ -65,19 +65,19 @@
         self.assertEqual(alarm_name_updated, body['name'])
         self.assertDictContainsSubset(new_rule, body['threshold_rule'])
         # Get and verify details of an alarm after update
-        body = self.telemetry_client.show_alarm(alarm_id)
+        body = self.alarming_client.show_alarm(alarm_id)
         self.assertEqual(alarm_name_updated, body['name'])
         self.assertDictContainsSubset(new_rule, body['threshold_rule'])
         # Get history for the alarm and verify the same
-        body = self.telemetry_client.show_alarm_history(alarm_id)
+        body = self.alarming_client.show_alarm_history(alarm_id)
         self.assertEqual("rule change", body[0]['type'])
         self.assertIn(alarm_name_updated, body[0]['detail'])
         self.assertEqual("creation", body[1]['type'])
         self.assertIn(alarm_name, body[1]['detail'])
         # Delete alarm and verify if deleted
-        self.telemetry_client.delete_alarm(alarm_id)
+        self.alarming_client.delete_alarm(alarm_id)
         self.assertRaises(lib_exc.NotFound,
-                          self.telemetry_client.show_alarm, alarm_id)
+                          self.alarming_client.show_alarm, alarm_id)
 
     @test.idempotent_id('aca49486-70bb-4016-87e0-f6131374f741')
     def test_set_get_alarm_state(self):
@@ -86,11 +86,11 @@
         # Set alarm state and verify
         new_state =\
             [elem for elem in alarm_states if elem != alarm['state']][0]
-        state = self.telemetry_client.alarm_set_state(alarm['alarm_id'],
-                                                      new_state)
+        state = self.alarming_client.alarm_set_state(alarm['alarm_id'],
+                                                     new_state)
         self.assertEqual(new_state, state.data)
         # Get alarm state and verify
-        state = self.telemetry_client.show_alarm_state(alarm['alarm_id'])
+        state = self.alarming_client.show_alarm_state(alarm['alarm_id'])
         self.assertEqual(new_state, state.data)
 
     @test.idempotent_id('08d7e45a-1344-4e5c-ba6f-f6cbb77f55b9')
@@ -99,13 +99,13 @@
                 "operator": "or"}
         # Verifies alarm create
         alarm_name = data_utils.rand_name('combination_alarm')
-        body = self.telemetry_client.create_alarm(name=alarm_name,
-                                                  combination_rule=rule,
-                                                  type='combination')
+        body = self.alarming_client.create_alarm(name=alarm_name,
+                                                 combination_rule=rule,
+                                                 type='combination')
         self.assertEqual(alarm_name, body['name'])
         alarm_id = body['alarm_id']
         self.assertDictContainsSubset(rule, body['combination_rule'])
         # Verify alarm delete
-        self.telemetry_client.delete_alarm(alarm_id)
+        self.alarming_client.delete_alarm(alarm_id)
         self.assertRaises(lib_exc.NotFound,
-                          self.telemetry_client.show_alarm, alarm_id)
+                          self.alarming_client.show_alarm, alarm_id)
diff --git a/tempest/api/telemetry/test_alarming_api_negative.py b/tempest/api/telemetry/test_alarming_api_negative.py
index 7d5a0bf..e945556 100644
--- a/tempest/api/telemetry/test_alarming_api_negative.py
+++ b/tempest/api/telemetry/test_alarming_api_negative.py
@@ -20,9 +20,9 @@
 import uuid
 
 
-class TelemetryAlarmingNegativeTest(base.BaseTelemetryTest):
-    """here we have negative tests for show_alarm, update_alarm, show_alarm_history
-       Tests
+class TelemetryAlarmingNegativeTest(base.BaseAlarmingTest):
+    """Negative tests for show_alarm, update_alarm, show_alarm_history tests
+
         ** show non-existent alarm
         ** show the deleted alarm
         ** delete deleted alarm
@@ -34,7 +34,7 @@
     def test_get_non_existent_alarm(self):
         # get the non-existent alarm
         non_existent_id = str(uuid.uuid4())
-        self.assertRaises(lib_exc.NotFound, self.telemetry_client.show_alarm,
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm,
                           non_existent_id)
 
     @test.attr(type=['negative'])
@@ -46,14 +46,14 @@
                 'comparison_operator': 'eq',
                 'threshold': 100.0,
                 'period': 90}
-        body = self.telemetry_client.create_alarm(
+        body = self.alarming_client.create_alarm(
             name=alarm_name,
             type='threshold',
             threshold_rule=rule)
         alarm_id = body['alarm_id']
-        self.telemetry_client.delete_alarm(alarm_id)
+        self.alarming_client.delete_alarm(alarm_id)
         # get the deleted alarm
-        self.assertRaises(lib_exc.NotFound, self.telemetry_client.show_alarm,
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm,
                           alarm_id)
 
         # update the deleted alarm
@@ -62,10 +62,10 @@
                         'comparison_operator': 'eq',
                         'threshold': 70,
                         'period': 50}
-        self.assertRaises(lib_exc.NotFound, self.telemetry_client.update_alarm,
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.update_alarm,
                           alarm_id, threshold_rule=updated_rule,
                           name=updated_alarm_name,
                           type='threshold')
         # delete the deleted alarm
-        self.assertRaises(lib_exc.NotFound, self.telemetry_client.delete_alarm,
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.delete_alarm,
                           alarm_id)
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 31eff9d..a575125 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -39,7 +39,7 @@
     @testtools.skipIf(not CONF.image_feature_enabled.api_v1,
                       "Glance api v1 is disabled")
     def test_check_glance_v1_notifications(self):
-        body = self.create_image(self.image_client)
+        body = self.create_image(self.image_client, is_public=False)
         self.image_client.update_image(body['id'], data='data')
 
         query = 'resource', 'eq', body['id']
@@ -55,10 +55,10 @@
     @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
                       "Glance api v2 is disabled")
     def test_check_glance_v2_notifications(self):
-        body = self.create_image(self.image_client_v2)
+        body = self.create_image(self.image_client_v2, visibility='private')
 
         self.image_client_v2.store_image_file(body['id'], "file")
-        self.image_client_v2.load_image_file(body['id'])
+        self.image_client_v2.show_image_file(body['id'])
 
         query = 'resource', 'eq', body['id']
 
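
With create_image() now forwarding **kwargs, the visibility flag moves to the caller and differs between glance v1 and v2, as the two notification tests above show. A hedged sketch of a helper wrapping that difference; the client objects are assumed test fixtures:

from tempest.common.utils import data_utils


def create_private_image(client, api_version=2):
    # Glance v2 expresses privacy via visibility; v1 via is_public.
    kwargs = ({'visibility': 'private'} if api_version == 2
              else {'is_public': False})
    return client.create_image(name=data_utils.rand_name('image'),
                               container_format='bare',
                               disk_format='raw',
                               **kwargs)
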
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 4337922..60e6e6c 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -10,8 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
+import six
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
 from tempest import config
@@ -19,8 +18,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class VolumeMultiBackendV2Test(base.BaseVolumeAdminTest):
 
@@ -34,9 +31,14 @@
     @classmethod
     def resource_setup(cls):
         super(VolumeMultiBackendV2Test, cls).resource_setup()
-
-        cls.backend1_name = CONF.volume.backend1_name
-        cls.backend2_name = CONF.volume.backend2_name
+        # The backend1_name/backend2_name options are deprecated_for_removal,
+        # but keep supporting them as long as they are both set.
+        if CONF.volume.backend1_name and CONF.volume.backend2_name:
+            cls.backend_names = {CONF.volume.backend1_name,
+                                 CONF.volume.backend2_name}
+        else:
+            # otherwise read the backend names from the list option.
+            cls.backend_names = set(CONF.volume.backend_names)
 
         cls.name_field = cls.special_fields['name_field']
         cls.volume_type_id_list = []
@@ -44,15 +46,15 @@
         cls.volume_id_list_without_prefix = []
 
         # Volume/Type creation (uses volume_backend_name)
-        cls._create_type_and_volume(cls.backend1_name, False)
-        # Volume/Type creation (uses capabilities:volume_backend_name)
-        cls._create_type_and_volume(cls.backend1_name, True)
-
-        if cls.backend1_name != cls.backend2_name:
-            # Volume/Type creation (uses backend2_name)
-            cls._create_type_and_volume(cls.backend2_name, False)
+        # It is not allowed to create the same backend name twice
+        if len(cls.backend_names) < 2:
+            raise cls.skipException("Requires at least two different "
+                                    "backend names")
+        for backend_name in cls.backend_names:
+            # Volume/Type creation (uses backend_name)
+            cls._create_type_and_volume(backend_name, False)
             # Volume/Type creation (uses capabilities:volume_backend_name)
-            cls._create_type_and_volume(cls.backend2_name, True)
+            cls._create_type_and_volume(backend_name, True)
 
     @classmethod
     def _create_type_and_volume(self, backend_name_key, with_prefix):
@@ -66,7 +68,7 @@
         else:
             extra_specs = {spec_key_without_prefix: backend_name_key}
         self.type = self.volume_types_client.create_volume_type(
-            type_name, extra_specs=extra_specs)['volume_type']
+            name=type_name, extra_specs=extra_specs)['volume_type']
         self.volume_type_id_list.append(self.type['id'])
 
         params = {self.name_field: vol_name, 'volume_type': type_name}
@@ -104,32 +106,28 @@
     @test.idempotent_id('c1a41f3f-9dad-493e-9f09-3ff197d477cc')
     def test_backend_name_reporting(self):
         # get volume ids which were created by types without prefix
-        volume_id = self.volume_id_list_without_prefix[0]
-        self._test_backend_name_reporting_by_volume_id(volume_id)
+        for volume_id in self.volume_id_list_without_prefix:
+            self._test_backend_name_reporting_by_volume_id(volume_id)
 
     @test.idempotent_id('f38e647f-ab42-4a31-a2e7-ca86a6485215')
     def test_backend_name_reporting_with_prefix(self):
         # get volume ids which were created by types with prefix
-        volume_id = self.volume_id_list_with_prefix[0]
-        self._test_backend_name_reporting_by_volume_id(volume_id)
+        for volume_id in self.volume_id_list_with_prefix:
+            self._test_backend_name_reporting_by_volume_id(volume_id)
 
     @test.idempotent_id('46435ab1-a0af-4401-8373-f14e66b0dd58')
     def test_backend_name_distinction(self):
-        if self.backend1_name == self.backend2_name:
-            raise self.skipException("backends configured with same name")
-        # get volume id which created by type without prefix
-        volume1_id = self.volume_id_list_without_prefix[0]
-        volume2_id = self.volume_id_list_without_prefix[1]
-        self._test_backend_name_distinction(volume1_id, volume2_id)
+        # get volume ids which were created by types without prefix
+        self._test_backend_name_distinction(self.volume_id_list_without_prefix)
 
     @test.idempotent_id('4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed')
     def test_backend_name_distinction_with_prefix(self):
-        if self.backend1_name == self.backend2_name:
-            raise self.skipException("backends configured with same name")
-        # get volume id which created by type without prefix
-        volume1_id = self.volume_id_list_with_prefix[0]
-        volume2_id = self.volume_id_list_with_prefix[1]
-        self._test_backend_name_distinction(volume1_id, volume2_id)
+        # get volume ids which were created by types with prefix
+        self._test_backend_name_distinction(self.volume_id_list_with_prefix)
+
+    def _get_volume_host(self, volume_id):
+        return self.admin_volume_client.show_volume(
+            volume_id)['volume']['os-vol-host-attr:host']
 
     def _test_backend_name_reporting_by_volume_id(self, volume_id):
         # this test checks if os-vol-attr:host is populated correctly after
@@ -143,19 +141,16 @@
                volume_id)
         self.assertTrue(len(volume1_host.split("@")) > 1, msg)
 
-    def _test_backend_name_distinction(self, volume1_id, volume2_id):
-        # this test checks that the two volumes created at setUp don't
+    def _test_backend_name_distinction(self, volume_id_list):
+        # this test checks that the volumes created at setUp don't
         # belong to the same backend (if they do, then the
         # volume backend distinction is not working properly)
-        volume = self.admin_volume_client.show_volume(volume1_id)['volume']
-        volume1_host = volume['os-vol-host-attr:host']
-
-        volume = self.admin_volume_client.show_volume(volume2_id)['volume']
-        volume2_host = volume['os-vol-host-attr:host']
-
-        msg = ("volumes %s and %s were created in the same backend" %
-               (volume1_id, volume2_id))
-        self.assertNotEqual(volume1_host, volume2_host, msg)
+        volume_hosts = [self._get_volume_host(volume) for volume in
+                        volume_id_list]
+        # assert that volumes are each created on separate hosts:
+        msg = ("volumes %s were created in the same backend" % ", "
+               .join(volume_hosts))
+        six.assertCountEqual(self, volume_hosts, set(volume_hosts), msg)
 
 
 class VolumeMultiBackendV1Test(VolumeMultiBackendV2Test):
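
The rewritten distinction check relies on comparing the host list against its own set: six.assertCountEqual fails as soon as two volumes report the same backend host. A small standalone illustration (the host strings are made up):

import unittest

import six


class HostDistinctnessDemo(unittest.TestCase):
    def test_hosts_are_distinct(self):
        hosts = ['node1@lvm-1#pool', 'node1@lvm-2#pool']
        # A duplicate host would make the list and its set differ, and
        # six.assertCountEqual would then raise an assertion error.
        six.assertCountEqual(self, hosts, set(hosts),
                             'volumes %s were created in the same backend'
                             % ', '.join(hosts))


if __name__ == '__main__':
    unittest.main()
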
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index aa6bfdf..f2bf613 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -50,7 +50,7 @@
         snap_name = data_utils.rand_name(cls.__name__ + '-Snapshot')
         params = {cls.name_field: snap_name}
         cls.snapshot = cls.client.create_snapshot(
-            cls.volume['id'], **params)['snapshot']
+            volume_id=cls.volume['id'], **params)['snapshot']
         cls.client.wait_for_snapshot_status(cls.snapshot['id'],
                                             'available')
 
@@ -77,7 +77,7 @@
     def _create_reset_and_force_delete_temp_snapshot(self, status=None):
         # Create snapshot, reset snapshot status,
         # and force delete temp snapshot
-        temp_snapshot = self.create_snapshot(self.volume['id'])
+        temp_snapshot = self.create_snapshot(volume_id=self.volume['id'])
         if status:
             self.admin_snapshots_client.\
                 reset_snapshot_status(temp_snapshot['id'], status)
@@ -110,7 +110,7 @@
         status = 'error'
         progress_alias = self._get_progress_alias()
         self.client.update_snapshot_status(self.snapshot['id'],
-                                           status, progress)
+                                           status=status, progress=progress)
         snapshot_get = self.admin_snapshots_client.show_snapshot(
             self.snapshot['id'])['snapshot']
         self.assertEqual(status, snapshot_get['status'])
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
index 74fffb9..2b7ee45 100644
--- a/tempest/api/volume/admin/test_volume_services.py
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -18,8 +18,8 @@
 
 
 class VolumesServicesV2TestJSON(base.BaseVolumeAdminTest):
-    """
-    Tests Volume Services API.
+    """Tests Volume Services API.
+
     volume service list requires admin privileges.
     """
 
@@ -39,9 +39,8 @@
 
     @test.idempotent_id('63a3e1ca-37ee-4983-826d-83276a370d25')
     def test_get_service_by_service_binary_name(self):
-        params = {'binary': self.binary_name}
-        services = (self.admin_volume_services_client.list_services(params)
-                    ['services'])
+        services = (self.admin_volume_services_client.list_services(
+            binary=self.binary_name)['services'])
         self.assertNotEqual(0, len(services))
         for service in services:
             self.assertEqual(self.binary_name, service['binary'])
@@ -50,10 +49,9 @@
     def test_get_service_by_host_name(self):
         services_on_host = [service for service in self.services if
                             service['host'] == self.host_name]
-        params = {'host': self.host_name}
 
-        services = (self.admin_volume_services_client.list_services(params)
-                    ['services'])
+        services = (self.admin_volume_services_client.list_services(
+            host=self.host_name)['services'])
 
         # we could have a periodic job checkin between the 2 service
         # lookups, so only compare binary lists.
@@ -65,10 +63,10 @@
 
     @test.idempotent_id('ffa6167c-4497-4944-a464-226bbdb53908')
     def test_get_service_by_service_and_host_name(self):
-        params = {'host': self.host_name, 'binary': self.binary_name}
 
-        services = (self.admin_volume_services_client.list_services(params)
-                    ['services'])
+        services = (self.admin_volume_services_client.list_services(
+            host=self.host_name, binary=self.binary_name))['services']
+
         self.assertEqual(1, len(services))
         self.assertEqual(self.host_name, services[0]['host'])
         self.assertEqual(self.binary_name, services[0]['binary'])
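
The services client calls above switch from a hand-built params dict to plain keyword arguments. A rough usage sketch, where the client is assumed to be the admin volume services client and cinder-volume is the usual volume service binary:

def find_volume_service(services_client, host):
    # Filter server-side by host and binary instead of passing a params dict.
    services = services_client.list_services(
        host=host, binary='cinder-volume')['services']
    return services[0] if services else None
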
diff --git a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
index ce0b618..c66207f 100644
--- a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
@@ -52,14 +52,14 @@
         # NOTE(gfidente): no need to delete in tearDown as
         # they are created using utility wrapper methods.
         cls.volume = cls.create_volume()
-        cls.snapshot = cls.create_snapshot(cls.volume['id'])
+        cls.snapshot = cls.create_snapshot(volume_id=cls.volume['id'])
 
     @test.attr(type='negative')
     @test.idempotent_id('02bbf63f-6c05-4357-9d98-2926a94064ff')
     def test_quota_volume_snapshots(self):
         self.assertRaises(lib_exc.OverLimit,
                           self.snapshots_client.create_snapshot,
-                          self.volume['id'])
+                          volume_id=self.volume['id'])
 
     @test.attr(type='negative')
     @test.idempotent_id('c99a1ca9-6cdf-498d-9fdf-25832babef27')
@@ -74,7 +74,7 @@
             **new_quota_set)
         self.assertRaises(lib_exc.OverLimit,
                           self.snapshots_client.create_snapshot,
-                          self.volume['id'])
+                          volume_id=self.volume['id'])
 
 
 class VolumeSnapshotNegativeV1TestJSON(VolumeSnapshotQuotasNegativeV2TestJSON):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 2d9019a..c032d9c 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -32,7 +32,7 @@
 
     @test.idempotent_id('9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54')
     def test_volume_type_list(self):
-        # List Volume types.
+        # List volume types.
         body = self.volume_types_client.list_volume_types()['volume_types']
         self.assertIsInstance(body, list)
 
@@ -50,7 +50,7 @@
         for i in range(2):
             vol_type_name = data_utils.rand_name("volume-type")
             vol_type = self.volume_types_client.create_volume_type(
-                vol_type_name,
+                name=vol_type_name,
                 extra_specs=extra_specs)['volume_type']
             volume_types.append(vol_type)
             self.addCleanup(self._delete_volume_type, vol_type['id'])
@@ -70,7 +70,7 @@
 
         # Update volume with new volume_type
         self.volumes_client.retype_volume(volume['id'],
-                                          volume_type=volume_types[1]['id'])
+                                          new_type=volume_types[1]['id'])
         self.volumes_client.wait_for_volume_status(volume['id'], 'available')
 
         # Get volume details and Verify
@@ -97,7 +97,7 @@
         extra_specs = {"storage_protocol": proto,
                        "vendor_name": vendor}
         body = self.volume_types_client.create_volume_type(
-            name,
+            name=name,
             extra_specs=extra_specs)['volume_type']
         self.assertIn('id', body)
         self.addCleanup(self._delete_volume_type, body['id'])
@@ -125,7 +125,8 @@
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type")
-        body = self.volume_types_client.create_volume_type(name)['volume_type']
+        body = self.volume_types_client.create_volume_type(
+            name=name)['volume_type']
         self.addCleanup(self._delete_volume_type, body['id'])
 
         # Create encryption type
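
Volume type creation and retyping now use keyword arguments (name= and new_type=). A sketch of that call shape; the type name and client objects are assumed for illustration:

def retype_to_fresh_type(volume_types_client, volumes_client, volume_id):
    # Create a throwaway type, retype the volume to it, and wait until the
    # volume settles back to 'available'.
    new_type = volume_types_client.create_volume_type(
        name='demo-type')['volume_type']
    volumes_client.retype_volume(volume_id, new_type=new_type['id'])
    volumes_client.wait_for_volume_status(volume_id, 'available')
    return new_type
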
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index bec803c..502cd86 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -25,7 +25,7 @@
         super(VolumeTypesExtraSpecsV2Test, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type')
         cls.volume_type = cls.volume_types_client.create_volume_type(
-            vol_type_name)['volume_type']
+            name=vol_type_name)['volume_type']
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 040ef53..6483af3 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -30,7 +30,7 @@
         vol_type_name = data_utils.rand_name('Volume-type')
         cls.extra_specs = {"spec1": "val1"}
         cls.volume_type = cls.volume_types_client.create_volume_type(
-            vol_type_name,
+            name=vol_type_name,
             extra_specs=cls.extra_specs)['volume_type']
 
     @classmethod
@@ -70,7 +70,7 @@
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
             # body.
-        extra_spec = {"spec1": "val2", 'spec2': 'val1'}
+        extra_spec = {"spec1": "val2", "spec2": "val1"}
         self.assertRaises(
             lib_exc.BadRequest,
             self.volume_types_client.update_volume_type_extra_specs,
@@ -101,7 +101,7 @@
         self.assertRaises(
             lib_exc.BadRequest,
             self.volume_types_client.create_volume_type_extra_specs,
-            self.volume_type['id'], ['invalid'])
+            self.volume_type['id'], extra_specs=['invalid'])
 
     @test.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 2694b63..bc32fc9 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -36,7 +36,7 @@
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
         self.assertRaises(lib_exc.BadRequest,
-                          self.volume_types_client.create_volume_type, '')
+                          self.volume_types_client.create_volume_type, name='')
 
     @test.idempotent_id('994610d6-0476-4018-a644-a2602ef5d4aa')
     def test_get_nonexistent_type_id(self):
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 6c32321..253a3e1 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -48,12 +48,12 @@
     def _reset_volume_status(self, volume_id, status):
         # Reset the volume status
         body = self.admin_volume_client.reset_volume_status(volume_id,
-                                                            status)
+                                                            status=status)
         return body
 
     def tearDown(self):
         # Set volume's status to available after test
-        self._reset_volume_status(self.volume['id'], 'available')
+        self._reset_volume_status(self.volume['id'], status='available')
         super(VolumesActionsV2Test, self).tearDown()
 
     def _create_temp_volume(self):
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 0399413..4b2d3f3 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from tempest_lib import decorators
 
 from tempest.api.volume import base
@@ -22,7 +21,6 @@
 from tempest import test
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class VolumesBackupsV2Test(base.BaseVolumeAdminTest):
@@ -48,7 +46,7 @@
         # Create backup
         backup_name = data_utils.rand_name('Backup')
         create_backup = self.backups_adm_client.create_backup
-        backup = create_backup(self.volume['id'],
+        backup = create_backup(volume_id=self.volume['id'],
                                name=backup_name)['backup']
         self.addCleanup(self.backups_adm_client.delete_backup,
                         backup['id'])
@@ -85,9 +83,8 @@
     def test_volume_backup_export_import(self):
         # Create backup
         backup_name = data_utils.rand_name('Backup')
-        backup = (self.backups_adm_client.create_backup(self.volume['id'],
-                                                        name=backup_name)
-                  ['backup'])
+        backup = (self.backups_adm_client.create_backup(
+            volume_id=self.volume['id'], name=backup_name)['backup'])
         self.addCleanup(self._delete_backup, backup['id'])
         self.assertEqual(backup_name, backup['name'])
         self.backups_adm_client.wait_for_backup_status(backup['id'],
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 12e6761..cc906e5 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 
 from tempest.common import compute
@@ -24,8 +23,6 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class BaseVolumeTest(tempest.test.BaseTestCase):
     """Base test case class for all Cinder API tests."""
@@ -62,7 +59,7 @@
         super(BaseVolumeTest, cls).setup_clients()
         cls.servers_client = cls.os.servers_client
         cls.compute_networks_client = cls.os.compute_networks_client
-        cls.images_client = cls.os.images_client
+        cls.compute_images_client = cls.os.compute_images_client
 
         if cls._api_version == 1:
             cls.snapshots_client = cls.os.snapshots_client
@@ -106,14 +103,14 @@
         super(BaseVolumeTest, cls).resource_cleanup()
 
     @classmethod
-    def create_volume(cls, size=None, **kwargs):
+    def create_volume(cls, **kwargs):
         """Wrapper utility that returns a test volume."""
         name = data_utils.rand_name('Volume')
 
         name_field = cls.special_fields['name_field']
 
         kwargs[name_field] = name
-        volume = cls.volumes_client.create_volume(size, **kwargs)['volume']
+        volume = cls.volumes_client.create_volume(**kwargs)['volume']
 
         cls.volumes.append(volume)
         cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
@@ -123,7 +120,7 @@
     def create_snapshot(cls, volume_id=1, **kwargs):
         """Wrapper utility that returns a test snapshot."""
         snapshot = cls.snapshots_client.create_snapshot(
-            volume_id, **kwargs)['snapshot']
+            volume_id=volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
         cls.snapshots_client.wait_for_snapshot_status(snapshot['id'],
                                                       'available')
@@ -217,8 +214,8 @@
         """create a test Qos-Specs."""
         name = name or data_utils.rand_name(cls.__name__ + '-QoS')
         consumer = consumer or 'front-end'
-        qos_specs = cls.volume_qos_client.create_qos(name, consumer,
-                                                     **kwargs)['qos_specs']
+        qos_specs = cls.volume_qos_client.create_qos(
+            name=name, consumer=consumer, **kwargs)['qos_specs']
         cls.qos_specs.append(qos_specs['id'])
         return qos_specs
 
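
With the explicit size argument folded into **kwargs, the base-class wrappers now forward everything as keyword arguments down to the service clients. A hedged sketch of how callers drive them, assuming a BaseVolumeTest subclass is in scope:

def make_volume_and_snapshot(test_cls, size=1):
    # size and volume_id both travel as keyword arguments now.
    volume = test_cls.create_volume(size=size)
    snapshot = test_cls.create_snapshot(volume_id=volume['id'])
    return volume, snapshot
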
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index 366b8d2..fe51375 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -18,10 +18,7 @@
 
 
 class AvailabilityZoneV2TestJSON(base.BaseVolumeTest):
-
-    """
-    Tests Availability Zone V2 API List
-    """
+    """Tests Availability Zone V2 API List"""
 
     @classmethod
     def setup_clients(cls):
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
index 2f7c3df..722a39a 100644
--- a/tempest/api/volume/test_qos.py
+++ b/tempest/api/volume/test_qos.py
@@ -53,7 +53,7 @@
     def _create_test_volume_type(self):
         vol_type_name = utils.rand_name("volume-type")
         vol_type = self.volume_types_client.create_volume_type(
-            vol_type_name)['volume_type']
+            name=vol_type_name)['volume_type']
         self.addCleanup(self.volume_types_client.delete_volume_type,
                         vol_type['id'])
         return vol_type
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index e50ca95..688baf5 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -45,7 +45,7 @@
 
     def tearDown(self):
         # Update the metadata to {}
-        self.client.update_snapshot_metadata(self.snapshot_id, {})
+        self.client.update_snapshot_metadata(self.snapshot_id, metadata={})
         super(SnapshotV2MetadataTestJSON, self).tearDown()
 
     @test.idempotent_id('a2f20f99-e363-4584-be97-bc33afb1a56c')
@@ -89,7 +89,7 @@
 
         # Update metadata item
         body = self.client.update_snapshot_metadata(
-            self.snapshot_id, update)['metadata']
+            self.snapshot_id, metadata=update)['metadata']
         # Get the metadata of the snapshot
         body = self.client.show_snapshot_metadata(
             self.snapshot_id)['metadata']
@@ -114,7 +114,7 @@
         self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
         # Update metadata item
         body = self.client.update_snapshot_metadata_item(
-            self.snapshot_id, "key3", update_item)['meta']
+            self.snapshot_id, "key3", meta=update_item)['meta']
         # Get the metadata of the snapshot
         body = self.client.show_snapshot_metadata(
             self.snapshot_id)['metadata']
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index c0b6b7e..7046dcf 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -47,7 +47,8 @@
         self.addCleanup(self._delete_volume, volume['id'])
 
         # Create a volume transfer
-        transfer = self.client.create_volume_transfer(volume['id'])['transfer']
+        transfer = self.client.create_volume_transfer(
+            volume_id=volume['id'])['transfer']
         transfer_id = transfer['id']
         auth_key = transfer['auth_key']
         self.client.wait_for_volume_status(volume['id'],
@@ -63,8 +64,8 @@
         self.assertThat(len(body), matchers.GreaterThan(0))
 
         # Accept a volume transfer by alt_tenant
-        body = self.alt_client.accept_volume_transfer(transfer_id,
-                                                      auth_key)['transfer']
+        body = self.alt_client.accept_volume_transfer(
+            transfer_id, auth_key=auth_key)['transfer']
         self.alt_client.wait_for_volume_status(volume['id'], 'available')
 
     @test.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
@@ -74,7 +75,8 @@
         self.addCleanup(self._delete_volume, volume['id'])
 
         # Create a volume transfer
-        body = self.client.create_volume_transfer(volume['id'])['transfer']
+        body = self.client.create_volume_transfer(
+            volume_id=volume['id'])['transfer']
         transfer_id = body['id']
         self.client.wait_for_volume_status(volume['id'],
                                            'awaiting-transfer')
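
For context on the calls converted above: a volume transfer is created by the owning tenant, which yields a transfer id plus an auth_key, and the receiving tenant accepts it by presenting that auth_key. A minimal in-memory sketch of that hand-off (hypothetical stand-ins, not the Cinder client):

```python
import uuid

transfers = {}

def create_volume_transfer(volume_id):
    # the owner creates the transfer and receives id + auth_key
    transfer = {'id': str(uuid.uuid4()),
                'auth_key': uuid.uuid4().hex[:8],
                'volume_id': volume_id}
    transfers[transfer['id']] = transfer
    return {'transfer': transfer}

def accept_volume_transfer(transfer_id, auth_key):
    # the receiving tenant must present the matching auth_key
    transfer = transfers[transfer_id]
    if auth_key != transfer['auth_key']:
        raise ValueError('invalid auth_key')
    return {'transfer': {'id': transfer_id, 'volume_id': transfer['volume_id']}}

created = create_volume_transfer(volume_id='vol-1')['transfer']
print(accept_volume_transfer(created['id'], auth_key=created['auth_key']))
```
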
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index d4636ee..5f9ea7f 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -62,8 +62,8 @@
         # Volume is attached and detached successfully from an instance
         mountpoint = '/dev/vdc'
         self.client.attach_volume(self.volume['id'],
-                                  self.server['id'],
-                                  mountpoint)
+                                  instance_uuid=self.server['id'],
+                                  mountpoint=mountpoint)
         self.client.wait_for_volume_status(self.volume['id'], 'in-use')
         self.client.detach_volume(self.volume['id'])
         self.client.wait_for_volume_status(self.volume['id'], 'available')
@@ -74,7 +74,8 @@
     def test_volume_bootable(self):
         # Verify that a volume bootable flag is retrieved
         for bool_bootable in [True, False]:
-            self.client.set_bootable_volume(self.volume['id'], bool_bootable)
+            self.client.set_bootable_volume(self.volume['id'],
+                                            bootable=bool_bootable)
             fetched_volume = self.client.show_volume(
                 self.volume['id'])['volume']
             # Get Volume information
@@ -88,8 +89,8 @@
         # Verify that a volume's attachment information is retrieved
         mountpoint = '/dev/vdc'
         self.client.attach_volume(self.volume['id'],
-                                  self.server['id'],
-                                  mountpoint)
+                                  instance_uuid=self.server['id'],
+                                  mountpoint=mountpoint)
         self.client.wait_for_volume_status(self.volume['id'], 'in-use')
         # NOTE(gfidente): added in reverse order because functions will be
         # called in reverse order to the order they are added (LIFO)
@@ -114,8 +115,8 @@
         # using the Glance image_client and from Cinder via tearDownClass.
         image_name = data_utils.rand_name('Image')
         body = self.client.upload_volume(
-            self.volume['id'], image_name,
-            CONF.volume.disk_format)['os-volume_upload_image']
+            self.volume['id'], image_name=image_name,
+            disk_format=CONF.volume.disk_format)['os-volume_upload_image']
         image_id = body["image_id"]
         self.addCleanup(self.image_client.delete_image, image_id)
         self.image_client.wait_for_image_status(image_id, 'active')
@@ -142,7 +143,7 @@
         # Update volume readonly true
         readonly = True
         self.client.update_volume_readonly(self.volume['id'],
-                                           readonly)
+                                           readonly=readonly)
         # Get Volume information
         fetched_volume = self.client.show_volume(self.volume['id'])['volume']
         bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
@@ -150,7 +151,8 @@
 
         # Update volume readonly false
         readonly = False
-        self.client.update_volume_readonly(self.volume['id'], readonly)
+        self.client.update_volume_readonly(self.volume['id'],
+                                           readonly=readonly)
 
         # Get Volume information
         fetched_volume = self.client.show_volume(self.volume['id'])['volume']
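
The NOTE(gfidente) comment above relies on addCleanup running registered cleanups in LIFO order, so the detach is executed before the delete. A small self-contained sketch of that ordering (not the testtools implementation):

```python
cleanups = []

def add_cleanup(func, *args):
    cleanups.append((func, args))

def run_cleanups():
    while cleanups:
        func, args = cleanups.pop()   # LIFO: last registered runs first
        func(*args)

add_cleanup(print, 'delete volume')   # registered first, runs last
add_cleanup(print, 'detach volume')   # registered last, runs first
run_cleanups()
```
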
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 78f5571..ed1e5c5 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -32,7 +32,7 @@
         # Extend Volume Test.
         self.volume = self.create_volume()
         extend_size = int(self.volume['size']) + 1
-        self.client.extend_volume(self.volume['id'], extend_size)
+        self.client.extend_volume(self.volume['id'], new_size=extend_size)
         self.client.wait_for_volume_status(self.volume['id'], 'available')
         volume = self.client.show_volume(self.volume['id'])['volume']
         self.assertEqual(int(volume['size']), extend_size)
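
extend_volume only issues the request; the test then polls wait_for_volume_status until the volume reports 'available' again. A hedged sketch of such a poll loop (hypothetical helper and toy backend, not the tempest implementation):

```python
import time

def wait_for_volume_status(show_volume, volume_id, status,
                           timeout=10, interval=0.1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if show_volume(volume_id)['volume']['status'] == status:
            return
        time.sleep(interval)
    raise RuntimeError('volume %s never reached status %s' % (volume_id, status))

# toy backend that becomes 'available' on the second poll
state = {'polls': 0}

def show_volume(volume_id):
    state['polls'] += 1
    current = 'extending' if state['polls'] < 2 else 'available'
    return {'volume': {'id': volume_id, 'status': current, 'size': 2}}

wait_for_volume_status(show_volume, 'vol-1', 'available')
print('volume extended and available')
```
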
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 35c8898..aa3ef2f 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -133,7 +133,8 @@
     @test.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
     @test.services('image')
     def test_volume_create_get_update_delete_from_image(self):
-        image = self.images_client.show_image(CONF.compute.image_ref)['image']
+        image = self.compute_images_client.show_image(
+            CONF.compute.image_ref)['image']
         min_disk = image.get('minDisk')
         disk_size = max(min_disk, CONF.volume.volume_size)
         self._volume_create_get_update_delete(
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 620366a..38a5a80 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -15,25 +15,19 @@
 #    under the License.
 import operator
 
-from oslo_log import log as logging
 from testtools import matchers
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
 from tempest import test
 
-LOG = logging.getLogger(__name__)
-
 
 class VolumesV2ListTestJSON(base.BaseVolumeTest):
-
-    """
-    This test creates a number of 1G volumes. To run successfully,
-    ensure that the backing file for the volume group that Nova uses
-    has space for at least 3 1G volumes!
-    If you are running a Devstack environment, ensure that the
-    VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
-    """
+    # NOTE: This test creates a number of 1G volumes. To run successfully,
+    # ensure that the backing file for the volume group that Nova uses
+    # has space for at least 3 1G volumes.
+    # If you are running a Devstack environment, ensure that
+    # VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc.
 
     VOLUME_FIELDS = ('id', 'name')
 
@@ -83,10 +77,7 @@
         super(VolumesV2ListTestJSON, cls).resource_cleanup()
 
     def _list_by_param_value_and_assert(self, params, with_detail=False):
-        """
-        Perform list or list_details action with given params
-        and validates result.
-        """
+        """list or list_details with given params and validates result"""
         if with_detail:
             fetched_vol_list = \
                 self.client.list_volumes(detail=True, params=params)['volumes']
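
The helper shortened above implements a common pattern: list with a filter, then assert that every returned volume carries the filter values. In plain data (no service client), the assertion amounts to:

```python
fetched_vol_list = [{'id': '1', 'name': 'vol-a', 'status': 'available'},
                    {'id': '2', 'name': 'vol-b', 'status': 'available'}]
params = {'status': 'available'}

for volume in fetched_vol_list:
    for key, value in params.items():
        assert volume[key] == value, (
            '%s=%s expected, got %s' % (key, value, volume[key]))
print('all listed volumes match the filter')
```
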
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 0af40ea..ad6f556 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -190,8 +190,8 @@
         self.assertRaises(lib_exc.NotFound,
                           self.client.attach_volume,
                           str(uuid.uuid4()),
-                          server['id'],
-                          self.mountpoint)
+                          instance_uuid=server['id'],
+                          mountpoint=self.mountpoint)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
@@ -206,7 +206,7 @@
         # Extend volume with smaller size than original size.
         extend_size = 0
         self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
-                          self.volume['id'], extend_size)
+                          self.volume['id'], new_size=extend_size)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f')
@@ -214,7 +214,7 @@
         # Extend volume when size is non number.
         extend_size = 'abc'
         self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
-                          self.volume['id'], extend_size)
+                          self.volume['id'], new_size=extend_size)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('355218f1-8991-400a-a6bb-971239287d92')
@@ -222,7 +222,7 @@
         # Extend volume with None size.
         extend_size = None
         self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
-                          self.volume['id'], extend_size)
+                          self.volume['id'], new_size=extend_size)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
@@ -230,7 +230,7 @@
         # Extend volume size when volume is nonexistent.
         extend_size = int(self.volume['size']) + 1
         self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
-                          str(uuid.uuid4()), extend_size)
+                          str(uuid.uuid4()), new_size=extend_size)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
@@ -238,7 +238,7 @@
         # Extend volume size when passing volume id is None.
         extend_size = int(self.volume['size']) + 1
         self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
-                          None, extend_size)
+                          None, new_size=extend_size)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2')
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 9866da3..c79235a 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -10,14 +10,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import test
 
-LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
 
@@ -42,17 +39,15 @@
         self.volumes_client.detach_volume(volume_id)
         self.volumes_client.wait_for_volume_status(volume_id, 'available')
 
-    def _list_by_param_values_and_assert(self, params, with_detail=False):
-        """
-        Perform list or list_details action with given params
-        and validates result.
-        """
+    def _list_by_param_values_and_assert(self, with_detail=False, **params):
+        """list or list_details with given params and validates result."""
+
         if with_detail:
             fetched_snap_list = self.snapshots_client.list_snapshots(
-                detail=True, params=params)['snapshots']
+                detail=True, **params)['snapshots']
         else:
             fetched_snap_list = self.snapshots_client.list_snapshots(
-                params=params)['snapshots']
+                **params)['snapshots']
 
         # Validating params of fetched snapshots
         for snap in fetched_snap_list:
@@ -137,16 +132,16 @@
 
         # Verify list snapshots by display_name filter
         params = {self.name_field: snapshot[self.name_field]}
-        self._list_by_param_values_and_assert(params)
+        self._list_by_param_values_and_assert(**params)
 
         # Verify list snapshots by status filter
         params = {'status': 'available'}
-        self._list_by_param_values_and_assert(params)
+        self._list_by_param_values_and_assert(**params)
 
         # Verify list snapshots by status and display name filter
         params = {'status': 'available',
                   self.name_field: snapshot[self.name_field]}
-        self._list_by_param_values_and_assert(params)
+        self._list_by_param_values_and_assert(**params)
 
     @test.idempotent_id('220a1022-1fcd-4a74-a7bd-6b859156cda2')
     def test_snapshots_list_details_with_params(self):
@@ -159,14 +154,14 @@
 
         # Verify list snapshot details by display_name filter
         params = {self.name_field: snapshot[self.name_field]}
-        self._list_by_param_values_and_assert(params, with_detail=True)
+        self._list_by_param_values_and_assert(with_detail=True, **params)
         # Verify list snapshot details by status filter
         params = {'status': 'available'}
-        self._list_by_param_values_and_assert(params, with_detail=True)
+        self._list_by_param_values_and_assert(with_detail=True, **params)
         # Verify list snapshot details by status and display name filter
         params = {'status': 'available',
                   self.name_field: snapshot[self.name_field]}
-        self._list_by_param_values_and_assert(params, with_detail=True)
+        self._list_by_param_values_and_assert(with_detail=True, **params)
 
     @test.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot(self):
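
The signature change above lets callers build a plain dict of filters and expand it with ** into keyword arguments, which the helper forwards unchanged to list_snapshots. A minimal sketch of that forwarding (stand-in client, not the tempest one):

```python
def list_snapshots(detail=False, **params):
    # stand-in for the service client call; echoes what it received
    return {'detail': detail, 'filters': params}

def list_by_param_values_and_assert(with_detail=False, **params):
    return list_snapshots(detail=with_detail, **params)

filters = {'status': 'available', 'display_name': 'snap-1'}
print(list_by_param_values_and_assert(with_detail=True, **filters))
# -> {'detail': True,
#     'filters': {'status': 'available', 'display_name': 'snap-1'}}
```
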
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index b604360..d46c9b5 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -37,7 +37,7 @@
         s_name = data_utils.rand_name('snap')
         self.assertRaises(lib_exc.NotFound,
                           self.snapshots_client.create_snapshot,
-                          str(uuid.uuid4()), display_name=s_name)
+                          volume_id=str(uuid.uuid4()), display_name=s_name)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('bb9da53e-d335-4309-9c15-7e76fd5e4d6d')
@@ -46,7 +46,7 @@
         s_name = data_utils.rand_name('snap')
         self.assertRaises(lib_exc.NotFound,
                           self.snapshots_client.create_snapshot,
-                          None, display_name=s_name)
+                          volume_id=None, display_name=s_name)
 
 
 class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
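
assertRaises forwards any extra positional and keyword arguments to the callable under test, which is what allows the volume_id=... style above. A toy example under that assumption (fake client and exception, not the Cinder API):

```python
import unittest

class FakeSnapshotsClient(object):
    def create_snapshot(self, volume_id=None, display_name=None):
        if volume_id is None:
            raise LookupError('no such volume: %s' % volume_id)
        return {'snapshot': {'volume_id': volume_id, 'name': display_name}}

class SnapshotNegativeSketch(unittest.TestCase):
    def test_create_snapshot_with_null_volume_id(self):
        client = FakeSnapshotsClient()
        # keyword arguments are passed through to create_snapshot()
        self.assertRaises(LookupError, client.create_snapshot,
                          volume_id=None, display_name='snap')

if __name__ == '__main__':
    unittest.main()
```
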
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 94a9d16..6568627 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -21,9 +21,7 @@
 
 
 class VolumesV2ListTestJSON(base.BaseVolumeTest):
-
-    """
-    volumes v2 specific tests.
+    """volumes v2 specific tests.
 
     This test creates a number of 1G volumes. To run successfully,
     ensure that the backing file for the volume group that Nova uses
diff --git a/tempest/api_schema/response/compute/v2_1/agents.py b/tempest/api_schema/response/compute/v2_1/agents.py
deleted file mode 100644
index da38198..0000000
--- a/tempest/api_schema/response/compute/v2_1/agents.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-common_agent_info = {
-    'type': 'object',
-    'properties': {
-        'agent_id': {'type': ['integer', 'string']},
-        'hypervisor': {'type': 'string'},
-        'os': {'type': 'string'},
-        'architecture': {'type': 'string'},
-        'version': {'type': 'string'},
-        'url': {'type': 'string', 'format': 'uri'},
-        'md5hash': {'type': 'string'}
-    },
-    'additionalProperties': False,
-    'required': ['agent_id', 'hypervisor', 'os', 'architecture',
-                 'version', 'url', 'md5hash']
-}
-
-list_agents = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'agents': {
-                'type': 'array',
-                'items': common_agent_info
-            }
-        },
-        'additionalProperties': False,
-        'required': ['agents']
-    }
-}
-
-create_agent = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'agent': common_agent_info
-        },
-        'additionalProperties': False,
-        'required': ['agent']
-    }
-}
-
-delete_agent = {
-    'status_code': [200]
-}
diff --git a/tempest/api_schema/response/compute/v2_1/floating_ips.py b/tempest/api_schema/response/compute/v2_1/floating_ips.py
deleted file mode 100644
index 3551681..0000000
--- a/tempest/api_schema/response/compute/v2_1/floating_ips.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-common_floating_ip_info = {
-    'type': 'object',
-    'properties': {
-        # NOTE: Now the type of 'id' is integer, but
-        # here allows 'string' also because we will be
-        # able to change it to 'uuid' in the future.
-        'id': {'type': ['integer', 'string']},
-        'pool': {'type': ['string', 'null']},
-        'instance_id': {'type': ['string', 'null']},
-        'ip': parameter_types.ip_address,
-        'fixed_ip': parameter_types.ip_address
-    },
-    'additionalProperties': False,
-    'required': ['id', 'pool', 'instance_id',
-                 'ip', 'fixed_ip'],
-
-}
-list_floating_ips = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ips': {
-                'type': 'array',
-                'items': common_floating_ip_info
-            },
-        },
-        'additionalProperties': False,
-        'required': ['floating_ips'],
-    }
-}
-
-create_get_floating_ip = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ip': common_floating_ip_info
-        },
-        'additionalProperties': False,
-        'required': ['floating_ip'],
-    }
-}
-
-list_floating_ip_pools = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ip_pools': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'name': {'type': 'string'}
-                    },
-                    'additionalProperties': False,
-                    'required': ['name'],
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['floating_ip_pools'],
-    }
-}
-
-add_remove_floating_ip = {
-    'status_code': [202]
-}
-
-create_floating_ips_bulk = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ips_bulk_create': {
-                'type': 'object',
-                'properties': {
-                    'interface': {'type': ['string', 'null']},
-                    'ip_range': {'type': 'string'},
-                    'pool': {'type': ['string', 'null']},
-                },
-                'additionalProperties': False,
-                'required': ['interface', 'ip_range', 'pool'],
-            }
-        },
-        'additionalProperties': False,
-        'required': ['floating_ips_bulk_create'],
-    }
-}
-
-delete_floating_ips_bulk = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ips_bulk_delete': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        'required': ['floating_ips_bulk_delete'],
-    }
-}
-
-list_floating_ips_bulk = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ip_info': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'address': parameter_types.ip_address,
-                        'instance_uuid': {'type': ['string', 'null']},
-                        'interface': {'type': ['string', 'null']},
-                        'pool': {'type': ['string', 'null']},
-                        'project_id': {'type': ['string', 'null']},
-                        'fixed_ip': parameter_types.ip_address
-                    },
-                    'additionalProperties': False,
-                    # NOTE: fixed_ip is introduced after JUNO release,
-                    # So it is not defined as 'required'.
-                    'required': ['address', 'instance_uuid', 'interface',
-                                 'pool', 'project_id'],
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['floating_ip_info'],
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/images.py b/tempest/api_schema/response/compute/v2_1/images.py
deleted file mode 100644
index a513dcb..0000000
--- a/tempest/api_schema/response/compute/v2_1/images.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-image_links = copy.deepcopy(parameter_types.links)
-image_links['items']['properties'].update({'type': {'type': 'string'}})
-
-common_image_schema = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'status': {'type': 'string'},
-        'updated': {'type': 'string'},
-        'links': image_links,
-        'name': {'type': 'string'},
-        'created': {'type': 'string'},
-        'minDisk': {'type': 'integer'},
-        'minRam': {'type': 'integer'},
-        'progress': {'type': 'integer'},
-        'metadata': {'type': 'object'},
-        'server': {
-            'type': 'object',
-            'properties': {
-                'id': {'type': 'string'},
-                'links': parameter_types.links
-            },
-            'additionalProperties': False,
-            'required': ['id', 'links']
-        },
-        'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
-        'OS-DCF:diskConfig': {'type': 'string'}
-    },
-    'additionalProperties': False,
-    # 'server' attributes only comes in response body if image is
-    # associated with any server. 'OS-EXT-IMG-SIZE:size' & 'OS-DCF:diskConfig'
-    # are API extension,  So those are not defined as 'required'.
-    'required': ['id', 'status', 'updated', 'links', 'name',
-                 'created', 'minDisk', 'minRam', 'progress',
-                 'metadata']
-}
-
-get_image = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'image': common_image_schema
-        },
-        'additionalProperties': False,
-        'required': ['image']
-    }
-}
-
-list_images = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'images': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': 'string'},
-                        'links': image_links,
-                        'name': {'type': 'string'}
-                    },
-                    'additionalProperties': False,
-                    'required': ['id', 'links', 'name']
-                }
-            },
-            'images_links': parameter_types.links
-        },
-        'additionalProperties': False,
-        # NOTE(gmann): images_links attribute is not necessary to be
-        # present always So it is not 'required'.
-        'required': ['images']
-    }
-}
-
-create_image = {
-    'status_code': [202],
-    'response_header': {
-        'type': 'object',
-        'properties': parameter_types.response_header
-    }
-}
-create_image['response_header']['properties'].update(
-    {'location': {
-        'type': 'string',
-        'format': 'uri'}
-     }
-)
-create_image['response_header']['required'] = ['location']
-
-delete = {
-    'status_code': [204]
-}
-
-image_metadata = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'metadata': {'type': 'object'}
-        },
-        'additionalProperties': False,
-        'required': ['metadata']
-    }
-}
-
-image_meta_item = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'meta': {'type': 'object'}
-        },
-        'additionalProperties': False,
-        'required': ['meta']
-    }
-}
-
-list_images_details = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'images': {
-                'type': 'array',
-                'items': common_image_schema
-            },
-            'images_links': parameter_types.links
-        },
-        'additionalProperties': False,
-        # NOTE(gmann): images_links attribute is not necessary to be
-        # present always So it is not 'required'.
-        'required': ['images']
-    }
-}
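
Schemas in these modules pair an expected status_code list with an optional response_body schema. A hedged sketch of how the two parts are typically applied together (hypothetical helper, not tempest's validator):

```python
import jsonschema

def validate_response(schema, status, body):
    # check the HTTP status first, then validate the parsed JSON body
    assert status in schema['status_code'], 'unexpected status %s' % status
    if 'response_body' in schema:
        jsonschema.validate(body, schema['response_body'])

# e.g. the delete schema above only constrains the status code
delete_schema = {'status_code': [204]}
validate_response(delete_schema, 204, None)
print('delete response OK')
```
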
diff --git a/tempest/api_schema/response/compute/v2_1/instance_usage_audit_logs.py b/tempest/api_schema/response/compute/v2_1/instance_usage_audit_logs.py
deleted file mode 100644
index c6c4deb..0000000
--- a/tempest/api_schema/response/compute/v2_1/instance_usage_audit_logs.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-common_instance_usage_audit_log = {
-    'type': 'object',
-    'properties': {
-        'hosts_not_run': {
-            'type': 'array',
-            'items': {'type': 'string'}
-        },
-        'log': {'type': 'object'},
-        'num_hosts': {'type': 'integer'},
-        'num_hosts_done': {'type': 'integer'},
-        'num_hosts_not_run': {'type': 'integer'},
-        'num_hosts_running': {'type': 'integer'},
-        'overall_status': {'type': 'string'},
-        'period_beginning': {'type': 'string'},
-        'period_ending': {'type': 'string'},
-        'total_errors': {'type': 'integer'},
-        'total_instances': {'type': 'integer'}
-    },
-    'additionalProperties': False,
-    'required': ['hosts_not_run', 'log', 'num_hosts', 'num_hosts_done',
-                 'num_hosts_not_run', 'num_hosts_running', 'overall_status',
-                 'period_beginning', 'period_ending', 'total_errors',
-                 'total_instances']
-}
-
-get_instance_usage_audit_log = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'instance_usage_audit_log': common_instance_usage_audit_log
-        },
-        'additionalProperties': False,
-        'required': ['instance_usage_audit_log']
-    }
-}
-
-list_instance_usage_audit_log = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'instance_usage_audit_logs': common_instance_usage_audit_log
-        },
-        'additionalProperties': False,
-        'required': ['instance_usage_audit_logs']
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/interfaces.py b/tempest/api_schema/response/compute/v2_1/interfaces.py
deleted file mode 100644
index 130775b..0000000
--- a/tempest/api_schema/response/compute/v2_1/interfaces.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-interface_common_info = {
-    'type': 'object',
-    'properties': {
-        'port_state': {'type': 'string'},
-        'fixed_ips': {
-            'type': 'array',
-            'items': {
-                'type': 'object',
-                'properties': {
-                    'subnet_id': {
-                        'type': 'string',
-                        'format': 'uuid'
-                    },
-                    'ip_address': parameter_types.ip_address
-                },
-                'additionalProperties': False,
-                'required': ['subnet_id', 'ip_address']
-            }
-        },
-        'port_id': {'type': 'string', 'format': 'uuid'},
-        'net_id': {'type': 'string', 'format': 'uuid'},
-        'mac_addr': parameter_types.mac_address
-    },
-    'additionalProperties': False,
-    'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
-}
-
-get_create_interfaces = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'interfaceAttachment': interface_common_info
-        },
-        'additionalProperties': False,
-        'required': ['interfaceAttachment']
-    }
-}
-
-list_interfaces = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'interfaceAttachments': {
-                'type': 'array',
-                'items': interface_common_info
-            }
-        },
-        'additionalProperties': False,
-        'required': ['interfaceAttachments']
-    }
-}
-
-delete_interface = {
-    'status_code': [202]
-}
diff --git a/tempest/api_schema/response/compute/v2_1/limits.py b/tempest/api_schema/response/compute/v2_1/limits.py
deleted file mode 100644
index 81f175f..0000000
--- a/tempest/api_schema/response/compute/v2_1/limits.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-get_limit = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'limits': {
-                'type': 'object',
-                'properties': {
-                    'absolute': {
-                        'type': 'object',
-                        'properties': {
-                            'maxTotalRAMSize': {'type': 'integer'},
-                            'totalCoresUsed': {'type': 'integer'},
-                            'maxTotalInstances': {'type': 'integer'},
-                            'maxTotalFloatingIps': {'type': 'integer'},
-                            'totalSecurityGroupsUsed': {'type': 'integer'},
-                            'maxTotalCores': {'type': 'integer'},
-                            'totalFloatingIpsUsed': {'type': 'integer'},
-                            'maxSecurityGroups': {'type': 'integer'},
-                            'maxServerMeta': {'type': 'integer'},
-                            'maxPersonality': {'type': 'integer'},
-                            'maxImageMeta': {'type': 'integer'},
-                            'maxPersonalitySize': {'type': 'integer'},
-                            'maxSecurityGroupRules': {'type': 'integer'},
-                            'maxTotalKeypairs': {'type': 'integer'},
-                            'totalRAMUsed': {'type': 'integer'},
-                            'totalInstancesUsed': {'type': 'integer'},
-                            'maxServerGroupMembers': {'type': 'integer'},
-                            'maxServerGroups': {'type': 'integer'},
-                            'totalServerGroupsUsed': {'type': 'integer'}
-                        },
-                        'additionalProperties': False,
-                        # NOTE(gmann): maxServerGroupMembers,  maxServerGroups
-                        # and totalServerGroupsUsed are API extension,
-                        # and some environments return a response without these
-                        # attributes.So they are not 'required'.
-                        'required': ['maxImageMeta',
-                                     'maxPersonality',
-                                     'maxPersonalitySize',
-                                     'maxSecurityGroupRules',
-                                     'maxSecurityGroups',
-                                     'maxServerMeta',
-                                     'maxTotalCores',
-                                     'maxTotalFloatingIps',
-                                     'maxTotalInstances',
-                                     'maxTotalKeypairs',
-                                     'maxTotalRAMSize',
-                                     'totalCoresUsed',
-                                     'totalFloatingIpsUsed',
-                                     'totalInstancesUsed',
-                                     'totalRAMUsed',
-                                     'totalSecurityGroupsUsed']
-                    },
-                    'rate': {
-                        'type': 'array',
-                        'items': {
-                            'type': 'object',
-                            'properties': {
-                                'limit': {
-                                    'type': 'array',
-                                    'items': {
-                                        'type': 'object',
-                                        'properties': {
-                                            'next-available':
-                                                {'type': 'string'},
-                                            'remaining':
-                                                {'type': 'integer'},
-                                            'unit':
-                                                {'type': 'string'},
-                                            'value':
-                                                {'type': 'integer'},
-                                            'verb':
-                                                {'type': 'string'}
-                                        },
-                                        'additionalProperties': False,
-                                    }
-                                },
-                                'regex': {'type': 'string'},
-                                'uri': {'type': 'string'}
-                            },
-                            'additionalProperties': False,
-                        }
-                    }
-                },
-                'additionalProperties': False,
-                'required': ['absolute', 'rate']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['limits']
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/migrations.py b/tempest/api_schema/response/compute/v2_1/migrations.py
deleted file mode 100644
index b7d66ea..0000000
--- a/tempest/api_schema/response/compute/v2_1/migrations.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-list_migrations = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'migrations': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': 'integer'},
-                        'status': {'type': ['string', 'null']},
-                        'instance_uuid': {'type': ['string', 'null']},
-                        'source_node': {'type': ['string', 'null']},
-                        'source_compute': {'type': ['string', 'null']},
-                        'dest_node': {'type': ['string', 'null']},
-                        'dest_compute': {'type': ['string', 'null']},
-                        'dest_host': {'type': ['string', 'null']},
-                        'old_instance_type_id': {'type': ['integer', 'null']},
-                        'new_instance_type_id': {'type': ['integer', 'null']},
-                        'created_at': {'type': 'string'},
-                        'updated_at': {'type': ['string', 'null']}
-                    },
-                    'additionalProperties': False,
-                    'required': [
-                        'id', 'status', 'instance_uuid', 'source_node',
-                        'source_compute', 'dest_node', 'dest_compute',
-                        'dest_host', 'old_instance_type_id',
-                        'new_instance_type_id', 'created_at', 'updated_at'
-                    ]
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['migrations']
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/parameter_types.py b/tempest/api_schema/response/compute/v2_1/parameter_types.py
deleted file mode 100644
index 07cc890..0000000
--- a/tempest/api_schema/response/compute/v2_1/parameter_types.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-links = {
-    'type': 'array',
-    'items': {
-        'type': 'object',
-        'properties': {
-            'href': {
-                'type': 'string',
-                'format': 'uri'
-            },
-            'rel': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        'required': ['href', 'rel']
-    }
-}
-
-mac_address = {
-    'type': 'string',
-    'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
-}
-
-ip_address = {
-    'oneOf': [
-        {
-            'type': 'string',
-            'oneOf': [
-                {'format': 'ipv4'},
-                {'format': 'ipv6'}
-            ]
-        },
-        {'type': 'null'}
-    ]
-}
-
-access_ip_v4 = {
-    'type': 'string',
-    'oneOf': [{'format': 'ipv4'}, {'enum': ['']}]
-}
-
-access_ip_v6 = {
-    'type': 'string',
-    'oneOf': [{'format': 'ipv6'}, {'enum': ['']}]
-}
-
-addresses = {
-    'type': 'object',
-    'patternProperties': {
-        # NOTE: Here is for 'private' or something.
-        '^[a-zA-Z0-9-_.]+$': {
-            'type': 'array',
-            'items': {
-                'type': 'object',
-                'properties': {
-                    'version': {'type': 'integer'},
-                    'addr': {
-                        'type': 'string',
-                        'oneOf': [
-                            {'format': 'ipv4'},
-                            {'format': 'ipv6'}
-                        ]
-                    }
-                },
-                'additionalProperties': False,
-                'required': ['version', 'addr']
-            }
-        }
-    }
-}
-
-response_header = {
-    'connection': {'type': 'string'},
-    'content-length': {'type': 'string'},
-    'content-type': {'type': 'string'},
-    'status': {'type': 'string'},
-    'x-compute-request-id': {'type': 'string'},
-    'vary': {'type': 'string'},
-    'x-openstack-nova-api-version': {'type': 'string'},
-    'date': {
-        'type': 'string',
-        'format': 'data-time'
-    }
-}
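
parameter_types above centralised small reusable fragments such as the MAC address pattern. The regex it defines can be exercised on its own:

```python
import re

# the mac_address pattern from the deleted module: five 'xx:' hex pairs
# followed by a final pair
mac_pattern = r'(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
print(bool(re.fullmatch(mac_pattern, 'fa:16:3e:2b:aa:01')))  # True
print(bool(re.fullmatch(mac_pattern, 'not-a-mac')))          # False
```
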
diff --git a/tempest/api_schema/response/compute/v2_1/quota_classes.py b/tempest/api_schema/response/compute/v2_1/quota_classes.py
deleted file mode 100644
index a0cdaf5..0000000
--- a/tempest/api_schema/response/compute/v2_1/quota_classes.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2014 IBM Corporation.
-# All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from tempest.api_schema.response.compute.v2_1 import quotas
-
-# NOTE(mriedem): os-quota-class-sets responses are the same as os-quota-sets
-# except for the key in the response body is quota_class_set instead of
-# quota_set, so update this copy of the schema from os-quota-sets.
-get_quota_class_set = copy.deepcopy(quotas.get_quota_set)
-get_quota_class_set['response_body']['properties']['quota_class_set'] = (
-    get_quota_class_set['response_body']['properties'].pop('quota_set'))
-get_quota_class_set['response_body']['required'] = ['quota_class_set']
-
-update_quota_class_set = copy.deepcopy(quotas.update_quota_set)
-update_quota_class_set['response_body']['properties']['quota_class_set'] = (
-    update_quota_class_set['response_body']['properties'].pop('quota_set'))
-update_quota_class_set['response_body']['required'] = ['quota_class_set']
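
quota_classes above shows the copy-then-mutate idiom used throughout these schema modules: derive a new schema with copy.deepcopy so the edits never leak back into the shared base definition. A self-contained version of that idiom:

```python
import copy

base = {'response_body': {'properties': {'quota_set': {'type': 'object'}},
                          'required': ['quota_set']}}

derived = copy.deepcopy(base)
derived['response_body']['properties']['quota_class_set'] = (
    derived['response_body']['properties'].pop('quota_set'))
derived['response_body']['required'] = ['quota_class_set']

print(sorted(base['response_body']['properties']))     # ['quota_set']
print(sorted(derived['response_body']['properties']))  # ['quota_class_set']
```
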
diff --git a/tempest/api_schema/response/compute/v2_1/quotas.py b/tempest/api_schema/response/compute/v2_1/quotas.py
deleted file mode 100644
index 7953983..0000000
--- a/tempest/api_schema/response/compute/v2_1/quotas.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-update_quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'},
-                    'server_group_members': {'type': 'integer'},
-                    'server_groups': {'type': 'integer'},
-                    'injected_files': {'type': 'integer'},
-                    'injected_file_content_bytes': {'type': 'integer'},
-                    'injected_file_path_bytes': {'type': 'integer'}
-                },
-                'additionalProperties': False,
-                # NOTE: server_group_members and server_groups are represented
-                # when enabling quota_server_group extension. So they should
-                # not be required.
-                'required': ['instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'key_pairs',
-                             'security_groups', 'security_group_rules',
-                             'injected_files', 'injected_file_content_bytes',
-                             'injected_file_path_bytes']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['quota_set']
-    }
-}
-
-get_quota_set = copy.deepcopy(update_quota_set)
-get_quota_set['response_body']['properties']['quota_set']['properties'][
-    'id'] = {'type': 'string'}
-get_quota_set['response_body']['properties']['quota_set']['required'].extend([
-    'id'])
-
-delete_quota = {
-    'status_code': [202]
-}
diff --git a/tempest/api_schema/response/compute/v2_1/security_group_default_rule.py b/tempest/api_schema/response/compute/v2_1/security_group_default_rule.py
deleted file mode 100644
index 2ec2826..0000000
--- a/tempest/api_schema/response/compute/v2_1/security_group_default_rule.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-common_security_group_default_rule_info = {
-    'type': 'object',
-    'properties': {
-        'from_port': {'type': 'integer'},
-        'id': {'type': 'integer'},
-        'ip_protocol': {'type': 'string'},
-        'ip_range': {
-            'type': 'object',
-            'properties': {
-                'cidr': {'type': 'string'}
-            },
-            'additionalProperties': False,
-            'required': ['cidr'],
-        },
-        'to_port': {'type': 'integer'},
-    },
-    'additionalProperties': False,
-    'required': ['from_port', 'id', 'ip_protocol', 'ip_range', 'to_port'],
-}
-
-create_get_security_group_default_rule = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_group_default_rule':
-                common_security_group_default_rule_info
-        },
-        'additionalProperties': False,
-        'required': ['security_group_default_rule']
-    }
-}
-
-delete_security_group_default_rule = {
-    'status_code': [204]
-}
-
-list_security_group_default_rules = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_group_default_rules': {
-                'type': 'array',
-                'items': common_security_group_default_rule_info
-            }
-        },
-        'additionalProperties': False,
-        'required': ['security_group_default_rules']
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/security_groups.py b/tempest/api_schema/response/compute/v2_1/security_groups.py
deleted file mode 100644
index 5ed5a5c..0000000
--- a/tempest/api_schema/response/compute/v2_1/security_groups.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-common_security_group_rule = {
-    'from_port': {'type': ['integer', 'null']},
-    'to_port': {'type': ['integer', 'null']},
-    'group': {
-        'type': 'object',
-        'properties': {
-            'tenant_id': {'type': 'string'},
-            'name': {'type': 'string'}
-        },
-        'additionalProperties': False,
-    },
-    'ip_protocol': {'type': ['string', 'null']},
-    # 'parent_group_id' can be UUID so defining it as 'string' also.
-    'parent_group_id': {'type': ['string', 'integer', 'null']},
-    'ip_range': {
-        'type': 'object',
-        'properties': {
-            'cidr': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        # When optional argument is provided in request body
-        # like 'group_id' then, attribute 'cidr' does not
-        # comes in response body. So it is not 'required'.
-    },
-    'id': {'type': ['string', 'integer']}
-}
-
-common_security_group = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': ['integer', 'string']},
-        'name': {'type': 'string'},
-        'tenant_id': {'type': 'string'},
-        'rules': {
-            'type': 'array',
-            'items': {
-                'type': ['object', 'null'],
-                'properties': common_security_group_rule,
-                'additionalProperties': False,
-            }
-        },
-        'description': {'type': 'string'},
-    },
-    'additionalProperties': False,
-    'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
-}
-
-list_security_groups = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_groups': {
-                'type': 'array',
-                'items': common_security_group
-            }
-        },
-        'additionalProperties': False,
-        'required': ['security_groups']
-    }
-}
-
-get_security_group = create_security_group = update_security_group = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_group': common_security_group
-        },
-        'additionalProperties': False,
-        'required': ['security_group']
-    }
-}
-
-delete_security_group = {
-    'status_code': [202]
-}
-
-create_security_group_rule = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_group_rule': {
-                'type': 'object',
-                'properties': common_security_group_rule,
-                'additionalProperties': False,
-                'required': ['from_port', 'to_port', 'group', 'ip_protocol',
-                             'parent_group_id', 'id', 'ip_range']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['security_group_rule']
-    }
-}
-
-delete_security_group_rule = {
-    'status_code': [202]
-}
diff --git a/tempest/api_schema/response/compute/v2_1/servers.py b/tempest/api_schema/response/compute/v2_1/servers.py
deleted file mode 100644
index 44ab9e9..0000000
--- a/tempest/api_schema/response/compute/v2_1/servers.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-create_server = {
-    'status_code': [202],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'server': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'security_groups': {'type': 'array'},
-                    'links': parameter_types.links,
-                    'OS-DCF:diskConfig': {'type': 'string'}
-                },
-                'additionalProperties': False,
-                # NOTE: OS-DCF:diskConfig & security_groups are API extension,
-                # and some environments return a response without these
-                # attributes.So they are not 'required'.
-                'required': ['id', 'links']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['server']
-    }
-}
-
-create_server_with_admin_pass = copy.deepcopy(create_server)
-create_server_with_admin_pass['response_body']['properties']['server'][
-    'properties'].update({'adminPass': {'type': 'string'}})
-create_server_with_admin_pass['response_body']['properties']['server'][
-    'required'].append('adminPass')
-
-list_servers = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'servers': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': 'string'},
-                        'links': parameter_types.links,
-                        'name': {'type': 'string'}
-                    },
-                    'additionalProperties': False,
-                    'required': ['id', 'links', 'name']
-                }
-            },
-            'servers_links': parameter_types.links
-        },
-        'additionalProperties': False,
-        # NOTE(gmann): the servers_links attribute is not always present,
-        # so it is not 'required'.
-        'required': ['servers']
-    }
-}
-
-delete_server = {
-    'status_code': [204],
-}
-
-common_show_server = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'name': {'type': 'string'},
-        'status': {'type': 'string'},
-        'image': {'oneOf': [
-            {'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'links': parameter_types.links
-                },
-                'additionalProperties': False,
-                'required': ['id', 'links']},
-            {'type': ['string', 'null']}
-        ]},
-        'flavor': {
-            'type': 'object',
-            'properties': {
-                'id': {'type': 'string'},
-                'links': parameter_types.links
-            },
-            'additionalProperties': False,
-            'required': ['id', 'links']
-        },
-        'fault': {
-            'type': 'object',
-            'properties': {
-                'code': {'type': 'integer'},
-                'created': {'type': 'string'},
-                'message': {'type': 'string'},
-                'details': {'type': 'string'},
-            },
-            'additionalProperties': False,
-            # NOTE(gmann): 'details' is not always present in 'fault',
-            # so it is not defined as 'required'.
-            'required': ['code', 'created', 'message']
-        },
-        'user_id': {'type': 'string'},
-        'tenant_id': {'type': 'string'},
-        'created': {'type': 'string'},
-        'updated': {'type': 'string'},
-        'progress': {'type': 'integer'},
-        'metadata': {'type': 'object'},
-        'links': parameter_types.links,
-        'addresses': parameter_types.addresses,
-        'hostId': {'type': 'string'},
-        'OS-DCF:diskConfig': {'type': 'string'},
-        'accessIPv4': parameter_types.access_ip_v4,
-        'accessIPv6': parameter_types.access_ip_v6
-    },
-    'additionalProperties': False,
-    # NOTE(GMann): the 'progress' attribute is present in the response
-    # only when the server's status is one of the progress statuses
-    # ("ACTIVE", "BUILD", "REBUILD", "RESIZE", "VERIFY_RESIZE").
-    # The 'fault' attribute is present in the response
-    # only when the server's status is "ERROR" or "DELETED".
-    # OS-DCF:diskConfig and accessIPv4/v6 are API
-    # extensions, and some environments return a response
-    # without these attributes. So these are not defined as 'required'.
-    'required': ['id', 'name', 'status', 'image', 'flavor',
-                 'user_id', 'tenant_id', 'created', 'updated',
-                 'metadata', 'links', 'addresses', 'hostId']
-}
-
-update_server = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'server': common_show_server
-        },
-        'additionalProperties': False,
-        'required': ['server']
-    }
-}
-
-server_detail = copy.deepcopy(common_show_server)
-server_detail['properties'].update({
-    'key_name': {'type': ['string', 'null']},
-    'security_groups': {'type': 'array'},
-
-    # NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
-    # attributes.
-    'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
-    'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
-    'OS-EXT-AZ:availability_zone': {'type': 'string'},
-
-    # NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
-    # attributes.
-    'OS-EXT-STS:task_state': {'type': ['string', 'null']},
-    'OS-EXT-STS:vm_state': {'type': 'string'},
-    'OS-EXT-STS:power_state': {'type': 'integer'},
-    'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
-    'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
-    'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
-    'os-extended-volumes:volumes_attached': {'type': 'array'},
-    'config_drive': {'type': 'string'}
-})
-server_detail['properties']['addresses']['patternProperties'][
-    '^[a-zA-Z0-9-_.]+$']['items']['properties'].update({
-        'OS-EXT-IPS:type': {'type': 'string'},
-        'OS-EXT-IPS-MAC:mac_addr': parameter_types.mac_address})
-# NOTE(gmann): Update OS-EXT-IPS:type and OS-EXT-IPS-MAC:mac_addr
-# attributes in the server address. Those are API extensions,
-# and some environments return a response without
-# these attributes. So they are not 'required'.
-
-get_server = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'server': server_detail
-        },
-        'additionalProperties': False,
-        'required': ['server']
-    }
-}
-
-list_servers_detail = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'servers': {
-                'type': 'array',
-                'items': server_detail
-            },
-            'servers_links': parameter_types.links
-        },
-        'additionalProperties': False,
-        # NOTE(gmann): the servers_links attribute is not always present,
-        # so it is not 'required'.
-        'required': ['servers']
-    }
-}
-
-rebuild_server = copy.deepcopy(update_server)
-rebuild_server['status_code'] = [202]
-
-rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
-rebuild_server_with_admin_pass['response_body']['properties']['server'][
-    'properties'].update({'adminPass': {'type': 'string'}})
-rebuild_server_with_admin_pass['response_body']['properties']['server'][
-    'required'].append('adminPass')
-
-rescue_server = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'adminPass': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        'required': ['adminPass']
-    }
-}
-
-list_virtual_interfaces = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'virtual_interfaces': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': 'string'},
-                        'mac_address': parameter_types.mac_address,
-                        'OS-EXT-VIF-NET:net_id': {'type': 'string'}
-                    },
-                    'additionalProperties': False,
-                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it is
-                    # not defined as 'required'.
-                    'required': ['id', 'mac_address']
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['virtual_interfaces']
-    }
-}
-
-common_attach_volume_info = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'device': {'type': 'string'},
-        'volumeId': {'type': 'string'},
-        'serverId': {'type': ['integer', 'string']}
-    },
-    'additionalProperties': False,
-    'required': ['id', 'device', 'volumeId', 'serverId']
-}
-
-attach_volume = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'volumeAttachment': common_attach_volume_info
-        },
-        'additionalProperties': False,
-        'required': ['volumeAttachment']
-    }
-}
-
-detach_volume = {
-    'status_code': [202]
-}
-
-get_volume_attachment = copy.deepcopy(attach_volume)
-get_volume_attachment['response_body']['properties'][
-    'volumeAttachment']['properties'].update({'serverId': {'type': 'string'}})
-
-list_volume_attachments = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'volumeAttachments': {
-                'type': 'array',
-                'items': common_attach_volume_info
-            }
-        },
-        'additionalProperties': False,
-        'required': ['volumeAttachments']
-    }
-}
-list_volume_attachments['response_body']['properties'][
-    'volumeAttachments']['items']['properties'].update(
-    {'serverId': {'type': 'string'}})
-
-list_addresses_by_network = {
-    'status_code': [200],
-    'response_body': parameter_types.addresses
-}
-
-list_addresses = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'addresses': parameter_types.addresses
-        },
-        'additionalProperties': False,
-        'required': ['addresses']
-    }
-}
-
-common_server_group = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'name': {'type': 'string'},
-        'policies': {
-            'type': 'array',
-            'items': {'type': 'string'}
-        },
-        # The 'members' attribute is an array of the UUIDs of the
-        # instances present in the server group.
-        'members': {
-            'type': 'array',
-            'items': {'type': 'string'}
-        },
-        'metadata': {'type': 'object'}
-    },
-    'additionalProperties': False,
-    'required': ['id', 'name', 'policies', 'members', 'metadata']
-}
-
-create_get_server_group = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'server_group': common_server_group
-        },
-        'additionalProperties': False,
-        'required': ['server_group']
-    }
-}
-
-delete_server_group = {
-    'status_code': [204]
-}
-
-list_server_groups = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'server_groups': {
-                'type': 'array',
-                'items': common_server_group
-            }
-        },
-        'additionalProperties': False,
-        'required': ['server_groups']
-    }
-}
-
-instance_actions = {
-    'type': 'object',
-    'properties': {
-        'action': {'type': 'string'},
-        'request_id': {'type': 'string'},
-        'user_id': {'type': 'string'},
-        'project_id': {'type': 'string'},
-        'start_time': {'type': 'string'},
-        'message': {'type': ['string', 'null']},
-        'instance_uuid': {'type': 'string'}
-    },
-    'additionalProperties': False,
-    'required': ['action', 'request_id', 'user_id', 'project_id',
-                 'start_time', 'message', 'instance_uuid']
-}
-
-instance_action_events = {
-    'type': 'array',
-    'items': {
-        'type': 'object',
-        'properties': {
-            'event': {'type': 'string'},
-            'start_time': {'type': 'string'},
-            'finish_time': {'type': 'string'},
-            'result': {'type': 'string'},
-            'traceback': {'type': ['string', 'null']}
-        },
-        'additionalProperties': False,
-        'required': ['event', 'start_time', 'finish_time', 'result',
-                     'traceback']
-    }
-}
-
-list_instance_actions = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'instanceActions': {
-                'type': 'array',
-                'items': instance_actions
-            }
-        },
-        'additionalProperties': False,
-        'required': ['instanceActions']
-    }
-}
-
-instance_actions_with_events = copy.deepcopy(instance_actions)
-instance_actions_with_events['properties'].update({
-    'events': instance_action_events})
-# 'events' is not always present in the response body, so it is not
-# defined as 'required'.
-
-get_instance_action = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'instanceAction': instance_actions_with_events
-        },
-        'additionalProperties': False,
-        'required': ['instanceAction']
-    }
-}
-
-get_password = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'password': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        'required': ['password']
-    }
-}
-
-get_vnc_console = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'console': {
-                'type': 'object',
-                'properties': {
-                    'type': {'type': 'string'},
-                    'url': {
-                        'type': 'string',
-                        'format': 'uri'
-                    }
-                },
-                'additionalProperties': False,
-                'required': ['type', 'url']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['console']
-    }
-}
-
-get_console_output = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'output': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        'required': ['output']
-    }
-}
-
-set_server_metadata = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'metadata': {
-                'type': 'object',
-                'patternProperties': {
-                    '^.+$': {'type': 'string'}
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['metadata']
-    }
-}
-
-list_server_metadata = copy.deepcopy(set_server_metadata)
-
-update_server_metadata = copy.deepcopy(set_server_metadata)
-
-delete_server_metadata_item = {
-    'status_code': [204]
-}
-
-set_get_server_metadata_item = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'meta': {
-                'type': 'object',
-                'patternProperties': {
-                    '^.+$': {'type': 'string'}
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['meta']
-    }
-}
-
-server_actions_common_schema = {
-    'status_code': [202]
-}
-
-server_actions_delete_password = {
-    'status_code': [204]
-}
-
-server_actions_confirm_resize = copy.deepcopy(
-    server_actions_delete_password)
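
The module above derives several schemas from a base one with copy.deepcopy followed by updates to 'properties' and 'required' (create_server_with_admin_pass, rebuild_server_with_admin_pass, server_detail). A tiny self-contained sketch of that pattern, using made-up schema content:

    # Deepcopy-derivation pattern: extend a copied schema without mutating
    # the original. The schema here is deliberately minimal and invented.
    import copy

    base = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {'id': {'type': 'string'}},
            'additionalProperties': False,
            'required': ['id'],
        },
    }

    with_admin_pass = copy.deepcopy(base)
    with_admin_pass['response_body']['properties']['adminPass'] = {
        'type': 'string'}
    with_admin_pass['response_body']['required'].append('adminPass')

    # The base schema is untouched by the derived one.
    assert 'adminPass' not in base['response_body']['properties']
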
diff --git a/tempest/api_schema/response/compute/v2_1/services.py b/tempest/api_schema/response/compute/v2_1/services.py
deleted file mode 100644
index ddef7b2..0000000
--- a/tempest/api_schema/response/compute/v2_1/services.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-list_services = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'services': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': ['integer', 'string'],
-                               'pattern': '^[a-zA-Z!]*@[0-9]+$'},
-                        'zone': {'type': 'string'},
-                        'host': {'type': 'string'},
-                        'state': {'type': 'string'},
-                        'binary': {'type': 'string'},
-                        'status': {'type': 'string'},
-                        'updated_at': {'type': ['string', 'null']},
-                        'disabled_reason': {'type': ['string', 'null']}
-                    },
-                    'additionalProperties': False,
-                    'required': ['id', 'zone', 'host', 'state', 'binary',
-                                 'status', 'updated_at', 'disabled_reason']
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['services']
-    }
-}
-
-enable_disable_service = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'service': {
-                'type': 'object',
-                'properties': {
-                    'status': {'type': 'string'},
-                    'binary': {'type': 'string'},
-                    'host': {'type': 'string'}
-                },
-                'additionalProperties': False,
-                'required': ['status', 'binary', 'host']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['service']
-    }
-}
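
In list_services above, 'id' is typed ['integer', 'string'] with a 'pattern' constraint. In JSON Schema, 'pattern' only constrains string instances, so an integer id passes untouched while string ids must match. A quick check with the jsonschema library; the sample values are invented:

    # 'pattern' applies only to strings: integers satisfy the subschema via
    # the 'integer' type, strings must match the regular expression.
    import jsonschema

    id_schema = {'type': ['integer', 'string'],
                 'pattern': '^[a-zA-Z!]*@[0-9]+$'}

    jsonschema.validate(1, id_schema)               # integer: pattern ignored
    jsonschema.validate('cell!child@1', id_schema)  # string: must match

    try:
        jsonschema.validate('no-at-sign-here', id_schema)
    except jsonschema.ValidationError:
        pass  # non-matching strings are rejected
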
diff --git a/tempest/api_schema/response/compute/v2_1/snapshots.py b/tempest/api_schema/response/compute/v2_1/snapshots.py
deleted file mode 100644
index 01a524b..0000000
--- a/tempest/api_schema/response/compute/v2_1/snapshots.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2015 Fujitsu(fnst) Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-common_snapshot_info = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'volumeId': {'type': 'string'},
-        'status': {'type': 'string'},
-        'size': {'type': 'integer'},
-        'createdAt': {'type': 'string'},
-        'displayName': {'type': ['string', 'null']},
-        'displayDescription': {'type': ['string', 'null']}
-    },
-    'additionalProperties': False,
-    'required': ['id', 'volumeId', 'status', 'size',
-                 'createdAt', 'displayName', 'displayDescription']
-}
-
-create_get_snapshot = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'snapshot': common_snapshot_info
-        },
-        'additionalProperties': False,
-        'required': ['snapshot']
-    }
-}
-
-list_snapshots = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'snapshots': {
-                'type': 'array',
-                'items': common_snapshot_info
-            }
-        },
-        'additionalProperties': False,
-        'required': ['snapshots']
-    }
-}
-
-delete_snapshot = {
-    'status_code': [202]
-}
diff --git a/tempest/api_schema/response/compute/v2_1/tenant_networks.py b/tempest/api_schema/response/compute/v2_1/tenant_networks.py
deleted file mode 100644
index ddfab96..0000000
--- a/tempest/api_schema/response/compute/v2_1/tenant_networks.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-param_network = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'cidr': {'type': ['string', 'null']},
-        'label': {'type': 'string'}
-    },
-    'additionalProperties': False,
-    'required': ['id', 'cidr', 'label']
-}
-
-
-list_tenant_networks = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'networks': {
-                'type': 'array',
-                'items': param_network
-            }
-        },
-        'additionalProperties': False,
-        'required': ['networks']
-    }
-}
-
-
-get_tenant_network = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'network': param_network
-        },
-        'additionalProperties': False,
-        'required': ['network']
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/tenant_usages.py b/tempest/api_schema/response/compute/v2_1/tenant_usages.py
deleted file mode 100644
index d51ef12..0000000
--- a/tempest/api_schema/response/compute/v2_1/tenant_usages.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-_server_usages = {
-    'type': 'array',
-    'items': {
-        'type': 'object',
-        'properties': {
-            'ended_at': {
-                'oneOf': [
-                    {'type': 'string'},
-                    {'type': 'null'}
-                ]
-            },
-            'flavor': {'type': 'string'},
-            'hours': {'type': 'number'},
-            'instance_id': {'type': 'string'},
-            'local_gb': {'type': 'integer'},
-            'memory_mb': {'type': 'integer'},
-            'name': {'type': 'string'},
-            'started_at': {'type': 'string'},
-            'state': {'type': 'string'},
-            'tenant_id': {'type': 'string'},
-            'uptime': {'type': 'integer'},
-            'vcpus': {'type': 'integer'},
-        },
-        'required': ['ended_at', 'flavor', 'hours', 'instance_id', 'local_gb',
-                     'memory_mb', 'name', 'started_at', 'state', 'tenant_id',
-                     'uptime', 'vcpus']
-    }
-}
-
-_tenant_usage_list = {
-    'type': 'object',
-    'properties': {
-        'server_usages': _server_usages,
-        'start': {'type': 'string'},
-        'stop': {'type': 'string'},
-        'tenant_id': {'type': 'string'},
-        'total_hours': {'type': 'number'},
-        'total_local_gb_usage': {'type': 'number'},
-        'total_memory_mb_usage': {'type': 'number'},
-        'total_vcpus_usage': {'type': 'number'},
-    },
-    'required': ['start', 'stop', 'tenant_id',
-                 'total_hours', 'total_local_gb_usage',
-                 'total_memory_mb_usage', 'total_vcpus_usage']
-}
-
-# The 'required' list of get_tenant_usage differs from list_tenant_usage's.
-_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
-_tenant_usage_get['required'] = ['server_usages', 'start', 'stop', 'tenant_id',
-                                 'total_hours', 'total_local_gb_usage',
-                                 'total_memory_mb_usage', 'total_vcpus_usage']
-
-list_tenant_usage = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'tenant_usages': {
-                'type': 'array',
-                'items': _tenant_usage_list
-            }
-        },
-        'required': ['tenant_usages']
-    }
-}
-
-get_tenant_usage = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'tenant_usage': _tenant_usage_get
-        },
-        'required': ['tenant_usage']
-    }
-}
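
The only difference between _tenant_usage_get and _tenant_usage_list above is that 'server_usages' joins the 'required' list; a declared-but-not-required property may simply be absent. A compact demonstration with trimmed-down stand-in schemas (not the full ones above):

    # The same body passes the list-style schema but fails the get-style one
    # once 'server_usages' becomes required.
    import copy

    import jsonschema

    usage_list_item = {
        'type': 'object',
        'properties': {'tenant_id': {'type': 'string'},
                       'server_usages': {'type': 'array'}},
        'required': ['tenant_id'],
    }
    usage_get = copy.deepcopy(usage_list_item)
    usage_get['required'].append('server_usages')

    body = {'tenant_id': 'abc'}
    jsonschema.validate(body, usage_list_item)  # optional here

    try:
        jsonschema.validate(body, usage_get)
    except jsonschema.ValidationError:
        pass  # mandatory in the detailed schema
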
diff --git a/tempest/api_schema/response/compute/v2_1/versions.py b/tempest/api_schema/response/compute/v2_1/versions.py
deleted file mode 100644
index 08a9fab..0000000
--- a/tempest/api_schema/response/compute/v2_1/versions.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-
-_version = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': 'string'},
-        'links': {
-            'type': 'array',
-            'items': {
-                'type': 'object',
-                'properties': {
-                    'href': {'type': 'string', 'format': 'uri'},
-                    'rel': {'type': 'string'},
-                    'type': {'type': 'string'},
-                },
-                'required': ['href', 'rel'],
-                'additionalProperties': False
-            }
-        },
-        'status': {'type': 'string'},
-        'updated': {'type': 'string', 'format': 'date-time'},
-        'version': {'type': 'string'},
-        'min_version': {'type': 'string'},
-        'media-types': {
-            'type': 'array',
-            'properties': {
-                'base': {'type': 'string'},
-                'type': {'type': 'string'},
-            }
-        },
-    },
-    # NOTE: version and min_version have been added since Kilo,
-    # so they should not be required.
-    # NOTE(sdague): media-types only shows up in single version requests.
-    'required': ['id', 'links', 'status', 'updated'],
-    'additionalProperties': False
-}
-
-list_versions = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'versions': {
-                'type': 'array',
-                'items': _version
-            }
-        },
-        'required': ['versions'],
-        'additionalProperties': False
-    }
-}
-
-
-_detail_get_version = copy.deepcopy(_version)
-_detail_get_version['properties'].pop('min_version')
-_detail_get_version['properties'].pop('version')
-_detail_get_version['properties'].pop('updated')
-_detail_get_version['properties']['media-types'] = {
-    'type': 'array',
-    'items': {
-        'type': 'object',
-        'properties': {
-            'base': {'type': 'string'},
-            'type': {'type': 'string'}
-        }
-    }
-}
-_detail_get_version['required'] = ['id', 'links', 'status', 'media-types']
-
-get_version = {
-    'status_code': [300],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'choices': {
-                'type': 'array',
-                'items': _detail_get_version
-            }
-        },
-        'required': ['choices'],
-        'additionalProperties': False
-    }
-}
-
-get_one_version = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'version': _version
-        },
-        'additionalProperties': False
-    }
-}
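
One consequence worth noting in _detail_get_version above: the deep copy keeps 'additionalProperties': False, so popping 'updated', 'version' and 'min_version' turns those keys from optional into forbidden for the 300 'choices' entries. A small self-contained demonstration with a reduced stand-in schema:

    # Popping a property from a schema that sets additionalProperties: False
    # makes bodies carrying that key fail validation.
    import copy

    import jsonschema

    version = {
        'type': 'object',
        'properties': {'id': {'type': 'string'},
                       'updated': {'type': 'string'}},
        'required': ['id'],
        'additionalProperties': False,
    }
    choice = copy.deepcopy(version)
    choice['properties'].pop('updated')

    jsonschema.validate({'id': 'v2.1', 'updated': '2015-01-01'}, version)

    try:
        jsonschema.validate({'id': 'v2.1', 'updated': '2015-01-01'}, choice)
    except jsonschema.ValidationError:
        pass  # 'updated' is no longer an allowed property
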
diff --git a/tempest/api_schema/response/compute/v2_1/volumes.py b/tempest/api_schema/response/compute/v2_1/volumes.py
deleted file mode 100644
index bb34acb..0000000
--- a/tempest/api_schema/response/compute/v2_1/volumes.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-create_get_volume = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'volume': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'status': {'type': 'string'},
-                    'displayName': {'type': ['string', 'null']},
-                    'availabilityZone': {'type': 'string'},
-                    'createdAt': {'type': 'string'},
-                    'displayDescription': {'type': ['string', 'null']},
-                    'volumeType': {'type': ['string', 'null']},
-                    'snapshotId': {'type': ['string', 'null']},
-                    'metadata': {'type': 'object'},
-                    'size': {'type': 'integer'},
-                    'attachments': {
-                        'type': 'array',
-                        'items': {
-                            'type': 'object',
-                            'properties': {
-                                'id': {'type': 'string'},
-                                'device': {'type': 'string'},
-                                'volumeId': {'type': 'string'},
-                                'serverId': {'type': 'string'}
-                            },
-                            'additionalProperties': False,
-                            # NOTE: if the volume is not attached to any
-                            # server, 'attachments' is returned as an array
-                            # containing an empty object ("[{}]"), so its
-                            # elements cannot be defined as 'required'.
-                            # If it were returned as an empty array ("[]"),
-                            # those elements could be 'required'.
-                        }
-                    }
-                },
-                'additionalProperties': False,
-                'required': ['id', 'status', 'displayName', 'availabilityZone',
-                             'createdAt', 'displayDescription', 'volumeType',
-                             'snapshotId', 'metadata', 'size', 'attachments']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['volume']
-    }
-}
-
-list_volumes = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'volumes': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': 'string'},
-                        'status': {'type': 'string'},
-                        'displayName': {'type': ['string', 'null']},
-                        'availabilityZone': {'type': 'string'},
-                        'createdAt': {'type': 'string'},
-                        'displayDescription': {'type': ['string', 'null']},
-                        'volumeType': {'type': ['string', 'null']},
-                        'snapshotId': {'type': ['string', 'null']},
-                        'metadata': {'type': 'object'},
-                        'size': {'type': 'integer'},
-                        'attachments': {
-                            'type': 'array',
-                            'items': {
-                                'type': 'object',
-                                'properties': {
-                                    'id': {'type': 'string'},
-                                    'device': {'type': 'string'},
-                                    'volumeId': {'type': 'string'},
-                                    'serverId': {'type': 'string'}
-                                },
-                                'additionalProperties': False,
-                                # NOTE: if the volume is not attached to
-                                # any server, 'attachments' is returned as
-                                # an array with an empty object ("[{}]"),
-                                # so its elements cannot be defined as
-                                # 'required'. If it were an empty array
-                                # ("[]"), they could be 'required'.
-                            }
-                        }
-                    },
-                    'additionalProperties': False,
-                    'required': ['id', 'status', 'displayName',
-                                 'availabilityZone', 'createdAt',
-                                 'displayDescription', 'volumeType',
-                                 'snapshotId', 'metadata', 'size',
-                                 'attachments']
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['volumes']
-    }
-}
-
-delete_volume = {
-    'status_code': [202]
-}
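
The NOTE about "[{}]" above comes down to the 'items' subschema for 'attachments' declaring properties but no 'required' list, so an empty object element validates. A compact check against an abbreviated stand-in for that subschema:

    # An empty object inside 'attachments' validates because the items
    # subschema has no 'required' list.
    import jsonschema

    attachments = {
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {'id': {'type': 'string'},
                           'volumeId': {'type': 'string'}},
            'additionalProperties': False,
        },
    }

    jsonschema.validate([{}], attachments)  # detached volume: "[{}]"
    jsonschema.validate([{'id': 'a', 'volumeId': 'b'}], attachments)
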
diff --git a/tempest/services/volume/json/__init__.py b/tempest/api_schema/response/compute/v2_2/__init__.py
similarity index 100%
copy from tempest/services/volume/json/__init__.py
copy to tempest/api_schema/response/compute/v2_2/__init__.py
diff --git a/tempest/api_schema/response/compute/v2_2/keypairs.py b/tempest/api_schema/response/compute/v2_2/keypairs.py
new file mode 100644
index 0000000..5d8d24d
--- /dev/null
+++ b/tempest/api_schema/response/compute/v2_2/keypairs.py
@@ -0,0 +1,41 @@
+# Copyright 2016 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.response.compute.v2_1 import keypairs
+
+get_keypair = copy.deepcopy(keypairs.get_keypair)
+get_keypair['response_body']['properties']['keypair'][
+    'properties'].update({'type': {'type': 'string'}})
+get_keypair['response_body']['properties']['keypair'][
+    'required'].append('type')
+
+create_keypair = copy.deepcopy(keypairs.create_keypair)
+create_keypair['status_code'] = [201]
+create_keypair['response_body']['properties']['keypair'][
+    'properties'].update({'type': {'type': 'string'}})
+create_keypair['response_body']['properties']['keypair'][
+    'required'].append('type')
+
+delete_keypair = {
+    'status_code': [204],
+}
+
+list_keypairs = copy.deepcopy(keypairs.list_keypairs)
+list_keypairs['response_body']['properties']['keypairs'][
+    'items']['properties']['keypair'][
+    'properties'].update({'type': {'type': 'string'}})
+list_keypairs['response_body']['properties']['keypairs'][
+    'items']['properties']['keypair']['required'].append('type')
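
The new v2_2 module mirrors the v2.1 keypair schemas but adds the 'type' attribute everywhere and expects 201 for create, matching compute microversion 2.2. A hedged sketch of how a caller might pick the schema module for a requested microversion follows; the select_keypair_schema helper and the naive version comparison are illustrative only and not part of this change (Tempest has its own microversion utilities):

    # Illustrative only: choose between the v2_1 and v2_2 keypair schema
    # modules based on an "X.Y" microversion string.
    from tempest.api_schema.response.compute.v2_1 import keypairs as schema_v21
    from tempest.api_schema.response.compute.v2_2 import keypairs as schema_v22

    def select_keypair_schema(microversion):
        major, minor = (int(x) for x in microversion.split('.'))
        return schema_v22 if (major, minor) >= (2, 2) else schema_v21

    assert select_keypair_schema('2.2').create_keypair['status_code'] == [201]
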
diff --git a/tempest/clients.py b/tempest/clients.py
index 84e8544..bc1f5ad 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -32,55 +32,60 @@
     FloatingIPPoolsClient
 from tempest_lib.services.compute.floating_ips_bulk_client import \
     FloatingIPsBulkClient
+from tempest_lib.services.compute.floating_ips_client import \
+    FloatingIPsClient as ComputeFloatingIPsClient
 from tempest_lib.services.compute.hosts_client import HostsClient
 from tempest_lib.services.compute.hypervisor_client import \
     HypervisorClient
+from tempest_lib.services.compute.images_client import ImagesClient \
+    as ComputeImagesClient
+from tempest_lib.services.compute.instance_usage_audit_log_client import \
+    InstanceUsagesAuditLogClient
+from tempest_lib.services.compute.interfaces_client import InterfacesClient
+from tempest_lib.services.compute.limits_client import LimitsClient
+from tempest_lib.services.compute.migrations_client import MigrationsClient
+from tempest_lib.services.compute.networks_client import NetworksClient \
+    as ComputeNetworksClient
+from tempest_lib.services.compute.quota_classes_client import \
+    QuotaClassesClient
+from tempest_lib.services.compute.quotas_client import QuotasClient
+from tempest_lib.services.compute.security_group_default_rules_client import \
+    SecurityGroupDefaultRulesClient
+from tempest_lib.services.compute.security_group_rules_client import \
+    SecurityGroupRulesClient as ComputeSecurityGroupRulesClient
+from tempest_lib.services.compute.security_groups_client import \
+    SecurityGroupsClient as ComputeSecurityGroupsClient
+from tempest_lib.services.compute.server_groups_client import \
+    ServerGroupsClient
+from tempest_lib.services.compute.servers_client import ServersClient
+from tempest_lib.services.compute.services_client import ServicesClient
+from tempest_lib.services.compute.snapshots_client import \
+    SnapshotsClient as ComputeSnapshotsClient
+from tempest_lib.services.compute.tenant_networks_client import \
+    TenantNetworksClient
+from tempest_lib.services.compute.tenant_usages_client import \
+    TenantUsagesClient
+from tempest_lib.services.compute.versions_client import VersionsClient
+from tempest_lib.services.compute.volumes_client import \
+    VolumesClient as ComputeVolumesClient
 from tempest_lib.services.identity.v2.token_client import TokenClient
 from tempest_lib.services.identity.v3.token_client import V3TokenClient
+from tempest_lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest_lib.services.network.metering_label_rules_client import \
+    MeteringLabelRulesClient
+from tempest_lib.services.network.metering_labels_client import \
+    MeteringLabelsClient
+from tempest_lib.services.network.networks_client import NetworksClient
+from tempest_lib.services.network.ports_client import PortsClient
+from tempest_lib.services.network.subnets_client import SubnetsClient
 
-from tempest.common import cred_provider
 from tempest.common import negative_rest_client
 from tempest import config
 from tempest import exceptions
 from tempest import manager
 from tempest.services.baremetal.v1.json.baremetal_client import \
     BaremetalClient
-from tempest.services import botoclients
-from tempest.services.compute.json.floating_ips_client import \
-    FloatingIPsClient
-from tempest.services.compute.json.images_client import ImagesClient
-from tempest.services.compute.json.instance_usage_audit_log_client import \
-    InstanceUsagesAuditLogClient
-from tempest.services.compute.json.interfaces_client import \
-    InterfacesClient
 from tempest.services.compute.json.keypairs_client import KeyPairsClient
-from tempest.services.compute.json.limits_client import LimitsClient
-from tempest.services.compute.json.migrations_client import \
-    MigrationsClient
-from tempest.services.compute.json.networks_client import NetworksClient \
-    as ComputeNetworksClient
-from tempest.services.compute.json.quota_classes_client import \
-    QuotaClassesClient
-from tempest.services.compute.json.quotas_client import QuotasClient
-from tempest.services.compute.json.security_group_default_rules_client import \
-    SecurityGroupDefaultRulesClient
-from tempest.services.compute.json.security_group_rules_client import \
-    SecurityGroupRulesClient
-from tempest.services.compute.json.security_groups_client import \
-    SecurityGroupsClient
-from tempest.services.compute.json.server_groups_client import \
-    ServerGroupsClient
-from tempest.services.compute.json.servers_client import ServersClient
-from tempest.services.compute.json.services_client import ServicesClient
-from tempest.services.compute.json.snapshots_client import \
-    SnapshotsClient as ComputeSnapshotsClient
-from tempest.services.compute.json.tenant_networks_client import \
-    TenantNetworksClient
-from tempest.services.compute.json.tenant_usages_client import \
-    TenantUsagesClient
-from tempest.services.compute.json.versions_client import VersionsClient
-from tempest.services.compute.json.volumes_client import \
-    VolumesClient as ComputeVolumesClient
 from tempest.services.data_processing.v1_1.data_processing_client import \
     DataProcessingClient
 from tempest.services.database.json.flavors_client import \
@@ -89,75 +94,98 @@
     DatabaseLimitsClient
 from tempest.services.database.json.versions_client import \
     DatabaseVersionsClient
+from tempest.services.identity.v2.json.endpoints_client import \
+    EndpointsClient as EndpointsV2Client
 from tempest.services.identity.v2.json.identity_client import \
     IdentityClient
+from tempest.services.identity.v2.json.roles_client import \
+    RolesClient
+from tempest.services.identity.v2.json.services_client import \
+    ServicesClient as ServicesV2Client
+from tempest.services.identity.v2.json.tenants_client import \
+    TenantsClient
+from tempest.services.identity.v2.json.users_client import \
+    UsersClient
 from tempest.services.identity.v3.json.credentials_client import \
-    CredentialsClient
+    CredentialsClient as CredentialsV3Client
 from tempest.services.identity.v3.json.endpoints_client import \
-    EndPointClient
-from tempest.services.identity.v3.json.identity_client import \
-    IdentityV3Client
-from tempest.services.identity.v3.json.policy_client import PolicyClient
-from tempest.services.identity.v3.json.region_client import RegionClient
-from tempest.services.identity.v3.json.service_client import \
-    ServiceClient
-from tempest.services.image.v1.json.image_client import ImageClient
-from tempest.services.image.v2.json.image_client import ImageClientV2
+    EndPointClient as EndPointV3Client
+from tempest.services.identity.v3.json.groups_client import \
+    GroupsClient as GroupsV3Client
+from tempest.services.identity.v3.json.identity_client import IdentityV3Client
+from tempest.services.identity.v3.json.policies_client import \
+    PoliciesClient as PoliciesV3Client
+from tempest.services.identity.v3.json.regions_client import \
+    RegionsClient as RegionsV3Client
+from tempest.services.identity.v3.json.services_client import \
+    ServicesClient as IdentityServicesV3Client
+from tempest.services.image.v1.json.images_client import ImagesClient
+from tempest.services.image.v2.json.images_client import ImagesClientV2
 from tempest.services.messaging.json.messaging_client import \
     MessagingClient
+from tempest.services.network.json.agents_client import AgentsClient \
+    as NetworkAgentsClient
+from tempest.services.network.json.extensions_client import \
+    ExtensionsClient as NetworkExtensionsClient
 from tempest.services.network.json.network_client import NetworkClient
-from tempest.services.network.json.networks_client import NetworksClient
-from tempest.services.network.json.subnets_client import SubnetsClient
+from tempest.services.network.json.quotas_client import QuotasClient \
+    as NetworkQuotasClient
+from tempest.services.network.json.security_group_rules_client import \
+    SecurityGroupRulesClient
+from tempest.services.network.json.security_groups_client import \
+    SecurityGroupsClient
+from tempest.services.network.json.subnetpools_client import SubnetpoolsClient
 from tempest.services.object_storage.account_client import AccountClient
 from tempest.services.object_storage.container_client import ContainerClient
 from tempest.services.object_storage.object_client import ObjectClient
 from tempest.services.orchestration.json.orchestration_client import \
     OrchestrationClient
+from tempest.services.telemetry.json.alarming_client import AlarmingClient
 from tempest.services.telemetry.json.telemetry_client import \
     TelemetryClient
-from tempest.services.volume.json.admin.volume_hosts_client import \
-    VolumeHostsClient
-from tempest.services.volume.json.admin.volume_quotas_client import \
-    VolumeQuotasClient
-from tempest.services.volume.json.admin.volume_services_client import \
-    VolumesServicesClient
-from tempest.services.volume.json.admin.volume_types_client import \
-    VolumeTypesClient
-from tempest.services.volume.json.availability_zone_client import \
-    VolumeAvailabilityZoneClient
-from tempest.services.volume.json.backups_client import BackupsClient
-from tempest.services.volume.json.extensions_client import \
-    ExtensionsClient as VolumeExtensionClient
-from tempest.services.volume.json.qos_client import QosSpecsClient
-from tempest.services.volume.json.snapshots_client import SnapshotsClient
-from tempest.services.volume.json.volumes_client import VolumesClient
-from tempest.services.volume.v2.json.admin.volume_hosts_client import \
-    VolumeHostsV2Client
-from tempest.services.volume.v2.json.admin.volume_quotas_client import \
-    VolumeQuotasV2Client
-from tempest.services.volume.v2.json.admin.volume_services_client import \
-    VolumesServicesV2Client
-from tempest.services.volume.v2.json.admin.volume_types_client import \
-    VolumeTypesV2Client
+from tempest.services.volume.v1.json.admin.hosts_client import \
+    HostsClient as VolumeHostsClient
+from tempest.services.volume.v1.json.admin.quotas_client import \
+    QuotasClient as VolumeQuotasClient
+from tempest.services.volume.v1.json.admin.services_client import \
+    ServicesClient as VolumeServicesClient
+from tempest.services.volume.v1.json.admin.types_client import \
+    TypesClient as VolumeTypesClient
+from tempest.services.volume.v1.json.availability_zone_client import \
+    AvailabilityZoneClient as VolumeAvailabilityZoneClient
+from tempest.services.volume.v1.json.backups_client import BackupsClient
+from tempest.services.volume.v1.json.extensions_client import \
+    ExtensionsClient as VolumeExtensionsClient
+from tempest.services.volume.v1.json.qos_client import QosSpecsClient
+from tempest.services.volume.v1.json.snapshots_client import SnapshotsClient
+from tempest.services.volume.v1.json.volumes_client import VolumesClient
+from tempest.services.volume.v2.json.admin.hosts_client import \
+    HostsClient as VolumeHostsV2Client
+from tempest.services.volume.v2.json.admin.quotas_client import \
+    QuotasClient as VolumeQuotasV2Client
+from tempest.services.volume.v2.json.admin.services_client import \
+    ServicesClient as VolumeServicesV2Client
+from tempest.services.volume.v2.json.admin.types_client import \
+    TypesClient as VolumeTypesV2Client
 from tempest.services.volume.v2.json.availability_zone_client import \
-    VolumeV2AvailabilityZoneClient
-from tempest.services.volume.v2.json.backups_client import BackupsClientV2
+    AvailabilityZoneClient as VolumeAvailabilityZoneV2Client
+from tempest.services.volume.v2.json.backups_client import \
+    BackupsClient as BackupsV2Client
 from tempest.services.volume.v2.json.extensions_client import \
-    ExtensionsV2Client as VolumeV2ExtensionClient
-from tempest.services.volume.v2.json.qos_client import QosSpecsV2Client
+    ExtensionsClient as VolumeExtensionsV2Client
+from tempest.services.volume.v2.json.qos_client import \
+    QosSpecsClient as QosSpecsV2Client
 from tempest.services.volume.v2.json.snapshots_client import \
-    SnapshotsV2Client
-from tempest.services.volume.v2.json.volumes_client import VolumesV2Client
+    SnapshotsClient as SnapshotsV2Client
+from tempest.services.volume.v2.json.volumes_client import \
+    VolumesClient as VolumesV2Client
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
 
 
 class Manager(manager.Manager):
-
-    """
-    Top level manager for OpenStack tempest clients
-    """
+    """Top level manager for OpenStack tempest clients"""
 
     default_params = {
         'disable_ssl_certificate_validation':
@@ -174,9 +202,30 @@
     }
     default_params_with_timeout_values.update(default_params)
 
-    def __init__(self, credentials=None, service=None):
-        super(Manager, self).__init__(credentials=credentials)
+    def __init__(self, credentials, service=None, api_microversions=None):
+        """Initialization of Manager class.
 
+        Set up all service clients and make them available for test cases.
+        :param credentials: type Credentials or TestResources
+        :param service: Service name
+        :param api_microversions: A dict mapping each service catalog type
+               to the microversion that will be set on the respective
+               service clients.
+               {<service catalog type>: request_microversion}
+               Example:
+                {'compute': request_microversion}
+                    - request_microversion will be set on all compute
+                      service clients.
+                OR
+                {'compute': request_microversion,
+                 'volume': request_microversion}
+                    - request_microversion of compute will be set on all
+                      compute service clients.
+                    - request_microversion of volume will be set on all
+                      volume service clients.
+        """
+        super(Manager, self).__init__(credentials=credentials)
+        self.api_microversions = api_microversions or {}
         self._set_compute_clients()
         self._set_database_clients()
         self._set_identity_clients()
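
The api_microversions argument documented above is simply a mapping from service catalog type to the microversion string that the corresponding family of clients should send. A purely illustrative helper (not Tempest code) showing how such a mapping is consumed:

    # Hypothetical helper: look up the request microversion configured for
    # one service family, with None meaning "no microversion header".
    def microversion_for(api_microversions, service, default=None):
        return (api_microversions or {}).get(service, default)

    versions = {'compute': '2.2', 'volume': '3.0'}
    assert microversion_for(versions, 'compute') == '2.2'
    assert microversion_for(versions, 'image') is None
    assert microversion_for(None, 'compute') is None
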
@@ -189,6 +238,22 @@
             CONF.identity.region,
             endpoint_type=CONF.baremetal.endpoint_type,
             **self.default_params_with_timeout_values)
+        self.network_agents_client = NetworkAgentsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.network_extensions_client = NetworkExtensionsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
         self.network_client = NetworkClient(
             self.auth_provider,
             CONF.network.catalog_type,
@@ -205,6 +270,14 @@
             build_interval=CONF.network.build_interval,
             build_timeout=CONF.network.build_timeout,
             **self.default_params)
+        self.subnetpools_client = SubnetpoolsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
         self.subnets_client = SubnetsClient(
             self.auth_provider,
             CONF.network.catalog_type,
@@ -213,6 +286,62 @@
             build_interval=CONF.network.build_interval,
             build_timeout=CONF.network.build_timeout,
             **self.default_params)
+        self.ports_client = PortsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.network_quotas_client = NetworkQuotasClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.floating_ips_client = FloatingIPsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.metering_labels_client = MeteringLabelsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.metering_label_rules_client = MeteringLabelRulesClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.security_group_rules_client = SecurityGroupRulesClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
+        self.security_groups_client = SecurityGroupsClient(
+            self.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **self.default_params)
         self.messaging_client = MessagingClient(
             self.auth_provider,
             CONF.messaging.catalog_type,
@@ -225,8 +354,15 @@
                 CONF.identity.region,
                 endpoint_type=CONF.telemetry.endpoint_type,
                 **self.default_params_with_timeout_values)
+        if CONF.service_available.aodh:
+            self.alarming_client = AlarmingClient(
+                self.auth_provider,
+                CONF.alarming.catalog_type,
+                CONF.identity.region,
+                endpoint_type=CONF.alarming.endpoint_type,
+                **self.default_params_with_timeout_values)
         if CONF.service_available.glance:
-            self.image_client = ImageClient(
+            self.image_client = ImagesClient(
                 self.auth_provider,
                 CONF.image.catalog_type,
                 CONF.image.region or CONF.identity.region,
@@ -234,7 +370,7 @@
                 build_interval=CONF.image.build_interval,
                 build_timeout=CONF.image.build_timeout,
                 **self.default_params)
-            self.image_client_v2 = ImageClientV2(
+            self.image_client_v2 = ImagesClientV2(
                 self.auth_provider,
                 CONF.image.catalog_type,
                 CONF.image.region or CONF.identity.region,
@@ -259,14 +395,7 @@
         self.negative_client = negative_rest_client.NegativeRestClient(
             self.auth_provider, service, **self.default_params)
 
-        # Generating EC2 credentials in tempest is only supported
-        # with identity v2
-        if CONF.identity_feature_enabled.api_v2 and \
-                CONF.identity.auth_version == 'v2':
-            # EC2 and S3 clients, if used, will check configured AWS
-            # credentials and generate new ones if needed
-            self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
-            self.s3_client = botoclients.ObjectClientS3(self.identity_client)
+        self._set_api_microversions()
 
     def _set_compute_clients(self):
         params = {
@@ -295,7 +424,8 @@
         self.server_groups_client = ServerGroupsClient(
             self.auth_provider, **params)
         self.limits_client = LimitsClient(self.auth_provider, **params)
-        self.images_client = ImagesClient(self.auth_provider, **params)
+        self.compute_images_client = ComputeImagesClient(self.auth_provider,
+                                                         **params)
         self.keypairs_client = KeyPairsClient(self.auth_provider, **params)
         self.quotas_client = QuotasClient(self.auth_provider, **params)
         self.quota_classes_client = QuotaClassesClient(self.auth_provider,
@@ -307,11 +437,11 @@
             self.auth_provider, **params)
         self.floating_ips_bulk_client = FloatingIPsBulkClient(
             self.auth_provider, **params)
-        self.floating_ips_client = FloatingIPsClient(self.auth_provider,
-                                                     **params)
-        self.security_group_rules_client = SecurityGroupRulesClient(
+        self.compute_floating_ips_client = ComputeFloatingIPsClient(
             self.auth_provider, **params)
-        self.security_groups_client = SecurityGroupsClient(
+        self.compute_security_group_rules_client = \
+            ComputeSecurityGroupRulesClient(self.auth_provider, **params)
+        self.compute_security_groups_client = ComputeSecurityGroupsClient(
             self.auth_provider, **params)
         self.interfaces_client = InterfacesClient(self.auth_provider,
                                                   **params)
@@ -371,29 +501,50 @@
             'region': CONF.identity.region
         }
         params.update(self.default_params_with_timeout_values)
+
+        # Clients below use the admin endpoint type of Keystone API v2
         params_v2_admin = params.copy()
         params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
-        # Client uses admin endpoint type of Keystone API v2
+        self.endpoints_v2_client = EndpointsV2Client(self.auth_provider,
+                                                     **params_v2_admin)
         self.identity_client = IdentityClient(self.auth_provider,
                                               **params_v2_admin)
+        self.tenants_client = TenantsClient(self.auth_provider,
+                                            **params_v2_admin)
+        self.roles_client = RolesClient(self.auth_provider,
+                                        **params_v2_admin)
+        self.users_client = UsersClient(self.auth_provider,
+                                        **params_v2_admin)
+        self.services_v2_client = ServicesV2Client(self.auth_provider,
+                                                   **params_v2_admin)
+
+        # Clients below use the public endpoint type of Keystone API v2
         params_v2_public = params.copy()
         params_v2_public['endpoint_type'] = (
             CONF.identity.v2_public_endpoint_type)
-        # Client uses public endpoint type of Keystone API v2
         self.identity_public_client = IdentityClient(self.auth_provider,
                                                      **params_v2_public)
+        self.tenants_public_client = TenantsClient(self.auth_provider,
+                                                   **params_v2_public)
+        self.users_public_client = UsersClient(self.auth_provider,
+                                               **params_v2_public)
+
+        # Clients below use the endpoint type of Keystone API v3
         params_v3 = params.copy()
         params_v3['endpoint_type'] = CONF.identity.v3_endpoint_type
-        # Client uses the endpoint type of Keystone API v3
         self.identity_v3_client = IdentityV3Client(self.auth_provider,
                                                    **params_v3)
-        self.endpoints_client = EndPointClient(self.auth_provider,
-                                               **params)
-        self.service_client = ServiceClient(self.auth_provider, **params)
-        self.policy_client = PolicyClient(self.auth_provider, **params)
-        self.region_client = RegionClient(self.auth_provider, **params)
-        self.credentials_client = CredentialsClient(self.auth_provider,
-                                                    **params)
+        self.endpoints_client = EndPointV3Client(self.auth_provider,
+                                                 **params_v3)
+        self.identity_services_client = IdentityServicesV3Client(
+            self.auth_provider, **params_v3)
+        self.policies_client = PoliciesV3Client(self.auth_provider,
+                                                **params_v3)
+        self.regions_client = RegionsV3Client(self.auth_provider, **params_v3)
+        self.credentials_client = CredentialsV3Client(self.auth_provider,
+                                                      **params_v3)
+        self.groups_client = GroupsV3Client(self.auth_provider, **params_v3)
+
         # Token clients do not use the catalog. They only need default_params.
         # They read auth_url, so they should only be set if the corresponding
         # API version is marked as enabled
@@ -426,10 +577,12 @@
                                                 **params)
         self.volume_qos_v2_client = QosSpecsV2Client(
             self.auth_provider, **params)
-        self.volume_services_v2_client = VolumesServicesV2Client(
+        self.volume_services_client = VolumeServicesClient(
+            self.auth_provider, **params)
+        self.volume_services_v2_client = VolumeServicesV2Client(
             self.auth_provider, **params)
         self.backups_client = BackupsClient(self.auth_provider, **params)
-        self.backups_v2_client = BackupsClientV2(self.auth_provider,
+        self.backups_v2_client = BackupsV2Client(self.auth_provider,
                                                  **params)
         self.snapshots_client = SnapshotsClient(self.auth_provider,
                                                 **params)
@@ -443,7 +596,7 @@
             **params)
         self.volume_types_client = VolumeTypesClient(self.auth_provider,
                                                      **params)
-        self.volume_services_client = VolumesServicesClient(
+        self.volume_types_v2_client = VolumeTypesV2Client(
             self.auth_provider, **params)
         self.volume_hosts_client = VolumeHostsClient(self.auth_provider,
                                                      **params)
@@ -453,16 +606,14 @@
                                                        **params)
         self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider,
                                                             **params)
-        self.volumes_extension_client = VolumeExtensionClient(
+        self.volumes_extension_client = VolumeExtensionsClient(
             self.auth_provider, **params)
-        self.volumes_v2_extension_client = VolumeV2ExtensionClient(
+        self.volumes_v2_extension_client = VolumeExtensionsV2Client(
             self.auth_provider, **params)
         self.volume_availability_zone_client = \
             VolumeAvailabilityZoneClient(self.auth_provider, **params)
         self.volume_v2_availability_zone_client = \
-            VolumeV2AvailabilityZoneClient(self.auth_provider, **params)
-        self.volume_types_v2_client = VolumeTypesV2Client(
-            self.auth_provider, **params)
+            VolumeAvailabilityZoneV2Client(self.auth_provider, **params)
 
     def _set_object_storage_clients(self):
         params = {
@@ -476,16 +627,14 @@
         self.container_client = ContainerClient(self.auth_provider, **params)
         self.object_client = ObjectClient(self.auth_provider, **params)
 
-
-class AdminManager(Manager):
-
-    """
-    Manager object that uses the admin credentials for its
-    managed client objects
-    """
-
-    def __init__(self, service=None):
-        super(AdminManager, self).__init__(
-            credentials=cred_provider.get_configured_credentials(
-                'identity_admin'),
-            service=service)
+    def _set_api_microversions(self):
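+        # Collect every attribute that looks like a service client and, if a
+        # microversion is configured for that client's service, apply it via
+        # set_api_microversion() on clients that support it.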
+        service_clients = [x for x in self.__dict__ if x.endswith('_client')]
+        for client in service_clients:
+            client_obj = getattr(self, client)
+            microversion = self.api_microversions.get(client_obj.service)
+            if microversion:
+                if hasattr(client_obj, 'set_api_microversion'):
+                    client_obj.set_api_microversion(microversion)
+                else:
+                    LOG.debug("Need to implement set_api_microversion on %s"
+                              % client)
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index a90b0ce..9e98d90 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -85,22 +85,33 @@
 import argparse
 import netaddr
 import os
+import traceback
 
+from cliff import command
 from oslo_log import log as logging
-import yaml
-
-from tempest import config
-from tempest import exceptions as exc
-from tempest.services.identity.v2.json import identity_client
-from tempest.services.network.json import network_client
-from tempest.services.network.json import networks_client
-from tempest.services.network.json import subnets_client
 import tempest_lib.auth
 from tempest_lib.common.utils import data_utils
 import tempest_lib.exceptions
+from tempest_lib.services.network import networks_client
+from tempest_lib.services.network import subnets_client
+import yaml
+
+from tempest.common import identity
+from tempest import config
+from tempest import exceptions as exc
+from tempest.services.identity.v2.json import identity_client
+from tempest.services.identity.v2.json import roles_client
+from tempest.services.identity.v2.json import tenants_client
+from tempest.services.identity.v2.json import users_client
+from tempest.services.network.json import network_client
 
 LOG = None
 CONF = config.CONF
+DESCRIPTION = ('Create accounts.yaml file for concurrent test runs.%s'
+               'One primary user, one alt user, '
+               'one swift admin, one stack owner '
+               'and one admin (optionally) will be created '
+               'for each concurrent thread.' % os.linesep)
 
 
 def setup_logging():
@@ -137,6 +148,27 @@
         endpoint_type='adminURL',
         **params
     )
+    tenants_admin = tenants_client.TenantsClient(
+        _auth,
+        CONF.identity.catalog_type,
+        CONF.identity.region,
+        endpoint_type='adminURL',
+        **params
+    )
+    roles_admin = roles_client.RolesClient(
+        _auth,
+        CONF.identity.catalog_type,
+        CONF.identity.region,
+        endpoint_type='adminURL',
+        **params
+    )
+    users_admin = users_client.UsersClient(
+        _auth,
+        CONF.identity.catalog_type,
+        CONF.identity.region,
+        endpoint_type='adminURL',
+        **params
+    )
     network_admin = None
     networks_admin = None
     subnets_admin = None
@@ -162,14 +194,15 @@
             CONF.network.region or CONF.identity.region,
             endpoint_type='adminURL',
             **params)
-    return (identity_admin, neutron_iso_networks, network_admin,
-            networks_admin, subnets_admin)
+    return (identity_admin, tenants_admin, roles_admin, users_admin,
+            neutron_iso_networks, network_admin, networks_admin, subnets_admin)
 
 
 def create_resources(opts, resources):
-    (identity_admin, neutron_iso_networks,
-     network_admin, networks_admin, subnets_admin) = get_admin_clients(opts)
-    roles = identity_admin.list_roles()['roles']
+    (identity_admin, tenants_admin, roles_admin, users_admin,
+     neutron_iso_networks, network_admin, networks_admin,
+     subnets_admin) = get_admin_clients(opts)
+    roles = roles_admin.list_roles()['roles']
     for u in resources['users']:
         u['role_ids'] = []
         for r in u.get('roles', ()):
@@ -179,37 +212,39 @@
                 msg = "Role: %s doesn't exist" % r
                 raise exc.InvalidConfiguration(msg)
             u['role_ids'] += [role['id']]
-    existing = [x['name'] for x in identity_admin.list_tenants()['tenants']]
+    existing = [x['name'] for x in tenants_admin.list_tenants()['tenants']]
     for tenant in resources['tenants']:
         if tenant not in existing:
-            identity_admin.create_tenant(tenant)
+            tenants_admin.create_tenant(tenant)
         else:
-            LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+            LOG.warning("Tenant '%s' already exists in this environment"
+                        % tenant)
     LOG.info('Tenants created')
     for u in resources['users']:
         try:
-            tenant = identity_admin.get_tenant_by_name(u['tenant'])
+            tenant = identity.get_tenant_by_name(tenants_admin, u['tenant'])
         except tempest_lib.exceptions.NotFound:
             LOG.error("Tenant: %s - not found" % u['tenant'])
             continue
         while True:
             try:
-                identity_admin.get_user_by_username(tenant['id'], u['name'])
+                identity.get_user_by_username(tenants_admin,
+                                              tenant['id'], u['name'])
             except tempest_lib.exceptions.NotFound:
-                identity_admin.create_user(
+                users_admin.create_user(
                     u['name'], u['pass'], tenant['id'],
                     "%s@%s" % (u['name'], tenant['id']),
                     enabled=True)
                 break
             else:
-                LOG.warn("User '%s' already exists in this environment. "
-                         "New name generated" % u['name'])
+                LOG.warning("User '%s' already exists in this environment. "
+                            "New name generated" % u['name'])
                 u['name'] = random_user_name(opts.tag, u['prefix'])
 
     LOG.info('Users created')
     if neutron_iso_networks:
         for u in resources['users']:
-            tenant = identity_admin.get_tenant_by_name(u['tenant'])
+            tenant = identity.get_tenant_by_name(tenants_admin, u['tenant'])
             network_name, router_name = create_network_resources(
                 network_admin, networks_admin, subnets_admin, tenant['id'],
                 u['name'])
@@ -218,19 +253,19 @@
         LOG.info('Networks created')
     for u in resources['users']:
         try:
-            tenant = identity_admin.get_tenant_by_name(u['tenant'])
+            tenant = identity.get_tenant_by_name(tenants_admin, u['tenant'])
         except tempest_lib.exceptions.NotFound:
             LOG.error("Tenant: %s - not found" % u['tenant'])
             continue
         try:
-            user = identity_admin.get_user_by_username(tenant['id'],
-                                                       u['name'])
+            user = identity.get_user_by_username(tenants_admin,
+                                                 tenant['id'], u['name'])
         except tempest_lib.exceptions.NotFound:
             LOG.error("User: %s - not found" % u['user'])
             continue
         for r in u['role_ids']:
             try:
-                identity_admin.assign_user_role(tenant['id'], user['id'], r)
+                roles_admin.assign_user_role(tenant['id'], user['id'], r)
             except tempest_lib.exceptions.Conflict:
                 # don't care if it's already assigned
                 pass
@@ -277,8 +312,8 @@
         return resp_body['router']
 
     def _add_router_interface(router_id, subnet_id):
-        network_admin_client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
+        network_admin_client.add_router_interface(router_id,
+                                                  subnet_id=subnet_id)
 
     network_name = name + "-network"
     network = _create_network(network_name)
@@ -339,7 +374,7 @@
                 resources['users'].append({
                     'tenant': tenant,
                     'name': user,
-                    'pass': data_utils.rand_name(),
+                    'pass': data_utils.rand_password(),
                     'prefix': user_group['prefix'],
                     'roles': user_group['roles']
                 })
@@ -355,7 +390,7 @@
             'password': user['pass'],
             'roles': user['roles']
         }
-        if 'network' or 'router' in user:
+        if 'network' in user or 'router' in user:
             account['resources'] = {}
         if 'network' in user:
             account['resources']['network'] = user['network']
@@ -369,20 +404,7 @@
     LOG.info('%s generated successfully!' % opts.accounts)
 
 
-def get_options():
-    usage_string = ('tempest-account-generator [-h] <ARG> ...\n\n'
-                    'To see help on specific argument, do:\n'
-                    'tempest-account-generator <ARG> -h')
-    parser = argparse.ArgumentParser(
-        description='Create accounts.yaml file for concurrent test runs. '
-                    'One primary user, one alt user, '
-                    'one swift admin, one stack owner '
-                    'and one admin (optionally) will be created '
-                    'for each concurrent thread.',
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-        usage=usage_string
-    )
-
+def _parser_add_args(parser):
     parser.add_argument('-c', '--config-file',
                         metavar='/etc/tempest.conf',
                         help='path to tempest config file')
@@ -419,16 +441,50 @@
                         metavar='accounts_file.yaml',
                         help='Output accounts yaml file')
 
+
+def get_options():
+    usage_string = ('tempest-account-generator [-h] <ARG> ...\n\n'
+                    'To see help on specific argument, do:\n'
+                    'tempest-account-generator <ARG> -h')
+    parser = argparse.ArgumentParser(
+        description=DESCRIPTION,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        usage=usage_string
+    )
+
+    _parser_add_args(parser)
     opts = parser.parse_args()
-    if opts.config_file:
-        config.CONF.set_config_path(opts.config_file)
     return opts
 
 
+class TempestAccountGenerator(command.Command):
+
+    def get_parser(self, prog_name):
+        parser = super(TempestAccountGenerator, self).get_parser(prog_name)
+        _parser_add_args(parser)
+        return parser
+
+    def take_action(self, parsed_args):
+        try:
+            return main(parsed_args)
+        except Exception:
+            LOG.exception("Failure generating test accounts.")
+            traceback.print_exc()
+            raise
+        return 0
+
+    def get_description(self):
+        return DESCRIPTION
+
+
 def main(opts=None):
-    if not opts:
-        opts = get_options()
     setup_logging()
+    if not opts:
+        LOG.warn("Use of: 'tempest-account-generator' is deprecated, "
+                 "please use: 'tempest account-generator'")
+        opts = get_options()
+    if opts.config_file:
+        config.CONF.set_config_path(opts.config_file)
     resources = generate_resources(opts)
     create_resources(opts, resources)
     dump_accounts(opts, resources)
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 4fa4302..7b73a61 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -51,6 +51,7 @@
 Please run with **--help** to see full list of options.
 """
 import sys
+import traceback
 
 from cliff import command
 from oslo_log import log as logging
@@ -58,7 +59,8 @@
 
 from tempest import clients
 from tempest.cmd import cleanup_service
-from tempest.common import cred_provider
+from tempest.common import credentials_factory as credentials
+from tempest.common import identity
 from tempest import config
 
 SAVED_STATE_JSON = "saved_state.json"
@@ -73,9 +75,19 @@
         super(TempestCleanup, self).__init__(app, cmd)
 
     def take_action(self, parsed_args):
+        try:
+            self.init(parsed_args)
+            self._cleanup()
+        except Exception:
+            LOG.exception("Failure during cleanup")
+            traceback.print_exc()
+            raise
+        return 0
+
+    def init(self, parsed_args):
         cleanup_service.init_conf()
         self.options = parsed_args
-        self.admin_mgr = clients.AdminManager()
+        self.admin_mgr = credentials.AdminManager()
         self.dry_run_data = {}
         self.json_data = {}
 
@@ -95,10 +107,9 @@
             return
 
         self._load_json()
-        self._cleanup()
 
     def _cleanup(self):
-        LOG.debug("Begin cleanup")
+        print ("Begin cleanup")
         is_dry_run = self.options.dry_run
         is_preserve = not self.options.delete_tempest_conf_objects
         is_save_state = False
@@ -117,7 +128,7 @@
                   'is_save_state': is_save_state}
         tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
         tenants = tenant_service.list()
-        LOG.debug("Process %s tenants" % len(tenants))
+        print ("Process %s tenants" % len(tenants))
 
         # Loop through list of tenants and clean them up.
         for tenant in tenants:
@@ -148,7 +159,7 @@
             self._remove_admin_role(tenant_id)
 
     def _clean_tenant(self, tenant):
-        LOG.debug("Cleaning tenant:  %s " % tenant['name'])
+        print ("Cleaning tenant:  %s " % tenant['name'])
         is_dry_run = self.options.dry_run
         dry_run_data = self.dry_run_data
         is_preserve = not self.options.delete_tempest_conf_objects
@@ -162,7 +173,7 @@
         kwargs = {"username": CONF.auth.admin_username,
                   "password": CONF.auth.admin_password,
                   "tenant_name": tenant['name']}
-        mgr = clients.Manager(credentials=cred_provider.get_credentials(
+        mgr = clients.Manager(credentials=credentials.get_credentials(
             **kwargs))
         kwargs = {'data': tenant_data,
                   'is_dry_run': is_dry_run,
@@ -175,16 +186,18 @@
             svc.run()
 
     def _init_admin_ids(self):
-        id_cl = self.admin_mgr.identity_client
+        tn_cl = self.admin_mgr.tenants_client
+        rl_cl = self.admin_mgr.roles_client
 
-        tenant = id_cl.get_tenant_by_name(CONF.auth.admin_tenant_name)
+        tenant = identity.get_tenant_by_name(tn_cl,
+                                             CONF.auth.admin_tenant_name)
         self.admin_tenant_id = tenant['id']
 
-        user = id_cl.get_user_by_username(self.admin_tenant_id,
-                                          CONF.auth.admin_username)
+        user = identity.get_user_by_username(tn_cl, self.admin_tenant_id,
+                                             CONF.auth.admin_username)
         self.admin_id = user['id']
 
-        roles = id_cl.list_roles()['roles']
+        roles = rl_cl.list_roles()['roles']
         for role in roles:
             if role['name'] == CONF.identity.admin_role:
                 self.admin_role_id = role['id']
@@ -219,8 +232,9 @@
 
     def _add_admin(self, tenant_id):
         id_cl = self.admin_mgr.identity_client
+        rl_cl = self.admin_mgr.roles_client
         needs_role = True
-        roles = id_cl.list_user_roles(tenant_id, self.admin_id)['roles']
+        roles = rl_cl.list_user_roles(tenant_id, self.admin_id)['roles']
         for role in roles:
             if role['id'] == self.admin_role_id:
                 needs_role = False
@@ -235,19 +249,19 @@
         LOG.debug("Remove admin user role for tenant: %s" % tenant_id)
         # Must initialize AdminManager for each user role
         # Otherwise authentication exception is thrown, weird
-        id_cl = clients.AdminManager().identity_client
+        id_cl = credentials.AdminManager().identity_client
         if (self._tenant_exists(tenant_id)):
             try:
-                id_cl.remove_user_role(tenant_id, self.admin_id,
+                id_cl.delete_user_role(tenant_id, self.admin_id,
                                        self.admin_role_id)
             except Exception as ex:
                 LOG.exception("Failed removing role from tenant which still"
                               "exists, exception: %s" % ex)
 
     def _tenant_exists(self, tenant_id):
-        id_cl = self.admin_mgr.identity_client
+        tn_cl = self.admin_mgr.tenants_client
         try:
-            t = id_cl.get_tenant(tenant_id)
+            t = tn_cl.show_tenant(tenant_id)
             LOG.debug("Tenant is: %s" % str(t))
             return True
         except Exception as ex:
@@ -255,7 +269,7 @@
             return False
 
     def _init_state(self):
-        LOG.debug("Initializing saved state.")
+        print ("Initializing saved state.")
         data = {}
         admin_mgr = self.admin_mgr
         kwargs = {'data': data,
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 1b5820b..33f19b1 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -16,7 +16,8 @@
 
 from oslo_log import log as logging
 
-from tempest import clients
+from tempest.common import credentials_factory as credentials
+from tempest.common import identity
 from tempest import config
 from tempest import test
 
@@ -82,12 +83,12 @@
 
 
 def _get_network_id(net_name, tenant_name):
-    am = clients.AdminManager()
+    am = credentials.AdminManager()
     net_cl = am.networks_client
-    id_cl = am.identity_client
+    tn_cl = am.tenants_client
 
     networks = net_cl.list_networks()
-    tenant = id_cl.get_tenant_by_name(tenant_name)
+    tenant = identity.get_tenant_by_name(tn_cl, tenant_name)
     t_id = tenant['id']
     n_id = None
     for net in networks['networks']:
@@ -268,7 +269,7 @@
 class SecurityGroupService(BaseService):
     def __init__(self, manager, **kwargs):
         super(SecurityGroupService, self).__init__(kwargs)
-        self.client = manager.security_groups_client
+        self.client = manager.compute_security_groups_client
 
     def list(self):
         client = self.client
@@ -294,7 +295,7 @@
 class FloatingIpService(BaseService):
     def __init__(self, manager, **kwargs):
         super(FloatingIpService, self).__init__(kwargs)
-        self.client = manager.floating_ips_client
+        self.client = manager.compute_floating_ips_client
 
     def list(self):
         client = self.client
@@ -384,6 +385,11 @@
         self.client = manager.network_client
         self.networks_client = manager.networks_client
         self.subnets_client = manager.subnets_client
+        self.ports_client = manager.ports_client
+        self.floating_ips_client = manager.floating_ips_client
+        self.metering_labels_client = manager.metering_labels_client
+        self.metering_label_rules_client = manager.metering_label_rules_client
+        self.security_groups_client = manager.security_groups_client
 
     def _filter_by_conf_networks(self, item_list):
         if not item_list or not all(('network_id' in i for i in item_list)):
@@ -420,7 +426,7 @@
 class NetworkFloatingIpService(NetworkService):
 
     def list(self):
-        client = self.client
+        client = self.floating_ips_client
         flips = client.list_floatingips(**self.tenant_filter)
         flips = flips['floatingips']
         LOG.debug("List count, %s Network Floating IPs" % len(flips))
@@ -463,8 +469,7 @@
                          in client.list_router_interfaces(rid)['ports']
                          if port["device_owner"] == "network:router_interface"]
                 for port in ports:
-                    client.remove_router_interface_with_port_id(rid,
-                                                                port['id'])
+                    client.remove_router_interface(rid, port_id=port['id'])
                 client.delete_router(rid)
             except Exception:
                 LOG.exception("Delete Router exception.")
@@ -573,7 +578,7 @@
 class NetworkMeteringLabelRuleService(NetworkService):
 
     def list(self):
-        client = self.client
+        client = self.metering_label_rules_client
         rules = client.list_metering_label_rules()
         rules = rules['metering_label_rules']
         rules = self._filter_by_tenant_id(rules)
@@ -581,7 +586,7 @@
         return rules
 
     def delete(self):
-        client = self.client
+        client = self.metering_label_rules_client
         rules = self.list()
         for rule in rules:
             try:
@@ -597,7 +602,7 @@
 class NetworkMeteringLabelService(NetworkService):
 
     def list(self):
-        client = self.client
+        client = self.metering_labels_client
         labels = client.list_metering_labels()
         labels = labels['metering_labels']
         labels = self._filter_by_tenant_id(labels)
@@ -605,7 +610,7 @@
         return labels
 
     def delete(self):
-        client = self.client
+        client = self.metering_labels_client
         labels = self.list()
         for label in labels:
             try:
@@ -621,7 +626,7 @@
 class NetworkPortService(NetworkService):
 
     def list(self):
-        client = self.client
+        client = self.ports_client
         ports = [port for port in
                  client.list_ports(**self.tenant_filter)['ports']
                  if port["device_owner"] == "" or
@@ -634,7 +639,7 @@
         return ports
 
     def delete(self):
-        client = self.client
+        client = self.ports_client
         ports = self.list()
         for port in ports:
             try:
@@ -649,7 +654,7 @@
 
 class NetworkSecGroupService(NetworkService):
     def list(self):
-        client = self.client
+        client = self.security_groups_client
         filter = self.tenant_filter
         # cannot delete default sec group so never show it.
         secgroups = [secgroup for secgroup in
@@ -769,7 +774,7 @@
 class ImageService(BaseService):
     def __init__(self, manager, **kwargs):
         super(ImageService, self).__init__(kwargs)
-        self.client = manager.images_client
+        self.client = manager.compute_images_client
 
     def list(self):
         client = self.client
@@ -809,11 +814,14 @@
         self.client = manager.identity_client
 
 
-class UserService(IdentityService):
+class UserService(BaseService):
+
+    def __init__(self, manager, **kwargs):
+        super(UserService, self).__init__(kwargs)
+        self.client = manager.users_client
 
     def list(self):
-        client = self.client
-        users = client.get_users()['users']
+        users = self.client.list_users()['users']
 
         if not self.is_save_state:
             users = [user for user in users if user['id']
@@ -831,11 +839,10 @@
         return users
 
     def delete(self):
-        client = self.client
         users = self.list()
         for user in users:
             try:
-                client.delete_user(user['id'])
+                self.client.delete_user(user['id'])
             except Exception:
                 LOG.exception("Delete User exception.")
 
@@ -850,12 +857,15 @@
             self.data['users'][user['id']] = user['name']
 
 
-class RoleService(IdentityService):
+class RoleService(BaseService):
+
+    def __init__(self, manager, **kwargs):
+        super(RoleService, self).__init__(kwargs)
+        self.client = manager.roles_client
 
     def list(self):
-        client = self.client
         try:
-            roles = client.list_roles()['roles']
+            roles = self.client.list_roles()['roles']
             # reconcile roles with saved state and never list admin role
             if not self.is_save_state:
                 roles = [role for role in roles if
@@ -869,11 +879,10 @@
             return []
 
     def delete(self):
-        client = self.client
         roles = self.list()
         for role in roles:
             try:
-                client.delete_role(role['id'])
+                self.client.delete_role(role['id'])
             except Exception:
                 LOG.exception("Delete Role exception.")
 
@@ -888,11 +897,14 @@
             self.data['roles'][role['id']] = role['name']
 
 
-class TenantService(IdentityService):
+class TenantService(BaseService):
+
+    def __init__(self, manager, **kwargs):
+        super(TenantService, self).__init__(kwargs)
+        self.client = manager.tenants_client
 
     def list(self):
-        client = self.client
-        tenants = client.list_tenants()['tenants']
+        tenants = self.client.list_tenants()['tenants']
         if not self.is_save_state:
             tenants = [tenant for tenant in tenants if (tenant['id']
                        not in self.saved_state_json['tenants'].keys()
@@ -906,11 +918,10 @@
         return tenants
 
     def delete(self):
-        client = self.client
         tenants = self.list()
         for tenant in tenants:
             try:
-                client.delete_tenant(tenant['id'])
+                self.client.delete_tenant(tenant['id'])
             except Exception:
                 LOG.exception("Delete Tenant exception.")
 
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index a4ed064..ac67ce4 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -35,7 +35,9 @@
 
 
 def get_tempest_default_config_dir():
-    """Returns the correct default config dir to support both cases of
+    """Get default config directory of tempest
+
+    Returns the correct default config dir to support both cases of
     tempest being or not installed in a virtualenv.
     Cases considered:
     - no virtual env, python2: real_prefix and base_prefix not set
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index f57e757..e26a014 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -118,22 +118,27 @@
 from tempest_lib import auth
 from tempest_lib import exceptions as lib_exc
 from tempest_lib.services.compute import flavors_client
+from tempest_lib.services.compute import floating_ips_client
+from tempest_lib.services.compute import security_group_rules_client
+from tempest_lib.services.compute import security_groups_client
+from tempest_lib.services.compute import servers_client
+from tempest_lib.services.network import subnets_client
 import yaml
 
+from tempest.common import identity
 from tempest.common import waiters
 from tempest import config
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import security_group_rules_client
-from tempest.services.compute.json import security_groups_client
-from tempest.services.compute.json import servers_client
 from tempest.services.identity.v2.json import identity_client
-from tempest.services.image.v2.json import image_client
+from tempest.services.identity.v2.json import roles_client
+from tempest.services.identity.v2.json import tenants_client
+from tempest.services.identity.v2.json import users_client
+from tempest.services.image.v2.json import images_client
 from tempest.services.network.json import network_client
-from tempest.services.network.json import subnets_client
 from tempest.services.object_storage import container_client
 from tempest.services.object_storage import object_client
+from tempest.services.telemetry.json import alarming_client
 from tempest.services.telemetry.json import telemetry_client
-from tempest.services.volume.json import volumes_client
+from tempest.services.volume.v1.json import volumes_client
 
 CONF = config.CONF
 OPTS = {}
@@ -197,6 +202,24 @@
             CONF.identity.region,
             endpoint_type='adminURL',
             **default_params_with_timeout_values)
+        self.tenants = tenants_client.TenantsClient(
+            _auth,
+            CONF.identity.catalog_type,
+            CONF.identity.region,
+            endpoint_type='adminURL',
+            **default_params_with_timeout_values)
+        self.roles = roles_client.RolesClient(
+            _auth,
+            CONF.identity.catalog_type,
+            CONF.identity.region,
+            endpoint_type='adminURL',
+            **default_params_with_timeout_values)
+        self.users = users_client.UsersClient(
+            _auth,
+            CONF.identity.catalog_type,
+            CONF.identity.region,
+            endpoint_type='adminURL',
+            **default_params_with_timeout_values)
         self.servers = servers_client.ServersClient(_auth,
                                                     **compute_params)
         self.flavors = flavors_client.FlavorsClient(_auth,
@@ -211,7 +234,7 @@
                                                   **object_storage_params)
         self.containers = container_client.ContainerClient(
             _auth, **object_storage_params)
-        self.images = image_client.ImageClientV2(
+        self.images = images_client.ImagesClientV2(
             _auth,
             CONF.image.catalog_type,
             CONF.image.region or CONF.identity.region,
@@ -225,6 +248,12 @@
             CONF.identity.region,
             endpoint_type=CONF.telemetry.endpoint_type,
             **default_params_with_timeout_values)
+        self.alarming = alarming_client.AlarmingClient(
+            _auth,
+            CONF.alarming.catalog_type,
+            CONF.identity.region,
+            endpoint_type=CONF.alarming.endpoint_type,
+            **default_params_with_timeout_values)
         self.volumes = volumes_client.VolumesClient(
             _auth,
             CONF.volume.catalog_type,
@@ -283,20 +312,21 @@
     Don't create the tenants if they already exist.
     """
     admin = keystone_admin()
-    body = admin.identity.list_tenants()['tenants']
+    body = admin.tenants.list_tenants()['tenants']
     existing = [x['name'] for x in body]
     for tenant in tenants:
         if tenant not in existing:
-            admin.identity.create_tenant(tenant)['tenant']
+            admin.tenants.create_tenant(tenant)['tenant']
         else:
-            LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+            LOG.warning("Tenant '%s' already exists in this environment"
+                        % tenant)
 
 
 def destroy_tenants(tenants):
     admin = keystone_admin()
     for tenant in tenants:
-        tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
-        admin.identity.delete_tenant(tenant_id)
+        tenant_id = identity.get_tenant_by_name(admin.tenants, tenant)['id']
+        admin.tenants.delete_tenant(tenant_id)
 
 ##############
 #
@@ -324,11 +354,11 @@
 
 def _assign_swift_role(user, swift_role):
     admin = keystone_admin()
-    roles = admin.identity.list_roles()
+    roles = admin.roles.list_roles()['roles']
     role = next(r for r in roles if r['name'] == swift_role)
     LOG.debug(USERS[user])
     try:
-        admin.identity.assign_user_role(
+        admin.roles.assign_user_role(
             USERS[user]['tenant_id'],
             USERS[user]['id'],
             role['id'])
@@ -347,16 +377,17 @@
     admin = keystone_admin()
     for u in users:
         try:
-            tenant = admin.identity.get_tenant_by_name(u['tenant'])
+            tenant = identity.get_tenant_by_name(admin.tenants, u['tenant'])
         except lib_exc.NotFound:
             LOG.error("Tenant: %s - not found" % u['tenant'])
             continue
         try:
-            admin.identity.get_user_by_username(tenant['id'], u['name'])
-            LOG.warn("User '%s' already exists in this environment"
-                     % u['name'])
+            identity.get_user_by_username(admin.tenants,
+                                          tenant['id'], u['name'])
+            LOG.warning("User '%s' already exists in this environment"
+                        % u['name'])
         except lib_exc.NotFound:
-            admin.identity.create_user(
+            admin.users.create_user(
                 u['name'], u['pass'], tenant['id'],
                 "%s@%s" % (u['name'], tenant['id']),
                 enabled=True)
@@ -365,10 +396,11 @@
 def destroy_users(users):
     admin = keystone_admin()
     for user in users:
-        tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
-        user_id = admin.identity.get_user_by_username(tenant_id,
-                                                      user['name'])['id']
-        admin.identity.delete_user(user_id)
+        tenant_id = identity.get_tenant_by_name(admin.tenants,
+                                                user['tenant'])['id']
+        user_id = identity.get_user_by_username(admin.tenants,
+                                                tenant_id, user['name'])['id']
+        admin.users.delete_user(user_id)
 
 
 def collect_users(users):
@@ -376,10 +408,11 @@
     LOG.info("Collecting users")
     admin = keystone_admin()
     for u in users:
-        tenant = admin.identity.get_tenant_by_name(u['tenant'])
+        tenant = identity.get_tenant_by_name(admin.tenants, u['tenant'])
         u['tenant_id'] = tenant['id']
         USERS[u['name']] = u
-        body = admin.identity.get_user_by_username(tenant['id'], u['name'])
+        body = identity.get_user_by_username(admin.tenants,
+                                             tenant['id'], u['name'])
         USERS[u['name']]['id'] = body['id']
 
 
@@ -432,7 +465,7 @@
         LOG.info("checking users")
         for name, user in six.iteritems(self.users):
             client = keystone_admin()
-            found = client.identity.get_user(user['id'])['user']
+            found = client.users.show_user(user['id'])['user']
             self.assertEqual(found['name'], user['name'])
             self.assertEqual(found['tenantId'], user['tenant_id'])
 
@@ -474,24 +507,26 @@
                 for network_name, body in found['addresses'].items():
                     for addr in body:
                         ip = addr['addr']
-                        # If floatingip_for_ssh is at True, it's assumed
-                        # you want to use the floating IP to reach the server,
-                        # fallback to fixed IP, then other type.
+                        # Use floating IP, fixed IP or other type to
+                        # reach the server.
                         # This is useful in multi-node environment.
-                        if CONF.compute.use_floatingip_for_ssh:
+                        if CONF.validation.connect_method == 'floating':
                             if addr.get('OS-EXT-IPS:type',
                                         'floating') == 'floating':
                                 self._ping_ip(ip, 60)
                                 _floating_is_alive = True
-                        elif addr.get('OS-EXT-IPS:type', 'fixed') == 'fixed':
-                            namespace = _get_router_namespace(client,
-                                                              network_name)
-                            self._ping_ip(ip, 60, namespace)
+                        elif CONF.validation.connect_method == 'fixed':
+                            if addr.get('OS-EXT-IPS:type',
+                                        'fixed') == 'fixed':
+                                namespace = _get_router_namespace(client,
+                                                                  network_name)
+                                self._ping_ip(ip, 60, namespace)
                         else:
                             self._ping_ip(ip, 60)
-                # if floatingip_for_ssh is at True, validate found a
-                # floating IP and ping worked.
-                if CONF.compute.use_floatingip_for_ssh:
+                # If CONF.validation.connect_method is floating, validate
+                # that the floating IP is attached to the server and that
+                # the server is pingable.
+                if CONF.validation.connect_method == 'floating':
                     self.assertTrue(_floating_is_alive,
                                     "Server %s has no floating IP." %
                                     server['name'])
@@ -806,8 +841,8 @@
         for subnet in router['subnet']:
             subnet_id = _get_resource_by_name(client.networks,
                                               'subnets', subnet)['id']
-            client.networks.remove_router_interface_with_subnet_id(router_id,
-                                                                   subnet_id)
+            client.networks.remove_router_interface(router_id,
+                                                    subnet_id=subnet_id)
         client.networks.delete_router(router_id)
 
 
@@ -821,8 +856,8 @@
             subnet_id = _get_resource_by_name(client.networks,
                                               'subnets', subnet)['id']
             # connect routers to their subnets
-            client.networks.add_router_interface_with_subnet_id(router_id,
-                                                                subnet_id)
+            client.networks.add_router_interface(router_id,
+                                                 subnet_id=subnet_id)
         # connect routers to external network if set to "gateway"
         if router['gateway']:
             if CONF.network.public_network_id:
@@ -884,8 +919,8 @@
         client.servers.wait_for_server_status(server_id, 'ACTIVE')
         # create security group(s) after server spawning
         for secgroup in server['secgroups']:
-            client.servers.add_security_group(server_id, secgroup)
-        if CONF.compute.use_floatingip_for_ssh:
+            client.servers.add_security_group(server_id, name=secgroup)
+        if CONF.validation.connect_method == 'floating':
             floating_ip_pool = server.get('floating_ip_pool')
             floating_ip = client.floating_ips.create_floating_ip(
                 pool_name=floating_ip_pool)['floating_ip']
@@ -995,7 +1030,9 @@
         server_id = _get_server_by_name(client, volume['server'])['id']
         volume_id = _get_volume_by_name(client, volume['name'])['id']
         device = volume['device']
-        client.volumes.attach_volume(volume_id, server_id, device)
+        client.volumes.attach_volume(volume_id,
+                                     instance_uuid=server_id,
+                                     mountpoint=device)
 
 
 #######################
@@ -1047,7 +1084,7 @@
     destroy_secgroups(RES['secgroups'])
     destroy_users(RES['users'])
     destroy_tenants(RES['tenants'])
-    LOG.warn("Destroy mode incomplete")
+    LOG.warning("Destroy mode incomplete")
 
 
 def get_options():
@@ -1103,6 +1140,8 @@
 
 
 def main():
+    print("Javelin is deprecated and will be removed from Tempest in the "
+          "future.")
     global RES
     get_options()
     setup_logging()
diff --git a/tempest/cmd/list_plugins.py b/tempest/cmd/list_plugins.py
new file mode 100644
index 0000000..1f1ff1a
--- /dev/null
+++ b/tempest/cmd/list_plugins.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Utility for listing all currently installed Tempest plugins.
+
+**Usage:** ``tempest list-plugins``.
+"""
+
+from cliff import command
+from oslo_log import log as logging
+import prettytable
+
+from tempest.test_discover.plugins import TempestTestPluginManager
+
+LOG = logging.getLogger(__name__)
+
+
+class TempestListPlugins(command.Command):
+    def take_action(self, parsed_args):
+        self._list_plugins()
+        return 0
+
+    def get_description(self):
+        return 'List all tempest plugins'
+
+    def _list_plugins(self):
+        plugins = TempestTestPluginManager()
+
+        output = prettytable.PrettyTable(["Name", "EntryPoint"])
+        for plugin in plugins.ext_plugins.extensions:
+            output.add_row([
+                plugin.name, plugin.entry_point_target])
+
+        print(output)
diff --git a/tempest/cmd/main.py b/tempest/cmd/main.py
index 577df9b..acd97a8 100644
--- a/tempest/cmd/main.py
+++ b/tempest/cmd/main.py
@@ -28,6 +28,7 @@
             description='Tempest cli application',
             version=version.VersionInfo('tempest').version_string(),
             command_manager=commandmanager.CommandManager('tempest.cm'),
+            deferred_help=True,
             )
 
     def initialize_app(self, argv):
diff --git a/tempest/cmd/run_stress.py b/tempest/cmd/run_stress.py
old mode 100755
new mode 100644
index 0448589..943fe5b
--- a/tempest/cmd/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -22,7 +22,9 @@
 except ImportError:
     # unittest in python 2.6 does not contain loader, so uses unittest2
     from unittest2 import loader
+import traceback
 
+from cliff import command
 from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 from testtools import testsuite
@@ -33,8 +35,7 @@
 
 
 def discover_stress_tests(path="./", filter_attr=None, call_inherited=False):
-    """Discovers all tempest tests and create action out of them
-    """
+    """Discovers all tempest tests and create action out of them"""
     LOG.info("Start test discovery")
     tests = []
     testloader = loader.TestLoader()
@@ -71,29 +72,51 @@
     return tests
 
 
-parser = argparse.ArgumentParser(description='Run stress tests')
-parser.add_argument('-d', '--duration', default=300, type=int,
-                    help="Duration of test in secs")
-parser.add_argument('-s', '--serial', action='store_true',
-                    help="Trigger running tests serially")
-parser.add_argument('-S', '--stop', action='store_true',
-                    default=False, help="Stop on first error")
-parser.add_argument('-n', '--number', type=int,
-                    help="How often an action is executed for each process")
-group = parser.add_mutually_exclusive_group(required=True)
-group.add_argument('-a', '--all', action='store_true',
-                   help="Execute all stress tests")
-parser.add_argument('-T', '--type',
-                    help="Filters tests of a certain type (e.g. gate)")
-parser.add_argument('-i', '--call-inherited', action='store_true',
-                    default=False,
-                    help="Call also inherited function with stress attribute")
-group.add_argument('-t', "--tests", nargs='?',
-                   help="Name of the file with test description")
+class TempestRunStress(command.Command):
+
+    def get_parser(self, prog_name):
+        pa = super(TempestRunStress, self).get_parser(prog_name)
+        pa = add_arguments(pa)
+        return pa
+
+    def take_action(self, pa):
+        try:
+            action(pa)
+        except Exception:
+            LOG.exception("Failure in the stress test framework")
+            traceback.print_exc()
+            raise
+        return 0
+
+    def get_description(self):
+        return 'Run tempest stress tests'
 
 
-def main():
-    ns = parser.parse_args()
+def add_arguments(parser):
+    parser.add_argument('-d', '--duration', default=300, type=int,
+                        help="Duration of test in secs")
+    parser.add_argument('-s', '--serial', action='store_true',
+                        help="Trigger running tests serially")
+    parser.add_argument('-S', '--stop', action='store_true',
+                        default=False, help="Stop on first error")
+    parser.add_argument('-n', '--number', type=int,
+                        help="How often an action is executed for each "
+                        "process")
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument('-a', '--all', action='store_true',
+                       help="Execute all stress tests")
+    parser.add_argument('-T', '--type',
+                        help="Filters tests of a certain type (e.g. gate)")
+    parser.add_argument('-i', '--call-inherited', action='store_true',
+                        default=False,
+                        help="Call also inherited function with stress "
+                        "attribute")
+    group.add_argument('-t', "--tests", nargs='?',
+                       help="Name of the file with test description")
+    return parser
+
+
+def action(ns):
     result = 0
     if not ns.all:
         tests = json.load(open(ns.tests, 'r'))
@@ -122,9 +145,19 @@
     return result
 
 
+def main():
+    LOG.warning("Deprecated: Use 'tempest run-stress' instead. "
+                "The old entrypoint will be removed in a future release.")
+    parser = argparse.ArgumentParser(description='Run stress tests')
+    pa = add_arguments(parser)
+    ns = pa.parse_args()
+    return action(ns)
+
+
 if __name__ == "__main__":
     try:
         sys.exit(main())
     except Exception:
         LOG.exception("Failure in the stress test framework")
+        traceback.print_exc()
         sys.exit(1)
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
old mode 100755
new mode 100644
index 2811070..92aa19e
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -15,22 +15,27 @@
 #    under the License.
 
 import argparse
+import httplib2
 import os
 import sys
+import traceback
 
-import httplib2
+from cliff import command
+from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 from six import moves
 from six.moves.urllib import parse as urlparse
 
 from tempest import clients
-from tempest.common import credentials
+from tempest.common import credentials_factory as credentials
 from tempest import config
 
 
 CONF = config.CONF
 CONF_PARSER = None
 
+LOG = logging.getLogger(__name__)
+
 
 def _get_config_file():
     default_config_dir = os.path.join(os.path.abspath(
@@ -141,7 +146,7 @@
     extensions_client = {
         'nova': os.extensions_client,
         'cinder': os.volumes_extension_client,
-        'neutron': os.network_client,
+        'neutron': os.network_extensions_client,
         'swift': os.account_client,
     }
     # NOTE (e0ne): Use Cinder API v2 by default because v1 is deprecated
@@ -152,7 +157,7 @@
 
     if service not in extensions_client:
         print('No tempest extensions client for %s' % service)
-        exit(1)
+        sys.exit(1)
     return extensions_client[service]
 
 
@@ -165,7 +170,7 @@
     }
     if service not in extensions_options:
         print('No supported extensions list option for %s' % service)
-        exit(1)
+        sys.exit(1)
     return extensions_options[service]
 
 
@@ -310,8 +315,7 @@
     return avail_services
 
 
-def parse_args():
-    parser = argparse.ArgumentParser()
+def _parser_add_args(parser):
     parser.add_argument('-u', '--update', action='store_true',
                         help='Update the config file with results from api '
                              'queries. This assumes whatever is set in the '
@@ -329,13 +333,21 @@
     parser.add_argument('-r', '--replace-ext', action='store_true',
                         help="If specified the all option will be replaced "
                              "with a full list of extensions")
-    args = parser.parse_args()
-    return args
 
 
-def main():
+def parse_args():
+    parser = argparse.ArgumentParser()
+    _parser_add_args(parser)
+    opts = parser.parse_args()
+    return opts
+
+
+def main(opts=None):
     print('Running config verification...')
-    opts = parse_args()
+    if opts is None:
+        print("Use of: 'verify-tempest-config' is deprecated, "
+              "please use: 'tempest verify-config'")
+        opts = parse_args()
     update = opts.update
     replace = opts.replace_ext
     global CONF_PARSER
@@ -373,5 +385,22 @@
         icreds.clear_creds()
 
 
+class TempestVerifyConfig(command.Command):
+    """Verify your current tempest configuration"""
+
+    def get_parser(self, prog_name):
+        parser = super(TempestVerifyConfig, self).get_parser(prog_name)
+        _parser_add_args(parser)
+        return parser
+
+    def take_action(self, parsed_args):
+        try:
+            return main(parsed_args)
+        except Exception:
+            LOG.exception("Failure verifying configuration.")
+            traceback.print_exc()
+            raise
+        return 0
+
 if __name__ == "__main__":
     main()
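
To illustrate the refactoring above, where the argument definitions move into _parser_add_args so both the deprecated standalone entry point and the new 'tempest verify-config' cliff command can share them, here is a minimal standalone sketch using only argparse; the flags are a subset of the real ones and the second front-end name is illustrative:

    import argparse


    def _parser_add_args(parser):
        # Keep every flag definition in one place so the two front-ends
        # cannot drift apart.
        parser.add_argument('-u', '--update', action='store_true',
                            help='Update the config file with query results')
        parser.add_argument('-r', '--replace-ext', action='store_true',
                            help='Replace the "all" option with a full list')


    def parse_args(argv=None):
        # Deprecated standalone entry point.
        parser = argparse.ArgumentParser()
        _parser_add_args(parser)
        return parser.parse_args(argv)


    def build_subcommand_parser():
        # A new-style front-end (e.g. a cliff command's get_parser) reuses
        # the same helper on its own parser.
        parser = argparse.ArgumentParser(prog='tempest verify-config')
        _parser_add_args(parser)
        return parser


    if __name__ == '__main__':
        print(parse_args(['--update']))
        print(build_subcommand_parser().parse_args(['-r']))
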
diff --git a/tempest/common/api_version_request.py b/tempest/common/api_version_request.py
new file mode 100644
index 0000000..d8a5b56
--- /dev/null
+++ b/tempest/common/api_version_request.py
@@ -0,0 +1,154 @@
+# Copyright 2014 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import re
+
+from tempest import exceptions
+
+
+# Define the minimum and maximum version of the API across all of the
+# REST API. The format of the version is:
+# X.Y where:
+#
+# - X will only be changed if a significant backwards incompatible API
+# change is made which affects the API as a whole. That is, something
+# that is only very rarely incremented.
+#
+# - Y when you make any change to the API. Note that this includes
+# semantic changes which may not affect the input or output formats or
+# even originate in the API code layer. We are not distinguishing
+# between backwards compatible and backwards incompatible changes in
+# the versioning system. It must be made clear in the documentation as
+# to what is a backwards compatible change and what is a backwards
+# incompatible one.
+
+class APIVersionRequest(object):
+    """This class represents an API Version Request.
+
+    This class provides convenience methods for manipulation
+    and comparison of version numbers that we need to do to
+    implement microversions.
+    """
+
+    # NOTE: This 'latest' version is a magic number; we assume no
+    # project (Nova, etc.) will ever reach this number.
+    latest_ver_major = 99999
+    latest_ver_minor = 99999
+
+    def __init__(self, version_string=None):
+        """Create an API version request object.
+
+        :param version_string: String representation of APIVersionRequest.
+            Correct format is 'X.Y', where 'X' and 'Y' are int values.
+            None value should be used to create Null APIVersionRequest,
+            which is equal to 0.0
+        """
+        # NOTE(gmann): 'version_string' as String "None" will be considered as
+        # invalid version string.
+        self.ver_major = 0
+        self.ver_minor = 0
+
+        if version_string is not None:
+            match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
+                             version_string)
+            if match:
+                self.ver_major = int(match.group(1))
+                self.ver_minor = int(match.group(2))
+            elif version_string == 'latest':
+                self.ver_major = self.latest_ver_major
+                self.ver_minor = self.latest_ver_minor
+            else:
+                raise exceptions.InvalidAPIVersionString(
+                    version=version_string)
+
+    def __str__(self):
+        """Debug/Logging representation of object."""
+        return ("API Version Request: %s" % self.get_string())
+
+    def is_null(self):
+        return self.ver_major == 0 and self.ver_minor == 0
+
+    def _format_type_error(self, other):
+        return TypeError("'%(other)s' should be an instance of '%(cls)s'" %
+                         {"other": other, "cls": self.__class__})
+
+    def __lt__(self, other):
+        if not isinstance(other, APIVersionRequest):
+            raise self._format_type_error(other)
+
+        return ((self.ver_major, self.ver_minor) <
+                (other.ver_major, other.ver_minor))
+
+    def __eq__(self, other):
+        if not isinstance(other, APIVersionRequest):
+            raise self._format_type_error(other)
+
+        return ((self.ver_major, self.ver_minor) ==
+                (other.ver_major, other.ver_minor))
+
+    def __gt__(self, other):
+        if not isinstance(other, APIVersionRequest):
+            raise self._format_type_error(other)
+
+        return ((self.ver_major, self.ver_minor) >
+                (other.ver_major, other.ver_minor))
+
+    def __le__(self, other):
+        return self < other or self == other
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __ge__(self, other):
+        return self > other or self == other
+
+    def matches(self, min_version, max_version):
+        """Matches the version object.
+
+        Returns whether the version object represents a version
+        greater than or equal to the minimum version and less than
+        or equal to the maximum version.
+
+        @param min_version: Minimum acceptable version.
+        @param max_version: Maximum acceptable version.
+        @returns: boolean
+
+        If min_version is null then there is no minimum limit.
+        If max_version is null then there is no maximum limit.
+        If self is null then raise ValueError
+        """
+
+        if self.is_null():
+            raise ValueError
+        if max_version.is_null() and min_version.is_null():
+            return True
+        elif max_version.is_null():
+            return min_version <= self
+        elif min_version.is_null():
+            return self <= max_version
+        else:
+            return min_version <= self <= max_version
+
+    def get_string(self):
+        """Version string representation.
+
+        Converts the object to a string which, if used to create a new
+        APIVersionRequest object, results in the same version request.
+        """
+        if self.is_null():
+            return None
+        if (self.ver_major == self.latest_ver_major and
+            self.ver_minor == self.latest_ver_minor):
+            return 'latest'
+        return "%s.%s" % (self.ver_major, self.ver_minor)
diff --git a/tempest/common/api_version_utils.py b/tempest/common/api_version_utils.py
new file mode 100644
index 0000000..c3d977f
--- /dev/null
+++ b/tempest/common/api_version_utils.py
@@ -0,0 +1,95 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.common import api_version_request
+from tempest import exceptions
+
+
+class BaseMicroversionTest(object):
+    """Mixin class for API microversion test class."""
+
+    # NOTE: Basically, each microversion is a small API change and we
+    # can use the same tests for most microversions in most cases,
+    # so it is nice to define the test class so it runs against as many
+    # microversions as possible. Define the microversion range
+    # (min_microversion, max_microversion) on each test class if necessary.
+    min_microversion = None
+    max_microversion = 'latest'
+
+
+def check_skip_with_microversion(test_min_version, test_max_version,
+                                 cfg_min_version, cfg_max_version):
+    min_version = api_version_request.APIVersionRequest(test_min_version)
+    max_version = api_version_request.APIVersionRequest(test_max_version)
+    config_min_version = api_version_request.APIVersionRequest(cfg_min_version)
+    config_max_version = api_version_request.APIVersionRequest(cfg_max_version)
+    if ((min_version > max_version) or
+       (config_min_version > config_max_version)):
+        msg = ("Test Class versions [%s - %s]. "
+               "Configuration versions [%s - %s]."
+               % (min_version.get_string(),
+                  max_version.get_string(),
+                  config_min_version.get_string(),
+                  config_max_version.get_string()))
+        raise exceptions.InvalidAPIVersionRange(msg)
+
+    # NOTE: Select tests which are in range of configuration like
+    #               config min           config max
+    # ----------------+--------------------------+----------------
+    # ...don't-select|
+    #            ...select...  ...select...  ...select...
+    #                                             |don't-select...
+    # ......................select............................
+    if (max_version < config_min_version or
+        config_max_version < min_version):
+        msg = ("The microversion range[%s - %s] of this test is out of the "
+               "configuration range[%s - %s]."
+               % (min_version.get_string(),
+                  max_version.get_string(),
+                  config_min_version.get_string(),
+                  config_max_version.get_string()))
+        raise testtools.TestCase.skipException(msg)
+
+
+def select_request_microversion(test_min_version, cfg_min_version):
+    test_version = api_version_request.APIVersionRequest(test_min_version)
+    cfg_version = api_version_request.APIVersionRequest(cfg_min_version)
+    max_version = cfg_version if cfg_version >= test_version else test_version
+    return max_version.get_string()
+
+
+def assert_version_header_matches_request(api_microversion_header_name,
+                                          api_microversion,
+                                          response_header):
+    """Checks API microversion in resposne header
+
+    Verify whether microversion is present in response header
+    and with specified 'api_microversion' value.
+
+    @param: api_microversion_header_name: Microversion header name
+            Example- "X-OpenStack-Nova-API-Version"
+    @param: api_microversion: Microversion number like "2.10"
+    @param: response_header: Response header where microversion is
+            expected to be present.
+    """
+    api_microversion_header_name = api_microversion_header_name.lower()
+    if (api_microversion_header_name not in response_header or
+        api_microversion != response_header[api_microversion_header_name]):
+        msg = ("Microversion header '%s' with value '%s' does not match in "
+               "response - %s. " % (api_microversion_header_name,
+                                    api_microversion,
+                                    response_header))
+        raise exceptions.InvalidHTTPResponseHeader(msg)
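
A short usage sketch for the helpers above, assuming a tempest tree with this patch (including the InvalidHTTPResponseHeader exception it references) is importable; the version strings and headers are illustrative:

    from tempest.common import api_version_utils
    from tempest import exceptions

    # The microversion actually requested is the larger of the test minimum
    # and the configured minimum.
    selected = api_version_utils.select_request_microversion(
        test_min_version='2.2', cfg_min_version='2.5')
    assert selected == '2.5'

    # Verify the service echoed the requested microversion in its header.
    headers = {'x-openstack-nova-api-version': '2.5'}
    api_version_utils.assert_version_header_matches_request(
        'X-OpenStack-Nova-API-Version', '2.5', headers)

    # A mismatch raises InvalidHTTPResponseHeader instead of passing silently.
    try:
        api_version_utils.assert_version_header_matches_request(
            'X-OpenStack-Nova-API-Version', '2.6', headers)
    except exceptions.InvalidHTTPResponseHeader as err:
        print('mismatch detected: %s' % err)
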
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 41b0529..73505e6 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -29,7 +29,8 @@
 
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
-                       volume_backed=False, **kwargs):
+                       volume_backed=False, name=None, flavor=None,
+                       image_id=None, **kwargs):
     """Common wrapper utility returning a test server.
 
     This method is a common wrapper returning a test server that can be
@@ -43,26 +44,32 @@
     :param wait_until: Server status to wait for the server to reach after
     its creation.
     :param volume_backed: Whether the instance is volume backed or not.
-    :returns a tuple
+    :returns: a tuple
     """
 
     # TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE
 
-    if 'name' in kwargs:
-        name = kwargs.pop('name')
-    else:
-        name = data_utils.rand_name(__name__ + "-instance")
+    name = name
+    flavor = flavor
+    image_id = image_id
 
-    flavor = kwargs.pop('flavor', CONF.compute.flavor_ref)
-    image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+    if name is None:
+        name = data_utils.rand_name(__name__ + "-instance")
+    if flavor is None:
+        flavor = CONF.compute.flavor_ref
+    if image_id is None:
+        image_id = CONF.compute.image_ref
 
     kwargs = fixed_network.set_networks_kwarg(
         tenant_network, kwargs) or {}
 
+    multiple_create_request = (max(kwargs.get('min_count', 0),
+                                   kwargs.get('max_count', 0)) > 1)
+
     if CONF.validation.run_validation and validatable:
         # As a first implementation, multiple pingable or sshable servers will
         # not be supported
-        if 'min_count' in kwargs or 'max_count' in kwargs:
+        if multiple_create_request:
             msg = ("Multiple pingable or sshable servers not supported at "
                    "this stage.")
             raise ValueError(msg)
@@ -116,7 +123,7 @@
 
     # handle the case of multiple servers
     servers = []
-    if 'min_count' in kwargs or 'max_count' in kwargs:
+    if multiple_create_request:
         # Get servers created which name match with name param.
         body_servers = clients.servers_client.list_servers()
         servers = \
@@ -127,7 +134,7 @@
 
     # The name of the method to associate a floating IP to as server is too
     # long for PEP8 compliance so:
-    assoc = clients.floating_ips_client.associate_floating_ip_to_server
+    assoc = clients.compute_floating_ips_client.associate_floating_ip_to_server
 
     if wait_until:
         for server in servers:
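
A standalone sketch of the multiple-create detection introduced above: a request now counts as a multiple create only when min_count or max_count asks for more than one server, not merely because those keys are present in kwargs:

    def is_multiple_create_request(kwargs):
        # Same expression as in create_test_server above.
        return max(kwargs.get('min_count', 0), kwargs.get('max_count', 0)) > 1


    assert not is_multiple_create_request({})
    assert not is_multiple_create_request({'min_count': 1, 'max_count': 1})
    assert is_multiple_create_request({'max_count': 3})
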
diff --git a/tempest/common/cred_client.py b/tempest/common/cred_client.py
index 79a502a..6df7eb2 100644
--- a/tempest/common/cred_client.py
+++ b/tempest/common/cred_client.py
@@ -24,18 +24,25 @@
 
 @six.add_metaclass(abc.ABCMeta)
 class CredsClient(object):
-    """This class is a wrapper around the identity clients, to provide a
-     single interface for managing credentials in both v2 and v3 cases.
-     It's not bound to created credentials, only to a specific set of admin
-     credentials used for generating credentials.
+    """This class is a wrapper around the identity clients
+
+     to provide a single interface for managing credentials in both v2 and v3
+     cases. It's not bound to created credentials, only to a specific set of
+     admin credentials used for generating credentials.
     """
 
-    def __init__(self, identity_client):
+    def __init__(self, identity_client, projects_client=None,
+                 roles_client=None, users_client=None):
         # The client implies version and credentials
         self.identity_client = identity_client
+        # this is temporary until the v3 project client is
+        # separated, then projects_client will become mandatory
+        self.projects_client = projects_client or identity_client
+        self.roles_client = roles_client or identity_client
+        self.users_client = users_client or identity_client
 
     def create_user(self, username, password, project, email):
-        user = self.identity_client.create_user(
+        user = self.users_client.create_user(
             username, password, project['id'], email)
         if 'user' in user:
             user = user['user']
@@ -55,7 +62,7 @@
 
     def create_user_role(self, role_name):
         if not self._check_role_exists(role_name):
-            self.identity_client.create_role(role_name)
+            self.roles_client.create_role(name=role_name)
 
     def assign_user_role(self, user, project, role_name):
         role = self._check_role_exists(role_name)
@@ -63,8 +70,8 @@
             msg = 'No "%s" role found' % role_name
             raise lib_exc.NotFound(msg)
         try:
-            self.identity_client.assign_user_role(project['id'], user['id'],
-                                                  role['id'])
+            self.roles_client.assign_user_role(project['id'], user['id'],
+                                               role['id'])
         except lib_exc.Conflict:
             LOG.debug("Role %s already assigned on project %s for user %s" % (
                 role['id'], project['id'], user['id']))
@@ -81,17 +88,24 @@
         pass
 
     def delete_user(self, user_id):
-        self.identity_client.delete_user(user_id)
+        self.users_client.delete_user(user_id)
 
     def _list_roles(self):
-        roles = self.identity_client.list_roles()['roles']
+        roles = self.roles_client.list_roles()['roles']
         return roles
 
 
 class V2CredsClient(CredsClient):
 
+    def __init__(self, identity_client, projects_client, roles_client,
+                 users_client):
+        super(V2CredsClient, self).__init__(identity_client,
+                                            projects_client,
+                                            roles_client,
+                                            users_client)
+
     def create_project(self, name, description):
-        tenant = self.identity_client.create_tenant(
+        tenant = self.projects_client.create_tenant(
             name=name, description=description)['tenant']
         return tenant
 
@@ -107,7 +121,7 @@
             password=password)
 
     def delete_project(self, project_id):
-        self.identity_client.delete_tenant(project_id)
+        self.projects_client.delete_tenant(project_id)
 
 
 class V3CredsClient(CredsClient):
@@ -151,8 +165,13 @@
         return roles
 
 
-def get_creds_client(identity_client, project_domain_name=None):
+def get_creds_client(identity_client,
+                     projects_client=None,
+                     roles_client=None,
+                     users_client=None,
+                     project_domain_name=None):
     if isinstance(identity_client, v2_identity.IdentityClient):
-        return V2CredsClient(identity_client)
+        return V2CredsClient(identity_client, projects_client, roles_client,
+                             users_client)
     else:
         return V3CredsClient(identity_client, project_domain_name)
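
A standalone sketch of the fallback wiring above: the new optional projects/roles/users clients default to the identity client until dedicated clients exist (the v3 case); the Fake* classes below are placeholders, not tempest objects:

    class FakeIdentityClient(object):
        def create_role(self, name):
            return {'role': {'name': name, 'via': 'identity_client'}}


    class FakeRolesClient(object):
        def create_role(self, name):
            return {'role': {'name': name, 'via': 'roles_client'}}


    class CredsClientSketch(object):
        def __init__(self, identity_client, roles_client=None):
            self.identity_client = identity_client
            # Same idea as CredsClient.__init__: fall back to the identity
            # client when no dedicated client is supplied.
            self.roles_client = roles_client or identity_client


    with_roles = CredsClientSketch(FakeIdentityClient(), FakeRolesClient())
    without_roles = CredsClientSketch(FakeIdentityClient())
    print(with_roles.roles_client.create_role(name='member'))
    print(without_roles.roles_client.create_role(name='member'))
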
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index 3575998..9dd89ea 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -14,93 +14,18 @@
 
 import abc
 
-from oslo_log import log as logging
 import six
 from tempest_lib import auth
 
-from tempest import config
 from tempest import exceptions
 
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-# Type of credentials available from configuration
-CREDENTIAL_TYPES = {
-    'identity_admin': ('auth', 'admin'),
-    'user': ('identity', None),
-    'alt_user': ('identity', 'alt')
-}
-
-DEFAULT_PARAMS = {
-    'disable_ssl_certificate_validation':
-        CONF.identity.disable_ssl_certificate_validation,
-    'ca_certs': CONF.identity.ca_certificates_file,
-    'trace_requests': CONF.debug.trace_requests
-}
-
-
-# Read credentials from configuration, builds a Credentials object
-# based on the specified or configured version
-def get_configured_credentials(credential_type, fill_in=True,
-                               identity_version=None):
-    identity_version = identity_version or CONF.identity.auth_version
-    if identity_version not in ('v2', 'v3'):
-        raise exceptions.InvalidConfiguration(
-            'Unsupported auth version: %s' % identity_version)
-    if credential_type not in CREDENTIAL_TYPES:
-        raise exceptions.InvalidCredentials()
-    conf_attributes = ['username', 'password', 'tenant_name']
-    if identity_version == 'v3':
-        conf_attributes.append('domain_name')
-    # Read the parts of credentials from config
-    params = DEFAULT_PARAMS.copy()
-    section, prefix = CREDENTIAL_TYPES[credential_type]
-    for attr in conf_attributes:
-        _section = getattr(CONF, section)
-        if prefix is None:
-            params[attr] = getattr(_section, attr)
-        else:
-            params[attr] = getattr(_section, prefix + "_" + attr)
-    # Build and validate credentials. We are reading configured credentials,
-    # so validate them even if fill_in is False
-    credentials = get_credentials(fill_in=fill_in,
-                                  identity_version=identity_version, **params)
-    if not fill_in:
-        if not credentials.is_valid():
-            msg = ("The %s credentials are incorrectly set in the config file."
-                   " Double check that all required values are assigned" %
-                   credential_type)
-            raise exceptions.InvalidConfiguration(msg)
-    return credentials
-
-
-# Wrapper around auth.get_credentials to use the configured identity version
-# is none is specified
-def get_credentials(fill_in=True, identity_version=None, **kwargs):
-    params = dict(DEFAULT_PARAMS, **kwargs)
-    identity_version = identity_version or CONF.identity.auth_version
-    # In case of "v3" add the domain from config if not specified
-    if identity_version == 'v3':
-        domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
-                            if 'domain' in x)
-        if not domain_fields.intersection(kwargs.keys()):
-            domain_name = CONF.auth.default_credentials_domain_name
-            params['user_domain_name'] = domain_name
-
-        auth_url = CONF.identity.uri_v3
-    else:
-        auth_url = CONF.identity.uri
-    return auth.get_credentials(auth_url,
-                                fill_in=fill_in,
-                                identity_version=identity_version,
-                                **params)
-
 
 @six.add_metaclass(abc.ABCMeta)
 class CredentialProvider(object):
     def __init__(self, identity_version, name=None, network_resources=None,
                  credentials_domain=None, admin_role=None):
         """A CredentialProvider supplies credentials to test classes.
+
         :param identity_version: Identity version of the credentials provided
         :param name: Name of the calling test. Included in provisioned
                      credentials when credentials are provisioned on the fly
diff --git a/tempest/common/credentials.py b/tempest/common/credentials.py
deleted file mode 100644
index 76f8afe..0000000
--- a/tempest/common/credentials.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import os
-
-from tempest.common import cred_provider
-from tempest.common import dynamic_creds
-from tempest.common import preprov_creds
-from tempest import config
-from tempest import exceptions
-
-CONF = config.CONF
-
-
-# Return the right implementation of CredentialProvider based on config
-# Dropping interface and password, as they are never used anyways
-# TODO(andreaf) Drop them from the CredentialsProvider interface completely
-def get_credentials_provider(name, network_resources=None,
-                             force_tenant_isolation=False,
-                             identity_version=None):
-    # If a test requires a new account to work, it can have it via forcing
-    # dynamic credentials. A new account will be produced only for that test.
-    # In case admin credentials are not available for the account creation,
-    # the test should be skipped else it would fail.
-    identity_version = identity_version or CONF.identity.auth_version
-    if CONF.auth.use_dynamic_credentials or force_tenant_isolation:
-        return dynamic_creds.DynamicCredentialProvider(
-            name=name,
-            network_resources=network_resources,
-            identity_version=identity_version,
-            credentials_domain=CONF.auth.default_credentials_domain_name,
-            admin_role=CONF.identity.admin_role)
-    else:
-        if (CONF.auth.test_accounts_file and
-                os.path.isfile(CONF.auth.test_accounts_file)):
-            # Most params are not relevant for pre-created accounts
-            return preprov_creds.PreProvisionedCredentialProvider(
-                name=name, identity_version=identity_version,
-                credentials_domain=CONF.auth.default_credentials_domain_name,
-                admin_role=CONF.identity.admin_role)
-        else:
-            return preprov_creds.NonLockingCredentialProvider(
-                name=name, identity_version=identity_version,
-                admin_role=CONF.identity.admin_role)
-
-
-# We want a helper function here to check and see if admin credentials
-# are available so we can do a single call from skip_checks if admin
-# creds area available.
-# This depends on identity_version as there may be admin credentials
-# available for v2 but not for v3.
-def is_admin_available(identity_version):
-    is_admin = True
-    # If dynamic credentials is enabled admin will be available
-    if CONF.auth.use_dynamic_credentials:
-        return is_admin
-    # Check whether test accounts file has the admin specified or not
-    elif (CONF.auth.test_accounts_file and
-            os.path.isfile(CONF.auth.test_accounts_file)):
-        check_accounts = preprov_creds.PreProvisionedCredentialProvider(
-            identity_version=identity_version, name='check_admin',
-            admin_role=CONF.identity.admin_role)
-        if not check_accounts.admin_available():
-            is_admin = False
-    else:
-        try:
-            cred_provider.get_configured_credentials(
-                'identity_admin', fill_in=False,
-                identity_version=identity_version)
-        except exceptions.InvalidConfiguration:
-            is_admin = False
-    return is_admin
-
-
-# We want a helper function here to check and see if alt credentials
-# are available so we can do a single call from skip_checks if alt
-# creds area available.
-# This depends on identity_version as there may be alt credentials
-# available for v2 but not for v3.
-def is_alt_available(identity_version):
-    # If dynamic credentials is enabled alt will be available
-    if CONF.auth.use_dynamic_credentials:
-        return True
-    # Check whether test accounts file has the admin specified or not
-    if (CONF.auth.test_accounts_file and
-            os.path.isfile(CONF.auth.test_accounts_file)):
-        check_accounts = preprov_creds.PreProvisionedCredentialProvider(
-            identity_version=identity_version, name='check_alt',
-            admin_role=CONF.identity.admin_role)
-    else:
-        check_accounts = preprov_creds.NonLockingCredentialProvider(
-            identity_version=identity_version, name='check_alt',
-            admin_role=CONF.identity.admin_role)
-    try:
-        if not check_accounts.is_multi_user():
-            return False
-        else:
-            return True
-    except exceptions.InvalidConfiguration:
-        return False
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
new file mode 100644
index 0000000..24c1198
--- /dev/null
+++ b/tempest/common/credentials_factory.py
@@ -0,0 +1,324 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from oslo_concurrency import lockutils
+from tempest_lib import auth
+
+from tempest import clients
+from tempest.common import cred_provider
+from tempest.common import dynamic_creds
+from tempest.common import preprov_creds
+from tempest import config
+from tempest import exceptions
+
+CONF = config.CONF
+
+
+"""This module provides factories of credential and credential providers
+
+Credentials providers and clients are (going to be) part of tempest-lib,
+and so they may not hold any dependency to tempest configuration.
+
+Methods in this module collect the relevant configuration details and pass
+them to credentials providers and clients, so that tests can have easy
+access to these features.
+
+Client managers with hard-coded configured credentials are also moved here,
+to avoid circular dependencies."""
+
+# === Credential Providers
+
+
+# Subset of the parameters of credential providers that depend on configuration
+def _get_common_provider_params():
+    return {
+        'credentials_domain': CONF.auth.default_credentials_domain_name,
+        'admin_role': CONF.identity.admin_role
+    }
+
+
+def _get_dynamic_provider_params():
+    return _get_common_provider_params()
+
+
+def _get_preprov_provider_params():
+    _common_params = _get_common_provider_params()
+    reseller_admin_role = CONF.object_storage.reseller_admin_role
+    return dict(_common_params, **dict([
+        ('accounts_lock_dir', lockutils.get_lock_path(CONF)),
+        ('test_accounts_file', CONF.auth.test_accounts_file),
+        ('object_storage_operator_role', CONF.object_storage.operator_role),
+        ('object_storage_reseller_admin_role', reseller_admin_role)
+    ]))
+
+
+class LegacyCredentialProvider(cred_provider.CredentialProvider):
+
+    def __init__(self, identity_version):
+        """Credentials provider which returns credentials from tempest.conf
+
+        Credentials provider which always returns the first and second
+        configured accounts as primary and alt users.
+        Credentials from tempest.conf are deprecated, and so is this
+        credential provider.
+
+        This credential provider can be used in case of serial test execution
+        to preserve the current behaviour of the serial tempest run.
+
+        :param identity_version: Version of the identity API
+        :return: CredentialProvider
+        """
+        super(LegacyCredentialProvider, self).__init__(
+            identity_version=identity_version)
+        self._creds = {}
+
+    def _unique_creds(self, cred_arg=None):
+        """Verify that the configured credentials are valid and distinct """
+        try:
+            user = self.get_primary_creds()
+            alt_user = self.get_alt_creds()
+            return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
+        except exceptions.InvalidCredentials as ic:
+            msg = "At least one of the configured credentials is " \
+                  "not valid: %s" % ic.message
+            raise exceptions.InvalidConfiguration(msg)
+
+    def is_multi_user(self):
+        return self._unique_creds('username')
+
+    def is_multi_tenant(self):
+        return self._unique_creds('tenant_id')
+
+    def get_primary_creds(self):
+        if self._creds.get('primary'):
+            return self._creds.get('primary')
+        primary_credential = get_configured_credentials(
+            credential_type='user', fill_in=False,
+            identity_version=self.identity_version)
+        self._creds['primary'] = cred_provider.TestResources(
+            primary_credential)
+        return self._creds['primary']
+
+    def get_alt_creds(self):
+        if self._creds.get('alt'):
+            return self._creds.get('alt')
+        alt_credential = get_configured_credentials(
+            credential_type='alt_user', fill_in=False,
+            identity_version=self.identity_version)
+        self._creds['alt'] = cred_provider.TestResources(
+            alt_credential)
+        return self._creds['alt']
+
+    def clear_creds(self):
+        self._creds = {}
+
+    def get_admin_creds(self):
+        if self._creds.get('admin'):
+            return self._creds.get('admin')
+        creds = get_configured_credentials(
+            "identity_admin", fill_in=False)
+        self._creds['admin'] = cred_provider.TestResources(creds)
+        return self._creds['admin']
+
+    def get_creds_by_roles(self, roles, force_new=False):
+        msg = "Credentials being specified through the config file can not be"\
+              " used with tests that specify using credentials by roles. "\
+              "Either exclude/skip the tests doing this or use either an "\
+              "test_accounts_file or dynamic credentials."
+        raise exceptions.InvalidConfiguration(msg)
+
+    def is_role_available(self, role):
+        # NOTE(andreaf) LegacyCredentialProvider does not support credentials
+        # by role, so this always returns False.
+        # Tests that rely on credentials by role should use this to skip
+        # when this credential provider is used.
+        return False
+
+
+# Return the right implementation of CredentialProvider based on config
+# Dropping interface and password, as they are never used anyways
+# TODO(andreaf) Drop them from the CredentialsProvider interface completely
+def get_credentials_provider(name, network_resources=None,
+                             force_tenant_isolation=False,
+                             identity_version=None):
+    # If a test requires a new account to work, it can have it via forcing
+    # dynamic credentials. A new account will be produced only for that test.
+    # In case admin credentials are not available for the account creation,
+    # the test should be skipped else it would fail.
+    identity_version = identity_version or CONF.identity.auth_version
+    if CONF.auth.use_dynamic_credentials or force_tenant_isolation:
+        admin_creds = get_configured_credentials(
+            'identity_admin', fill_in=True, identity_version=identity_version)
+        return dynamic_creds.DynamicCredentialProvider(
+            name=name,
+            network_resources=network_resources,
+            identity_version=identity_version,
+            admin_creds=admin_creds,
+            **_get_dynamic_provider_params())
+    else:
+        if CONF.auth.test_accounts_file:
+            # Most params are not relevant for pre-created accounts
+            return preprov_creds.PreProvisionedCredentialProvider(
+                name=name, identity_version=identity_version,
+                **_get_preprov_provider_params())
+        else:
+            # Dynamic credentials are disabled, and the account file is not
+            # defined - we fall back on credentials configured in tempest.conf
+            return LegacyCredentialProvider(identity_version=identity_version)
+
+
+# We want a helper function here to check and see if admin credentials
+# are available so we can do a single call from skip_checks if admin
+# creds are available.
+# This depends on identity_version as there may be admin credentials
+# available for v2 but not for v3.
+def is_admin_available(identity_version):
+    is_admin = True
+    # If dynamic credentials is enabled admin will be available
+    if CONF.auth.use_dynamic_credentials:
+        return is_admin
+    # Check whether test accounts file has the admin specified or not
+    elif CONF.auth.test_accounts_file:
+        check_accounts = preprov_creds.PreProvisionedCredentialProvider(
+            identity_version=identity_version, name='check_admin',
+            **_get_preprov_provider_params())
+        if not check_accounts.admin_available():
+            is_admin = False
+    else:
+        try:
+            get_configured_credentials('identity_admin', fill_in=False,
+                                       identity_version=identity_version)
+        except exceptions.InvalidConfiguration:
+            is_admin = False
+    return is_admin
+
+
+# We want a helper function here to check and see if alt credentials
+# are available so we can do a single call from skip_checks if alt
+# creds are available.
+# This depends on identity_version as there may be alt credentials
+# available for v2 but not for v3.
+def is_alt_available(identity_version):
+    # If dynamic credentials is enabled alt will be available
+    if CONF.auth.use_dynamic_credentials:
+        return True
+    # Check whether test accounts file has the admin specified or not
+    if CONF.auth.test_accounts_file:
+        check_accounts = preprov_creds.PreProvisionedCredentialProvider(
+            identity_version=identity_version, name='check_alt',
+            **_get_preprov_provider_params())
+    else:
+        check_accounts = LegacyCredentialProvider(identity_version)
+    try:
+        if not check_accounts.is_multi_user():
+            return False
+        else:
+            return True
+    except exceptions.InvalidConfiguration:
+        return False
+
+# === Credentials
+
+# Type of credentials available from configuration
+CREDENTIAL_TYPES = {
+    'identity_admin': ('auth', 'admin'),
+    'user': ('identity', None),
+    'alt_user': ('identity', 'alt')
+}
+
+DEFAULT_PARAMS = {
+    'disable_ssl_certificate_validation':
+        CONF.identity.disable_ssl_certificate_validation,
+    'ca_certs': CONF.identity.ca_certificates_file,
+    'trace_requests': CONF.debug.trace_requests
+}
+
+
+# Read credentials from configuration and build a Credentials object
+# based on the specified or configured version
+def get_configured_credentials(credential_type, fill_in=True,
+                               identity_version=None):
+    identity_version = identity_version or CONF.identity.auth_version
+
+    if identity_version not in ('v2', 'v3'):
+        raise exceptions.InvalidConfiguration(
+            'Unsupported auth version: %s' % identity_version)
+
+    if credential_type not in CREDENTIAL_TYPES:
+        raise exceptions.InvalidCredentials()
+    conf_attributes = ['username', 'password', 'tenant_name']
+
+    if identity_version == 'v3':
+        conf_attributes.append('domain_name')
+    # Read the parts of credentials from config
+    params = DEFAULT_PARAMS.copy()
+    section, prefix = CREDENTIAL_TYPES[credential_type]
+    for attr in conf_attributes:
+        _section = getattr(CONF, section)
+        if prefix is None:
+            params[attr] = getattr(_section, attr)
+        else:
+            params[attr] = getattr(_section, prefix + "_" + attr)
+    # Build and validate credentials. We are reading configured credentials,
+    # so validate them even if fill_in is False
+    credentials = get_credentials(fill_in=fill_in,
+                                  identity_version=identity_version, **params)
+    if not fill_in:
+        if not credentials.is_valid():
+            msg = ("The %s credentials are incorrectly set in the config file."
+                   " Double check that all required values are assigned" %
+                   credential_type)
+            raise exceptions.InvalidConfiguration(msg)
+    return credentials
+
+
+# Wrapper around auth.get_credentials to use the configured identity version
+# if none is specified
+def get_credentials(fill_in=True, identity_version=None, **kwargs):
+    params = dict(DEFAULT_PARAMS, **kwargs)
+    identity_version = identity_version or CONF.identity.auth_version
+    # In case of "v3" add the domain from config if not specified
+    if identity_version == 'v3':
+        domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
+                            if 'domain' in x)
+        if not domain_fields.intersection(kwargs.keys()):
+            domain_name = CONF.auth.default_credentials_domain_name
+            params['user_domain_name'] = domain_name
+
+        auth_url = CONF.identity.uri_v3
+    else:
+        auth_url = CONF.identity.uri
+    return auth.get_credentials(auth_url,
+                                fill_in=fill_in,
+                                identity_version=identity_version,
+                                **params)
+
+# === Credential / client managers
+
+
+class ConfiguredUserManager(clients.Manager):
+    """Manager that uses user credentials for its managed client objects"""
+
+    def __init__(self, service=None):
+        super(ConfiguredUserManager, self).__init__(
+            credentials=get_configured_credentials('user'),
+            service=service)
+
+
+class AdminManager(clients.Manager):
+    """Manager that uses admin credentials for its managed client objects"""
+
+    def __init__(self, service=None):
+        super(AdminManager, self).__init__(
+            credentials=get_configured_credentials('identity_admin'),
+            service=service)
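
A standalone sketch of the provider-selection logic in get_credentials_provider above; the Config namedtuple stands in for tempest CONF and the returned strings stand in for the real provider classes:

    import collections

    Config = collections.namedtuple(
        'Config', ['use_dynamic_credentials', 'test_accounts_file'])


    def pick_provider(conf, force_tenant_isolation=False):
        if conf.use_dynamic_credentials or force_tenant_isolation:
            return 'DynamicCredentialProvider'
        if conf.test_accounts_file:
            return 'PreProvisionedCredentialProvider'
        # Dynamic credentials disabled and no accounts file configured:
        # fall back to the deprecated credentials from tempest.conf.
        return 'LegacyCredentialProvider'


    assert pick_provider(Config(True, None)) == 'DynamicCredentialProvider'
    assert (pick_provider(Config(False, 'accounts.yaml')) ==
            'PreProvisionedCredentialProvider')
    assert pick_provider(Config(False, None)) == 'LegacyCredentialProvider'
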
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index 839088c..8ba33ed 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -19,8 +19,7 @@
 
 
 class ExistsAllResponseHeaders(object):
-    """
-    Specific matcher to check the existence of Swift's response headers
+    """Specific matcher to check the existence of Swift's response headers
 
     This matcher checks the existence of common headers for each HTTP method
     or the target, which means account, container or object.
@@ -30,7 +29,8 @@
     """
 
     def __init__(self, target, method):
-        """
+        """Initialization of ExistsAllResponseHeaders
+
         param: target Account/Container/Object
         param: method PUT/GET/HEAD/DELETE/COPY/POST
         """
@@ -38,7 +38,8 @@
         self.method = method
 
     def match(self, actual):
-        """
+        """Check headers
+
         param: actual HTTP response headers
         """
         # Check common headers for all HTTP methods
@@ -95,10 +96,7 @@
 
 
 class NonExistentHeader(object):
-    """
-    Informs an error message for end users in the case of missing a
-    certain header in Swift's responses
-    """
+    """Informs an error message in the case of missing a certain header"""
 
     def __init__(self, header):
         self.header = header
@@ -111,9 +109,7 @@
 
 
 class AreAllWellFormatted(object):
-    """
-    Specific matcher to check the correctness of formats of values of Swift's
-    response headers
+    """Specific matcher to check the correctness of formats of values
 
     This matcher checks the format of values of response headers.
     When checking the format of values of 'specific' headers such as
@@ -149,10 +145,7 @@
 
 
 class InvalidFormat(object):
-    """
-    Informs an error message for end users if a format of a certain header
-    is invalid
-    """
+    """Informs an error message if a format of a certain header is invalid"""
 
     def __init__(self, key, value):
         self.key = key
@@ -166,8 +159,9 @@
 
 
 class MatchesDictExceptForKeys(object):
-    """Matches two dictionaries. Verifies all items are equals except for those
-    identified by a list of keys.
+    """Matches two dictionaries.
+
+    Verifies all items are equal except for those identified by a list of keys
     """
 
     def __init__(self, expected, excluded_keys=None):
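
A usage sketch for MatchesDictExceptForKeys, assuming tempest and testtools are importable; the dictionaries are illustrative:

    import unittest

    import testtools

    from tempest.common import custom_matchers


    class MatcherExample(testtools.TestCase):
        def test_ignores_excluded_keys(self):
            expected = {'name': 'obj-1', 'size': 42, 'etag': 'aaa'}
            observed = {'name': 'obj-1', 'size': 42, 'etag': 'bbb'}
            # Every item must match except those explicitly excluded.
            self.assertThat(
                observed,
                custom_matchers.MatchesDictExceptForKeys(
                    expected, excluded_keys=['etag']))


    if __name__ == '__main__':
        unittest.main()
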
diff --git a/tempest/common/dynamic_creds.py b/tempest/common/dynamic_creds.py
index e2df560..8d3a24d 100644
--- a/tempest/common/dynamic_creds.py
+++ b/tempest/common/dynamic_creds.py
@@ -31,19 +31,40 @@
 class DynamicCredentialProvider(cred_provider.CredentialProvider):
 
     def __init__(self, identity_version, name=None, network_resources=None,
-                 credentials_domain=None, admin_role=None):
+                 credentials_domain=None, admin_role=None, admin_creds=None):
+        """Creates credentials dynamically for tests
+
+        A credential provider that, based on an initial set of
+        admin credentials, creates new credentials on the fly for
+        tests to use and then discard.
+
+        :param str identity_version: identity API version to use `v2` or `v3`
+        :param str admin_role: name of the admin role added to admin users
+        :param str name: when specified, it is included in the names of the
+                         dynamically created resources
+        :param str credentials_domain: name of the domain where the users
+                                       are created. If not defined, the project
+                                       domain from admin_credentials is used
+        :param dict network_resources: network resources to be created for
+                                       the created credentials
+        :param Credentials admin_creds: initial admin credentials
+        """
         super(DynamicCredentialProvider, self).__init__(
-            identity_version=identity_version, name=name,
-            network_resources=network_resources,
-            credentials_domain=credentials_domain, admin_role=admin_role)
+            identity_version=identity_version, admin_role=admin_role,
+            name=name, credentials_domain=credentials_domain,
+            network_resources=network_resources)
+        self.network_resources = network_resources
         self._creds = {}
         self.ports = []
-        self.default_admin_creds = cred_provider.get_configured_credentials(
-            'identity_admin', fill_in=True,
-            identity_version=self.identity_version)
-        (self.identity_admin_client, self.network_admin_client,
+        self.default_admin_creds = admin_creds
+        (self.identity_admin_client, self.tenants_admin_client,
+         self.roles_admin_client,
+         self.users_admin_client,
+         self.network_admin_client,
          self.networks_admin_client,
-         self.subnets_admin_client) = self._get_admin_clients()
+         self.subnets_admin_client,
+         self.ports_admin_client,
+         self.security_groups_admin_client) = self._get_admin_clients()
         # Domain where isolated credentials are provisioned (v3 only).
         # Use that of the admin account is None is configured.
         self.creds_domain_name = None
@@ -52,22 +73,29 @@
                 self.default_admin_creds.project_domain_name or
                 self.credentials_domain)
         self.creds_client = cred_client.get_creds_client(
-            self.identity_admin_client, self.creds_domain_name)
+            self.identity_admin_client,
+            self.tenants_admin_client,
+            self.roles_admin_client,
+            self.users_admin_client,
+            self.creds_domain_name)
 
     def _get_admin_clients(self):
-        """
-        Returns a tuple with instances of the following admin clients (in this
-        order):
+        """Returns a tuple with instances of the following admin clients
+
+        (in this order):
             identity
             network
         """
         os = clients.Manager(self.default_admin_creds)
         if self.identity_version == 'v2':
-            return (os.identity_client, os.network_client, os.networks_client,
-                    os.subnets_client)
+            return (os.identity_client, os.tenants_client, os.roles_client,
+                    os.users_client, os.network_client, os.networks_client,
+                    os.subnets_client, os.ports_client,
+                    os.security_groups_client)
         else:
-            return (os.identity_v3_client, os.network_client,
-                    os.networks_client, os.subnets_client)
+            return (os.identity_v3_client, None, None, None, os.network_client,
+                    os.networks_client, os.subnets_client, os.ports_client,
+                    os.security_groups_client)
 
     def _create_creds(self, suffix="", admin=False, roles=None):
         """Create random credentials under the following schema.
@@ -205,8 +233,8 @@
         return resp_body['router']
 
     def _add_router_interface(self, router_id, subnet_id):
-        self.network_admin_client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
+        self.network_admin_client.add_router_interface(router_id,
+                                                       subnet_id=subnet_id)
 
     def get_credentials(self, credential_type):
         if self._creds.get(str(credential_type)):
@@ -260,36 +288,36 @@
         try:
             net_client.delete_router(router_id)
         except lib_exc.NotFound:
-            LOG.warn('router with name: %s not found for delete' %
-                     router_name)
+            LOG.warning('router with name: %s not found for delete' %
+                        router_name)
 
     def _clear_isolated_subnet(self, subnet_id, subnet_name):
         client = self.subnets_admin_client
         try:
             client.delete_subnet(subnet_id)
         except lib_exc.NotFound:
-            LOG.warn('subnet with name: %s not found for delete' %
-                     subnet_name)
+            LOG.warning('subnet with name: %s not found for delete' %
+                        subnet_name)
 
     def _clear_isolated_network(self, network_id, network_name):
         net_client = self.networks_admin_client
         try:
             net_client.delete_network(network_id)
         except lib_exc.NotFound:
-            LOG.warn('network with name: %s not found for delete' %
-                     network_name)
+            LOG.warning('network with name: %s not found for delete' %
+                        network_name)
 
     def _cleanup_default_secgroup(self, tenant):
-        net_client = self.network_admin_client
-        resp_body = net_client.list_security_groups(tenant_id=tenant,
+        nsg_client = self.security_groups_admin_client
+        resp_body = nsg_client.list_security_groups(tenant_id=tenant,
                                                     name="default")
         secgroups_to_delete = resp_body['security_groups']
         for secgroup in secgroups_to_delete:
             try:
-                net_client.delete_security_group(secgroup['id'])
+                nsg_client.delete_security_group(secgroup['id'])
             except lib_exc.NotFound:
-                LOG.warn('Security group %s, id %s not found for clean-up' %
-                         (secgroup['name'], secgroup['id']))
+                LOG.warning('Security group %s, id %s not found for clean-up' %
+                            (secgroup['name'], secgroup['id']))
 
     def _clear_isolated_net_resources(self):
         net_client = self.network_admin_client
@@ -305,11 +333,12 @@
             if (not self.network_resources or
                     (self.network_resources.get('router') and creds.subnet)):
                 try:
-                    net_client.remove_router_interface_with_subnet_id(
-                        creds.router['id'], creds.subnet['id'])
+                    net_client.remove_router_interface(
+                        creds.router['id'],
+                        subnet_id=creds.subnet['id'])
                 except lib_exc.NotFound:
-                    LOG.warn('router with name: %s not found for delete' %
-                             creds.router['name'])
+                    LOG.warning('router with name: %s not found for delete' %
+                                creds.router['name'])
                 self._clear_isolated_router(creds.router['id'],
                                             creds.router['name'])
             if (not self.network_resources or
@@ -329,15 +358,15 @@
             try:
                 self.creds_client.delete_user(creds.user_id)
             except lib_exc.NotFound:
-                LOG.warn("user with name: %s not found for delete" %
-                         creds.username)
+                LOG.warning("user with name: %s not found for delete" %
+                            creds.username)
             try:
                 if CONF.service_available.neutron:
                     self._cleanup_default_secgroup(creds.tenant_id)
                 self.creds_client.delete_project(creds.tenant_id)
             except lib_exc.NotFound:
-                LOG.warn("tenant with name: %s not found for delete" %
-                         creds.tenant_name)
+                LOG.warning("tenant with name: %s not found for delete" %
+                            creds.tenant_name)
         self._creds = {}
 
     def is_multi_user(self):
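
A standalone sketch of the clean-up pattern in _cleanup_default_secgroup above: deletions now go through the dedicated security groups client and tolerate NotFound, so a partially cleaned tenant does not abort credential teardown; the fake client and exception below are placeholders:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)


    class NotFound(Exception):
        pass


    class FakeSecGroupClient(object):
        def list_security_groups(self, **filters):
            return {'security_groups': [{'id': 'sg-1', 'name': 'default'}]}

        def delete_security_group(self, sg_id):
            raise NotFound(sg_id)


    def cleanup_default_secgroup(nsg_client, tenant_id):
        body = nsg_client.list_security_groups(tenant_id=tenant_id,
                                               name='default')
        for secgroup in body['security_groups']:
            try:
                nsg_client.delete_security_group(secgroup['id'])
            except NotFound:
                LOG.warning('Security group %s, id %s not found for clean-up',
                            secgroup['name'], secgroup['id'])


    cleanup_default_secgroup(FakeSecGroupClient(), 'tenant-1')
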
diff --git a/tempest/common/fixed_network.py b/tempest/common/fixed_network.py
index b81830a..3fc1365 100644
--- a/tempest/common/fixed_network.py
+++ b/tempest/common/fixed_network.py
@@ -15,11 +15,8 @@
 
 from tempest_lib.common.utils import misc as misc_utils
 
-from tempest import config
 from tempest import exceptions
 
-CONF = config.CONF
-
 LOG = logging.getLogger(__name__)
 
 
@@ -31,14 +28,14 @@
         object to use for making the network lists api request
     :return: The full dictionary for the network in question
     :rtype: dict
-    :raises InvalidConfiguration: If the name provided is invalid, the networks
+    :raises InvalidTestResource: If the name provided is invalid, the networks
         list returns a 404, there are no found networks, or the found network
         is invalid
     """
     caller = misc_utils.find_test_caller()
 
     if not name:
-        raise exceptions.InvalidConfiguration()
+        raise exceptions.InvalidTestResource(type='network', name=name)
 
     networks = compute_networks_client.list_networks()['networks']
     networks = [n for n in networks if n['label'] == name]
@@ -52,14 +49,14 @@
                    name, networks))
         if caller:
             msg = '(%s) %s' % (caller, msg)
-        LOG.warn(msg)
-        raise exceptions.InvalidConfiguration()
+        LOG.warning(msg)
+        raise exceptions.InvalidTestResource(type='network', name=name)
     else:
         msg = "Network with name: %s not found" % name
         if caller:
             msg = '(%s) %s' % (caller, msg)
-        LOG.warn(msg)
-        raise exceptions.InvalidConfiguration()
+        LOG.warning(msg)
+        raise exceptions.InvalidTestResource(type='network', name=name)
     # To be consistent between neutron and nova network always use name even
     # if label is used in the api response. If neither is present than then
     # the returned network is invalid.
@@ -68,13 +65,14 @@
         msg = "Network found from list doesn't contain a valid name or label"
         if caller:
             msg = '(%s) %s' % (caller, msg)
-        LOG.warn(msg)
-        raise exceptions.InvalidConfiguration()
+        LOG.warning(msg)
+        raise exceptions.InvalidTestResource(type='network', name=name)
     network['name'] = name
     return network
 
 
-def get_tenant_network(creds_provider, compute_networks_client):
+def get_tenant_network(creds_provider, compute_networks_client,
+                       shared_network_name):
     """Get a network usable by the primary tenant
 
     :param creds_provider: instance of credential provider
@@ -83,23 +81,24 @@
            neutron and nova-network cases. If this is not an admin network
            client, set_network_kwargs might fail in case fixed_network_name
            is the network to be used, and it's not visible to the tenant
-    :return a dict with 'id' and 'name' of the network
+    :param shared_network_name: name of the shared network to be used if no
+           tenant network is available in the creds provider
+    :returns: a dict with 'id' and 'name' of the network
     """
     caller = misc_utils.find_test_caller()
-    fixed_network_name = CONF.compute.fixed_network_name
     net_creds = creds_provider.get_primary_creds()
     network = getattr(net_creds, 'network', None)
     if not network or not network.get('name'):
-        if fixed_network_name:
+        if shared_network_name:
             msg = ('No valid network provided or created, defaulting to '
                    'fixed_network_name')
             if caller:
                 msg = '(%s) %s' % (caller, msg)
             LOG.debug(msg)
             try:
-                network = get_network_from_name(fixed_network_name,
+                network = get_network_from_name(shared_network_name,
                                                 compute_networks_client)
-            except exceptions.InvalidConfiguration:
+            except exceptions.InvalidTestResource:
                 network = {}
     msg = ('Found network %s available for tenant' % network)
     if caller:
@@ -123,6 +122,6 @@
         if 'id' in network.keys():
             params.update({"networks": [{'uuid': network['id']}]})
         else:
-            LOG.warn('The provided network dict: %s was invalid and did not '
-                     ' contain an id' % network)
+            LOG.warning('The provided network dict: %s was invalid and did '
+                        'not contain an id' % network)
     return params
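
Callers of get_tenant_network now pass the shared network name explicitly instead of the helper reading CONF.compute.fixed_network_name itself. A minimal sketch of the new call; creds_provider and compute_networks_client are placeholders for objects the calling test framework already holds:

    from tempest.common import fixed_network
    from tempest import config

    CONF = config.CONF

    # only the extra shared_network_name argument is new here
    network = fixed_network.get_tenant_network(
        creds_provider, compute_networks_client,
        shared_network_name=CONF.compute.fixed_network_name)
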
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 3e09300..3a51f2e 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -17,11 +17,8 @@
 import functools
 
 import jsonschema
-from oslo_log import log as logging
 import six
 
-LOG = logging.getLogger(__name__)
-
 
 def _check_for_expected_result(name, schema):
     expected_result = None
@@ -41,9 +38,7 @@
 
 
 def simple_generator(fn):
-    """
-    Decorator for simple generators that return one value
-    """
+    """Decorator for simple generators that return one value"""
     @functools.wraps(fn)
     def wrapped(self, schema):
         result = fn(self, schema)
@@ -110,9 +105,7 @@
         jsonschema.validate(schema, self.schema)
 
     def generate_scenarios(self, schema, path=None):
-        """
-        Generates the scenario (all possible test cases) out of the given
-        schema.
+        """Generate scenario (all possible test cases) out of the given schema
 
         :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
         :param path: the schema path if the given schema is a subschema
@@ -157,9 +150,10 @@
         return scenarios
 
     def generate_payload(self, test, schema):
-        """
-        Generates one jsonschema out of the given test. It's mandatory to use
-        generate_scenarios before to register all needed variables to the test.
+        """Generates one jsonschema out of the given test.
+
+        It's mandatory to use generate_scenarios before to register all needed
+        variables to the test.
 
         :param test: A test object (scenario) with all _negtest variables on it
         :param schema: schema for the test
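
simple_generator's docstring is condensed to a single line, but its behaviour is unchanged: it wraps a generator method that returns one value so the result comes back as a named test case. A simplified standalone sketch of that pattern, based on the wrapper visible in this hunk (the expected-result handling is omitted):

    import functools

    def simple_generator(fn):
        """Turn a method returning one invalid value into a named test case."""
        @functools.wraps(fn)
        def wrapped(self, schema):
            result = fn(self, schema)
            if result is not None:
                # pair the generated value with the generator method's name
                return (fn.__name__, result, None)
        return wrapped
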
diff --git a/tempest/common/generator/negative_generator.py b/tempest/common/generator/negative_generator.py
index 17997a5..67ace54 100644
--- a/tempest/common/generator/negative_generator.py
+++ b/tempest/common/generator/negative_generator.py
@@ -15,13 +15,9 @@
 
 import copy
 
-from oslo_log import log as logging
-
 import tempest.common.generator.base_generator as base
 import tempest.common.generator.valid_generator as valid
 
-LOG = logging.getLogger(__name__)
-
 
 class NegativeTestGenerator(base.BasicGeneratorSet):
     @base.generator_type("string")
diff --git a/tempest/common/generator/valid_generator.py b/tempest/common/generator/valid_generator.py
index 2213b4a..3070489 100644
--- a/tempest/common/generator/valid_generator.py
+++ b/tempest/common/generator/valid_generator.py
@@ -13,15 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 import six
 
 import tempest.common.generator.base_generator as base
 
 
-LOG = logging.getLogger(__name__)
-
-
 class ValidTestGenerator(base.BasicGeneratorSet):
     @base.generator_type("string")
     @base.simple_generator
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index 868a3e9..800e977 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -24,12 +24,10 @@
 
 import OpenSSL
 from oslo_log import log as logging
-from oslo_serialization import jsonutils as json
 import six
 from six import moves
 from six.moves import http_client as httplib
 from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
 
 from tempest import exceptions as exc
 
@@ -51,19 +49,20 @@
         self.endpoint_port = endpoint_parts.port
         self.endpoint_path = endpoint_parts.path
 
-        self.connection_class = self.get_connection_class(self.endpoint_scheme)
-        self.connection_kwargs = self.get_connection_kwargs(
+        self.connection_class = self._get_connection_class(
+            self.endpoint_scheme)
+        self.connection_kwargs = self._get_connection_kwargs(
             self.endpoint_scheme, **kwargs)
 
     @staticmethod
-    def get_connection_class(scheme):
+    def _get_connection_class(scheme):
         if scheme == 'https':
             return VerifiedHTTPSConnection
         else:
             return httplib.HTTPConnection
 
     @staticmethod
-    def get_connection_kwargs(scheme, **kwargs):
+    def _get_connection_kwargs(scheme, **kwargs):
         _kwargs = {'timeout': float(kwargs.get('timeout', 600))}
 
         if scheme == 'https':
@@ -75,7 +74,7 @@
 
         return _kwargs
 
-    def get_connection(self):
+    def _get_connection(self):
         _class = self.connection_class
         try:
             return _class(self.endpoint_hostname, self.endpoint_port,
@@ -95,7 +94,7 @@
 
         self._log_request(method, url, kwargs['headers'])
 
-        conn = self.get_connection()
+        conn = self._get_connection()
 
         try:
             url_parts = urlparse.urlparse(url)
@@ -159,30 +158,6 @@
                 self.LOG.debug("Large body (%d) md5 summary: %s", length,
                                hashlib.md5(str_body).hexdigest())
 
-    def json_request(self, method, url, **kwargs):
-        kwargs.setdefault('headers', {})
-        kwargs['headers'].setdefault('Content-Type', 'application/json')
-        if kwargs['headers']['Content-Type'] != 'application/json':
-            msg = "Only application/json content-type is supported."
-            raise lib_exc.InvalidContentType(msg)
-
-        if 'body' in kwargs:
-            kwargs['body'] = json.dumps(kwargs['body'])
-
-        resp, body_iter = self._http_request(url, method, **kwargs)
-
-        if 'application/json' in resp.getheader('content-type', ''):
-            body = ''.join([chunk for chunk in body_iter])
-            try:
-                body = json.loads(body)
-            except ValueError:
-                LOG.error('Could not decode response body as JSON')
-        else:
-            msg = "Only json/application content-type is supported."
-            raise lib_exc.InvalidContentType(msg)
-
-        return resp, body
-
     def raw_request(self, method, url, **kwargs):
         kwargs.setdefault('headers', {})
         kwargs['headers'].setdefault('Content-Type',
@@ -203,8 +178,7 @@
 
 
 class OpenSSLConnectionDelegator(object):
-    """
-    An OpenSSL.SSL.Connection delegator.
+    """An OpenSSL.SSL.Connection delegator.
 
     Supplies an additional 'makefile' method which httplib requires
     and is not present in OpenSSL.SSL.Connection.
@@ -225,9 +199,8 @@
 
 
 class VerifiedHTTPSConnection(httplib.HTTPSConnection):
-    """
-    Extended HTTPSConnection which uses the OpenSSL library
-    for enhanced SSL support.
+    """Extended HTTPSConnection which uses OpenSSL library for enhanced SSL
+
     Note: Much of this functionality can eventually be replaced
           with native Python 3.3 code.
     """
@@ -247,11 +220,10 @@
 
     @staticmethod
     def host_matches_cert(host, x509):
-        """
-        Verify that the the x509 certificate we have received
-        from 'host' correctly identifies the server we are
-        connecting to, ie that the certificate's Common Name
-        or a Subject Alternative Name matches 'host'.
+        """Verify that the x509 certificate we have received from 'host'
+
+        Check that the certificate's Common Name or a Subject Alternative
+        Name matches 'host', i.e. that it identifies the connected server.
         """
         # First see if we can match the CN
         if x509.get_subject().commonName == host:
@@ -289,9 +261,7 @@
             return preverify_ok
 
     def setcontext(self):
-        """
-        Set up the OpenSSL context.
-        """
+        """Set up the OpenSSL context."""
         self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
 
         if self.ssl_compression is False:
@@ -336,10 +306,7 @@
             self.context.set_default_verify_paths()
 
     def connect(self):
-        """
-        Connect to an SSL port using the OpenSSL library and apply
-        per-connection parameters.
-        """
+        """Connect to SSL port and apply per-connection parameters."""
         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         if self.timeout is not None:
             # '0' microseconds
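
The host_matches_cert docstring describes the verification rule: either the certificate's Common Name or one of its Subject Alternative Names must equal the host we connected to. A simplified standalone sketch of that check (exact match only, no wildcard or IP SAN handling):

    def cert_matches_host(x509, host):
        # x509 is an OpenSSL.crypto.X509 object, as used by the class above
        if x509.get_subject().commonName == host:
            return True
        for i in range(x509.get_extension_count()):
            ext = x509.get_extension(i)
            if ext.get_short_name() == b'subjectAltName':
                # str(ext) renders as "DNS:name, DNS:other-name, ..."
                names = [part.strip() for part in str(ext).split(',')]
                if 'DNS:%s' % host in names:
                    return True
        return False
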
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
new file mode 100644
index 0000000..2179363
--- /dev/null
+++ b/tempest/common/identity.py
@@ -0,0 +1,32 @@
+# Copyright 2015 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest_lib import exceptions as lib_exc
+
+
+def get_tenant_by_name(client, tenant_name):
+    tenants = client.list_tenants()['tenants']
+    for tenant in tenants:
+        if tenant['name'] == tenant_name:
+            return tenant
+    raise lib_exc.NotFound('No such tenant(%s) in %s' % (tenant_name, tenants))
+
+
+def get_user_by_username(client, tenant_id, username):
+    users = client.list_tenant_users(tenant_id)['users']
+    for user in users:
+        if user['name'] == username:
+            return user
+    raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
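
The new helpers do a client-side lookup by name on top of the list calls. A short usage sketch; the client object and the 'demo' names below are placeholders, and the client only needs to expose list_tenants() and list_tenant_users() as the v2 identity service client does:

    from tempest.common import identity

    tenant = identity.get_tenant_by_name(tenants_client, 'demo')
    user = identity.get_user_by_username(tenants_client, tenant['id'], 'demo')
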
diff --git a/tempest/common/negative_rest_client.py b/tempest/common/negative_rest_client.py
index abd8b31..d97411c 100644
--- a/tempest/common/negative_rest_client.py
+++ b/tempest/common/negative_rest_client.py
@@ -22,9 +22,7 @@
 
 
 class NegativeRestClient(service_client.ServiceClient):
-    """
-    Version of RestClient that does not raise exceptions.
-    """
+    """Version of RestClient that does not raise exceptions."""
     def __init__(self, auth_provider, service,
                  build_interval=None, build_timeout=None,
                  disable_ssl_certificate_validation=None,
@@ -43,9 +41,7 @@
             trace_requests=trace_requests)
 
     def _get_region_and_endpoint_type(self, service):
-        """
-        Returns the region for a specific service
-        """
+        """Returns the region for a specific service"""
         service_region = None
         service_endpoint_type = None
         for cfgname in dir(CONF._config):
diff --git a/tempest/common/preprov_creds.py b/tempest/common/preprov_creds.py
index dd27f08..34af31e 100644
--- a/tempest/common/preprov_creds.py
+++ b/tempest/common/preprov_creds.py
@@ -19,41 +19,76 @@
 from oslo_log import log as logging
 import six
 from tempest_lib import auth
+from tempest_lib import exceptions as lib_exc
 import yaml
 
 from tempest import clients
 from tempest.common import cred_provider
 from tempest.common import fixed_network
-from tempest import config
 from tempest import exceptions
 
-CONF = config.CONF
 LOG = logging.getLogger(__name__)
 
 
 def read_accounts_yaml(path):
-    with open(path, 'r') as yaml_file:
-        accounts = yaml.load(yaml_file)
+    try:
+        with open(path, 'r') as yaml_file:
+            accounts = yaml.load(yaml_file)
+    except IOError:
+        raise exceptions.InvalidConfiguration(
+            'The path for the test accounts file: %s '
+            'could not be found' % path)
     return accounts
 
 
 class PreProvisionedCredentialProvider(cred_provider.CredentialProvider):
 
-    def __init__(self, identity_version, name=None, credentials_domain=None,
-                 admin_role=None):
+    def __init__(self, identity_version, test_accounts_file,
+                 accounts_lock_dir, name=None, credentials_domain=None,
+                 admin_role=None, object_storage_operator_role=None,
+                 object_storage_reseller_admin_role=None):
+        """Credentials provider using pre-provisioned accounts
+
+        This credentials provider loads the details of pre-provisioned
+        accounts from a YAML file, in the format specified by
+        `etc/accounts.yaml.sample`. It locks accounts while in use, using the
+        external locking mechanism, so that multiple Python processes can
+        share a single account file and run tests in parallel.
+
+        The accounts_lock_dir must be generated using `lockutils.get_lock_path`
+        from the oslo.concurrency library. For instance:
+
+            accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
+                                             'test_accounts')
+
+        Role names for object storage are optional as long as the
+        `operator` and `reseller_admin` credential types are not used in the
+        accounts file.
+
+        :param identity_version: identity version of the credentials
+        :param admin_role: name of the admin role
+        :param test_accounts_file: path to the accounts YAML file
+        :param accounts_lock_dir: the directory for external locking
+        :param name: name of the hash file (optional)
+        :param credentials_domain: name of the domain credentials belong to
+                                   (if no domain is configured)
+        :param object_storage_operator_role: name of the role
+        :param object_storage_reseller_admin_role: name of the role
+        """
         super(PreProvisionedCredentialProvider, self).__init__(
             identity_version=identity_version, name=name,
-            credentials_domain=credentials_domain, admin_role=admin_role)
-        if (CONF.auth.test_accounts_file and
-                os.path.isfile(CONF.auth.test_accounts_file)):
-            accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
+            admin_role=admin_role, credentials_domain=credentials_domain)
+        self.test_accounts_file = test_accounts_file
+        if test_accounts_file:
+            accounts = read_accounts_yaml(self.test_accounts_file)
             self.use_default_creds = False
         else:
             accounts = {}
             self.use_default_creds = True
-        self.hash_dict = self.get_hash_dict(accounts, admin_role)
-        self.accounts_dir = os.path.join(lockutils.get_lock_path(CONF),
-                                         'test_accounts')
+        self.hash_dict = self.get_hash_dict(
+            accounts, admin_role, object_storage_operator_role,
+            object_storage_reseller_admin_role)
+        self.accounts_dir = accounts_lock_dir
         self._creds = {}
 
     @classmethod
@@ -65,7 +100,9 @@
         return hash_dict
 
     @classmethod
-    def get_hash_dict(cls, accounts, admin_role):
+    def get_hash_dict(cls, accounts, admin_role,
+                      object_storage_operator_role=None,
+                      object_storage_reseller_admin_role=None):
         hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
         # Loop over the accounts read from the yaml file
         for account in accounts:
@@ -92,14 +129,24 @@
                     hash_dict = cls._append_role(admin_role, temp_hash_key,
                                                  hash_dict)
                 elif type == 'operator':
-                    hash_dict = cls._append_role(
-                        CONF.object_storage.operator_role, temp_hash_key,
-                        hash_dict)
+                    if object_storage_operator_role:
+                        hash_dict = cls._append_role(
+                            object_storage_operator_role, temp_hash_key,
+                            hash_dict)
+                    else:
+                        msg = ("Type 'operator' configured, but no "
+                               "object_storage_operator_role specified")
+                        raise lib_exc.InvalidCredentials(msg)
                 elif type == 'reseller_admin':
-                    hash_dict = cls._append_role(
-                        CONF.object_storage.reseller_admin_role,
-                        temp_hash_key,
-                        hash_dict)
+                    if object_storage_reseller_admin_role:
+                        hash_dict = cls._append_role(
+                            object_storage_reseller_admin_role,
+                            temp_hash_key,
+                            hash_dict)
+                    else:
+                        msg = ("Type 'reseller_admin' configured, but no "
+                               "object_storage_reseller_admin_role specified")
+                        raise lib_exc.InvalidCredentials(msg)
             # Populate the network subdict
             for resource in resources:
                 if resource == 'network':
@@ -112,8 +159,8 @@
     def is_multi_user(self):
         # Default credentials is not a valid option with locking Account
         if self.use_default_creds:
-            raise exceptions.InvalidConfiguration(
-                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+            raise lib_exc.InvalidCredentials(
+                "Account file %s doesn't exist" % self.test_accounts_file)
         else:
             return len(self.hash_dict['creds']) > 1
 
@@ -149,7 +196,7 @@
                     names.append(fd.read())
         msg = ('Insufficient number of users provided. %s have allocated all '
                'the credentials for this allocation request' % ','.join(names))
-        raise exceptions.InvalidConfiguration(msg)
+        raise lib_exc.InvalidCredentials(msg)
 
     def _get_match_hash_list(self, roles=None):
         hashes = []
@@ -159,7 +206,7 @@
             for role in roles:
                 temp_hashes = self.hash_dict['roles'].get(role, None)
                 if not temp_hashes:
-                    raise exceptions.InvalidConfiguration(
+                    raise lib_exc.InvalidCredentials(
                         "No credentials with role: %s specified in the "
                         "accounts ""file" % role)
                 hashes.append(temp_hashes)
@@ -191,8 +238,8 @@
 
     def _get_creds(self, roles=None):
         if self.use_default_creds:
-            raise exceptions.InvalidConfiguration(
-                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+            raise lib_exc.InvalidCredentials(
+                "Account file %s doesn't exist" % self.test_accounts_file)
         useable_hashes = self._get_match_hash_list(roles)
         free_hash = self._get_free_hash(useable_hashes)
         clean_creds = self._sanitize_creds(
@@ -297,7 +344,7 @@
         try:
             network = fixed_network.get_network_from_name(
                 net_name, compute_network_client)
-        except exceptions.InvalidConfiguration:
+        except exceptions.InvalidTestResource:
             network = {}
         net_creds.set_resources(network=network)
         return net_creds
@@ -310,64 +357,3 @@
             if not user_domain_fields.intersection(set(creds_dict.keys())):
                 creds_dict['user_domain_name'] = self.credentials_domain
         return creds_dict
-
-
-class NonLockingCredentialProvider(PreProvisionedCredentialProvider):
-    """Credentials provider which always returns the first and second
-    configured accounts as primary and alt users.
-    This credential provider can be used in case of serial test execution
-    to preserve the current behaviour of the serial tempest run.
-    """
-
-    def _unique_creds(self, cred_arg=None):
-        """Verify that the configured credentials are valid and distinct """
-        try:
-            user = self.get_primary_creds()
-            alt_user = self.get_alt_creds()
-            return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
-        except exceptions.InvalidCredentials as ic:
-            msg = "At least one of the configured credentials is " \
-                  "not valid: %s" % ic.message
-            raise exceptions.InvalidConfiguration(msg)
-
-    def is_multi_user(self):
-        return self._unique_creds('username')
-
-    def is_multi_tenant(self):
-        return self._unique_creds('tenant_id')
-
-    def get_primary_creds(self):
-        if self._creds.get('primary'):
-            return self._creds.get('primary')
-        primary_credential = cred_provider.get_configured_credentials(
-            fill_in=False, credential_type='user',
-            identity_version=self.identity_version)
-        self._creds['primary'] = cred_provider.TestResources(
-            primary_credential)
-        return self._creds['primary']
-
-    def get_alt_creds(self):
-        if self._creds.get('alt'):
-            return self._creds.get('alt')
-        alt_credential = cred_provider.get_configured_credentials(
-            fill_in=False, credential_type='alt_user',
-            identity_version=self.identity_version)
-        self._creds['alt'] = cred_provider.TestResources(
-            alt_credential)
-        return self._creds['alt']
-
-    def clear_creds(self):
-        self._creds = {}
-
-    def get_admin_creds(self):
-        creds = cred_provider.get_configured_credentials(
-            "identity_admin", fill_in=False)
-        self._creds['admin'] = cred_provider.TestResources(creds)
-        return self._creds['admin']
-
-    def get_creds_by_roles(self, roles, force_new=False):
-        msg = "Credentials being specified through the config file can not be"\
-              " used with tests that specify using credentials by roles. "\
-              "Either exclude/skip the tests doing this or use either an "\
-              "test_accounts_file or dynamic credentials."
-        raise exceptions.InvalidConfiguration(msg)
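
With the configuration lookups removed, everything the provider needs is now injected through the constructor, as the new docstring describes. A minimal instantiation sketch; the accounts file path, role names and use of CONF below are assumptions for illustration, not values mandated by the provider:

    import os

    from oslo_concurrency import lockutils

    from tempest.common import preprov_creds
    from tempest import config

    CONF = config.CONF
    # lock dir derived exactly as the docstring describes
    lock_dir = os.path.join(lockutils.get_lock_path(CONF), 'test_accounts')

    provider = preprov_creds.PreProvisionedCredentialProvider(
        identity_version='v2',
        test_accounts_file='/etc/tempest/accounts.yaml',
        accounts_lock_dir=lock_dir,
        admin_role='admin',
        object_storage_operator_role='Member',
        object_storage_reseller_admin_role='ResellerAdmin')
    primary = provider.get_primary_creds()
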
diff --git a/tempest/common/service_client.py b/tempest/common/service_client.py
index 87e925d..b3a5a09 100644
--- a/tempest/common/service_client.py
+++ b/tempest/common/service_client.py
@@ -57,8 +57,7 @@
 
 
 class ResponseBodyData(object):
-    """Class that wraps an http response and string data into a single value.
-    """
+    """Class that wraps an http response and string data into a single value"""
 
     def __init__(self, response, data):
         self.response = response
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 81b8110..aad6373 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -27,8 +27,6 @@
 
 class DataUtils(object):
     def __getattr__(self, attr):
-        if attr in self.__dict__:
-            return self.__dict__[attr]
 
         if attr == 'rand_name':
             # NOTE(flwang): This is a proxy to generate a random name that
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 3bead88..8f5faef 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -15,8 +15,8 @@
 import time
 
 from oslo_log import log as logging
-import six
 from tempest_lib.common import ssh
+import tempest_lib.exceptions
 
 from tempest import config
 from tempest import exceptions
@@ -28,22 +28,10 @@
 
 class RemoteClient(object):
 
-    # NOTE(afazekas): It should always get an address instead of server
-    def __init__(self, server, username, password=None, pkey=None):
+    def __init__(self, ip_address, username, password=None, pkey=None):
         ssh_timeout = CONF.validation.ssh_timeout
-        network = CONF.compute.network_for_ssh
-        ip_version = CONF.validation.ip_version_for_ssh
         connect_timeout = CONF.validation.connect_timeout
-        if isinstance(server, six.string_types):
-            ip_address = server
-        else:
-            addresses = server['addresses'][network]
-            for address in addresses:
-                if address['version'] == ip_version:
-                    ip_address = address['addr']
-                    break
-            else:
-                raise exceptions.ServerUnreachable()
+
         self.ssh_client = ssh.Client(ip_address, username, password,
                                      ssh_timeout, pkey=pkey,
                                      channel_timeout=connect_timeout)
@@ -51,12 +39,13 @@
     def exec_command(self, cmd):
         # Shell options below add more clearness on failures,
         # path is extended for some non-cirros guest oses (centos7)
-        cmd = CONF.compute.ssh_shell_prologue + " " + cmd
+        cmd = CONF.validation.ssh_shell_prologue + " " + cmd
         LOG.debug("Remote command: %s" % cmd)
         return self.ssh_client.exec_command(cmd)
 
     def validate_authentication(self):
         """Validate ssh connection and authentication
+
            This method raises an Exception when the validation fails.
         """
         self.ssh_client.test_connection_auth()
@@ -72,7 +61,7 @@
             return output.split()[1]
 
     def get_number_of_vcpus(self):
-        output = self.exec_command('grep -c processor /proc/cpuinfo')
+        output = self.exec_command('grep -c ^processor /proc/cpuinfo')
         return int(output)
 
     def get_partitions(self):
@@ -93,16 +82,25 @@
         cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
         return self.exec_command(cmd)
 
-    def ping_host(self, host, count=CONF.compute.ping_count,
-                  size=CONF.compute.ping_size):
+    def ping_host(self, host, count=CONF.validation.ping_count,
+                  size=CONF.validation.ping_size, nic=None):
         addr = netaddr.IPAddress(host)
         cmd = 'ping6' if addr.version == 6 else 'ping'
+        if nic:
+            cmd = 'sudo {cmd} -I {nic}'.format(cmd=cmd, nic=nic)
         cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
         return self.exec_command(cmd)
 
-    def get_mac_address(self):
-        cmd = "ip addr | awk '/ether/ {print $2}'"
-        return self.exec_command(cmd)
+    def set_mac_address(self, nic, address):
+        self.set_nic_state(nic=nic, state="down")
+        cmd = "sudo ip link set dev {0} address {1}".format(nic, address)
+        self.exec_command(cmd)
+        self.set_nic_state(nic=nic, state="up")
+
+    def get_mac_address(self, nic=""):
+        show_nic = "show {nic} ".format(nic=nic) if nic else ""
+        cmd = "ip addr %s| awk '/ether/ {print $2}'" % show_nic
+        return self.exec_command(cmd).strip().lower()
 
     def get_nic_name(self, address):
         cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
@@ -120,8 +118,8 @@
         )
         return self.exec_command(cmd)
 
-    def turn_nic_on(self, nic):
-        cmd = "sudo ip link set {nic} up".format(nic=nic)
+    def set_nic_state(self, nic, state="up"):
+        cmd = "sudo ip link set {nic} {state}".format(nic=nic, state=state)
         return self.exec_command(cmd)
 
     def get_pids(self, pr_name):
@@ -181,4 +179,10 @@
 
     def make_fs(self, dev_name, fs='ext4'):
         cmd_mkfs = 'sudo /usr/sbin/mke2fs -t %s /dev/%s' % (fs, dev_name)
-        self.exec_command(cmd_mkfs)
+        try:
+            self.exec_command(cmd_mkfs)
+        except tempest_lib.exceptions.SSHExecCommandFailed:
+            LOG.error("Couldn't mke2fs")
+            cmd_why = 'sudo ls -lR /dev'
+            LOG.info("Contents of /dev: %s" % self.exec_command(cmd_why))
+            raise
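
RemoteClient now takes a plain IP address rather than a server dict, and the NIC helpers grew set/get variants. A short usage sketch; the address, user name and key below stand in for values created by the test (e.g. a floating IP and the keypair's private key):

    from tempest.common.utils.linux import remote_client

    client = remote_client.RemoteClient('172.24.4.10', 'cirros', pkey=pkey)
    client.validate_authentication()

    nic = client.get_nic_name('10.0.0.5').strip()
    client.set_nic_state(nic, state='up')
    mac = client.get_mac_address(nic=nic)
    client.ping_host('10.0.0.1', nic=nic)
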
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
index debc200..9457a60 100644
--- a/tempest/common/validation_resources.py
+++ b/tempest/common/validation_resources.py
@@ -23,8 +23,8 @@
 
 
 def create_ssh_security_group(os, add_rule=False):
-    security_groups_client = os.security_groups_client
-    security_group_rules_client = os.security_group_rules_client
+    security_groups_client = os.compute_security_groups_client
+    security_group_rules_client = os.compute_security_group_rules_client
     sg_name = data_utils.rand_name('securitygroup-')
     sg_description = data_utils.rand_name('description-')
     security_group = security_groups_client.create_security_group(
@@ -58,7 +58,7 @@
             validation_data['security_group'] = \
                 create_ssh_security_group(os, add_rule)
         if validation_resources['floating_ip']:
-            floating_client = os.floating_ips_client
+            floating_client = os.compute_floating_ips_client
             validation_data.update(floating_client.create_floating_ip())
     return validation_data
 
@@ -73,22 +73,22 @@
             try:
                 keypair_client.delete_keypair(keypair_name)
             except lib_exc.NotFound:
-                LOG.warn("Keypair %s is not found when attempting to delete"
-                         % keypair_name)
+                LOG.warning("Keypair %s is not found when attempting to delete"
+                            % keypair_name)
             except Exception as exc:
                 LOG.exception('Exception raised while deleting key %s'
                               % keypair_name)
                 if not has_exception:
                     has_exception = exc
         if 'security_group' in validation_data:
-            security_group_client = os.security_groups_client
+            security_group_client = os.compute_security_groups_client
             sec_id = validation_data['security_group']['id']
             try:
                 security_group_client.delete_security_group(sec_id)
                 security_group_client.wait_for_resource_deletion(sec_id)
             except lib_exc.NotFound:
-                LOG.warn("Security group %s is not found when attempting to "
-                         " delete" % sec_id)
+                LOG.warning("Security group %s is not found when attempting "
+                            "to delete" % sec_id)
             except lib_exc.Conflict as exc:
                 LOG.exception('Conflict while deleting security '
                               'group %s VM might not be deleted ' % sec_id)
@@ -100,13 +100,13 @@
                 if not has_exception:
                     has_exception = exc
         if 'floating_ip' in validation_data:
-            floating_client = os.floating_ips_client
+            floating_client = os.compute_floating_ips_client
             fip_id = validation_data['floating_ip']['id']
             try:
                 floating_client.delete_floating_ip(fip_id)
             except lib_exc.NotFound:
-                LOG.warn('Floating ip %s not found while attempting to delete'
-                         % fip_id)
+                LOG.warning('Floating ip %s not found while attempting to '
+                            'delete' % fip_id)
             except Exception as exc:
                 LOG.exception('Exception raised while deleting ip %s '
                               % fip_id)
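
The helpers now resolve the compute clients through their compute_-prefixed attribute names on the clients manager. A minimal sketch of the security group helper; 'os' stands for a service clients manager exposing compute_security_groups_client, compute_security_group_rules_client and compute_floating_ips_client:

    from tempest.common import validation_resources

    secgroup = validation_resources.create_ssh_security_group(os, add_rule=True)
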
diff --git a/tempest/config.py b/tempest/config.py
index 0cda018..14a6ad2 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -17,9 +17,10 @@
 
 import logging as std_logging
 import os
+import tempfile
 
+from oslo_concurrency import lockutils
 from oslo_config import cfg
-
 from oslo_log import log as logging
 
 from tempest.test_discover import plugins
@@ -231,16 +232,6 @@
     cfg.StrOpt('flavor_ref_alt',
                default="2",
                help='Valid secondary flavor to be used in tests.'),
-    cfg.StrOpt('image_ssh_user',
-               default="root",
-               help="User name used to authenticate to an instance."),
-    cfg.StrOpt('image_ssh_password',
-               default="password",
-               help="Password used to authenticate to an instance."),
-    cfg.StrOpt('image_alt_ssh_user',
-               default="root",
-               help="User name used to authenticate to an instance using "
-                    "the alternate image."),
     cfg.IntOpt('build_interval',
                default=1,
                help="Time in seconds between build status checks."),
@@ -249,39 +240,6 @@
                help="Timeout in seconds to wait for an instance to build. "
                     "Other services that do not define build_timeout will "
                     "inherit this value."),
-    cfg.StrOpt('ssh_shell_prologue',
-               default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
-               help="Shell fragments to use before executing a command "
-                    "when sshing to a guest."),
-    cfg.StrOpt('ssh_auth_method',
-               default='keypair',
-               help="Auth method used for authenticate to the instance. "
-                    "Valid choices are: keypair, configured, adminpass "
-                    "and disabled. "
-                    "Keypair: start the servers with a ssh keypair. "
-                    "Configured: use the configured user and password. "
-                    "Adminpass: use the injected adminPass. "
-                    "Disabled: avoid using ssh when it is an option."),
-    cfg.StrOpt('ssh_connect_method',
-               default='floating',
-               help="How to connect to the instance? "
-                    "fixed: using the first ip belongs the fixed network "
-                    "floating: creating and using a floating ip."),
-    cfg.StrOpt('ssh_user',
-               default='root',
-               help="User name used to authenticate to an instance."),
-    cfg.IntOpt('ping_timeout',
-               default=120,
-               help="Timeout in seconds to wait for ping to "
-                    "succeed."),
-    cfg.IntOpt('ping_size',
-               default=56,
-               help="The packet size for ping packets originating "
-                    "from remote linux hosts"),
-    cfg.IntOpt('ping_count',
-               default=1,
-               help="The number of ping packets originating from remote "
-                    "linux hosts"),
     cfg.IntOpt('ready_wait',
                default=0,
                help="Additional wait time for clean state, when there is "
@@ -293,13 +251,6 @@
                     "servers if tempest does not create a network or a "
                     "network is not specified elsewhere. It may be used for "
                     "ssh validation only if floating IPs are disabled."),
-    cfg.StrOpt('network_for_ssh',
-               default='public',
-               help="Network used for SSH connections. Ignored if "
-                    "use_floatingip_for_ssh=true or run_validation=false."),
-    cfg.BoolOpt('use_floatingip_for_ssh',
-                default=True,
-                help="Does SSH use Floating IPs?"),
     cfg.StrOpt('catalog_type',
                default='compute',
                help="Catalog type of the Compute service."),
@@ -325,18 +276,36 @@
                     'when shelved. This time should be the same as the time '
                     'of nova.conf, and some tests will run for as long as the '
                     'time.'),
-    cfg.StrOpt('floating_ip_range',
-               default='10.0.0.0/29',
-               help='Unallocated floating IP range, which will be used to '
-                    'test the floating IP bulk feature for CRUD operation. '
-                    'This block must not overlap an existing floating IP '
-                    'pool.')
+    cfg.IntOpt('min_compute_nodes',
+               default=1,
+               help=('The minimum number of compute nodes expected. This will '
+                     'be utilized by some multinode specific tests to ensure '
+                     'that requests match the expected size of the cluster '
+                     'you are testing with.'))
 ]
 
 compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
                                       title="Enabled Compute Service Features")
 
 ComputeFeaturesGroup = [
+    cfg.StrOpt('min_microversion',
+               default=None,
+               help="Lower version of the test target microversion range. "
+                    "The format is 'X.Y', where 'X' and 'Y' are int values. "
+                    "Tempest selects tests based on the range between "
+                    "min_microversion and max_microversion. "
+                    "If both values are not specified, Tempest avoids tests "
+                    "which require a microversion. Valid values are string "
+                    "with format 'X.Y' or string 'latest'"),
+    cfg.StrOpt('max_microversion',
+               default=None,
+               help="Upper version of the test target microversion range. "
+                    "The format is 'X.Y', where 'X' and 'Y' are int values. "
+                    "Tempest selects tests based on the range between "
+                    "min_microversion and max_microversion. "
+                    "If both values are not specified, Tempest avoids tests "
+                    "which require a microversion. Valid values are string "
+                    "with format 'X.Y' or string 'latest'"),
     cfg.BoolOpt('disk_config',
                 default=True,
                 help="If false, skip disk config tests"),
@@ -419,9 +388,6 @@
                 default=True,
                 help='Does the test environment support creating snapshot '
                      'images of running instances?'),
-    cfg.BoolOpt('ec2_api',
-                default=True,
-                help='Does the test environment have the ec2 api running?'),
     cfg.BoolOpt('nova_cert',
                 default=True,
                 help='Does the test environment have the nova cert running?'),
@@ -650,9 +616,7 @@
     cfg.BoolOpt('run_validation',
                 default=False,
                 help='Enable ssh on created servers and creation of additional'
-                     ' validation resources to enable remote access',
-                deprecated_opts=[cfg.DeprecatedOpt('run_ssh',
-                                                   group='compute')]),
+                     ' validation resources to enable remote access'),
     cfg.BoolOpt('security_group',
                 default=True,
                 help='Enable/disable security groups.'),
@@ -664,31 +628,77 @@
                choices=['fixed', 'floating'],
                help='Default IP type used for validation: '
                     '-fixed: uses the first IP belonging to the fixed network '
-                    '-floating: creates and uses a floating IP'),
+                    '-floating: creates and uses a floating IP',
+               deprecated_opts=[cfg.DeprecatedOpt('use_floatingip_for_ssh',
+                                                  group='compute')]),
     cfg.StrOpt('auth_method',
                default='keypair',
                choices=['keypair'],
                help='Default authentication method to the instance. '
                     'Only ssh via keypair is supported for now. '
-                    'Additional methods will be handled in a separate spec.'),
+                    'Additional methods will be handled in a separate spec.',
+               deprecated_opts=[cfg.DeprecatedOpt('ssh_auth_method',
+                                                  group='compute')]),
     cfg.IntOpt('ip_version_for_ssh',
                default=4,
-               help='Default IP version for ssh connections.',
-               deprecated_opts=[cfg.DeprecatedOpt('ip_version_for_ssh',
-                                                  group='compute')]),
+               help='Default IP version for ssh connections.'),
     cfg.IntOpt('ping_timeout',
                default=120,
-               help='Timeout in seconds to wait for ping to succeed.'),
+               help='Timeout in seconds to wait for ping to succeed.',
+               deprecated_opts=[cfg.DeprecatedOpt('ping_timeout',
+                                                  group='compute')]),
     cfg.IntOpt('connect_timeout',
                default=60,
                help='Timeout in seconds to wait for the TCP connection to be '
-                    'successful.',
-               deprecated_opts=[cfg.DeprecatedOpt('ssh_channel_timeout',
-                                                  group='compute')]),
+                    'successful.'),
     cfg.IntOpt('ssh_timeout',
                default=300,
-               help='Timeout in seconds to wait for the ssh banner.',
-               deprecated_opts=[cfg.DeprecatedOpt('ssh_timeout',
+               help='Timeout in seconds to wait for the ssh banner.'),
+    cfg.StrOpt('image_ssh_user',
+               default="root",
+               help="User name used to authenticate to an instance.",
+               deprecated_opts=[cfg.DeprecatedOpt('image_ssh_user',
+                                                  group='compute'),
+                                cfg.DeprecatedOpt('ssh_user',
+                                                  group='compute'),
+                                cfg.DeprecatedOpt('ssh_user',
+                                                  group='scenario')]),
+    cfg.StrOpt('image_ssh_password',
+               default="password",
+               help="Password used to authenticate to an instance.",
+               deprecated_opts=[cfg.DeprecatedOpt('image_ssh_password',
+                                                  group='compute')]),
+    cfg.StrOpt('ssh_shell_prologue',
+               default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
+               help="Shell fragments to use before executing a command "
+                    "when sshing to a guest.",
+               deprecated_opts=[cfg.DeprecatedOpt('ssh_shell_prologue',
+                                                  group='compute')]),
+    cfg.IntOpt('ping_size',
+               default=56,
+               help="The packet size for ping packets originating "
+                    "from remote linux hosts",
+               deprecated_opts=[cfg.DeprecatedOpt('ping_size',
+                                                  group='compute')]),
+    cfg.IntOpt('ping_count',
+               default=1,
+               help="The number of ping packets originating from remote "
+                    "linux hosts",
+               deprecated_opts=[cfg.DeprecatedOpt('ping_count',
+                                                  group='compute')]),
+    cfg.StrOpt('floating_ip_range',
+               default='10.0.0.0/29',
+               help='Unallocated floating IP range, which will be used to '
+                    'test the floating IP bulk feature for CRUD operation. '
+                    'This block must not overlap an existing floating IP '
+                    'pool.',
+               deprecated_opts=[cfg.DeprecatedOpt('floating_ip_range',
+                                                  group='compute')]),
+    cfg.StrOpt('network_for_ssh',
+               default='public',
+               help="Network used for SSH connections. Ignored if "
+                    "use_floatingip_for_ssh=true or run_validation=false.",
+               deprecated_opts=[cfg.DeprecatedOpt('network_for_ssh',
                                                   group='compute')]),
 ]
 
@@ -718,11 +728,21 @@
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the volume service."),
     cfg.StrOpt('backend1_name',
-               default='BACKEND_1',
-               help="Name of the backend1 (must be declared in cinder.conf)"),
+               default='',
+               help='Name of the backend1 (must be declared in cinder.conf)',
+               deprecated_for_removal=True),
     cfg.StrOpt('backend2_name',
-               default='BACKEND_2',
-               help="Name of the backend2 (must be declared in cinder.conf)"),
+               default='',
+               help='Name of the backend2 (must be declared in cinder.conf)',
+               deprecated_for_removal=True),
+    cfg.ListOpt('backend_names',
+                default=['BACKEND_1', 'BACKEND_2'],
+                help='A list of backend names separated by comma. '
+                     'The backend name must be declared in cinder.conf',
+                deprecated_opts=[cfg.DeprecatedOpt('BACKEND_1',
+                                                   group='volume'),
+                                 cfg.DeprecatedOpt('BACKEND_2',
+                                                   group='volume')]),
     cfg.StrOpt('storage_protocol',
                default='iSCSI',
                help='Backend protocol to target when creating volume types'),
@@ -765,9 +785,10 @@
                 default=True,
                 help="Is the v2 volume API enabled"),
     cfg.BoolOpt('bootable',
-                default=False,
+                default=True,
                 help='Update bootable status of a volume '
-                     'Not implemented on icehouse ')
+                     'Not implemented on icehouse ',
+                deprecated_for_removal=True)
 ]
 
 
@@ -913,6 +934,20 @@
                      "notification tests")
 ]
 
+alarming_group = cfg.OptGroup(name='alarming',
+                              title='Alarming Service Options')
+
+AlarmingGroup = [
+    cfg.StrOpt('catalog_type',
+               default='alarming',
+               help="Catalog type of the Alarming service."),
+    cfg.StrOpt('endpoint_type',
+               default='publicURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help="The endpoint type to use for the alarming service."),
+]
+
 
 telemetry_feature_group = cfg.OptGroup(name='telemetry-feature-enabled',
                                        title='Enabled Ceilometer Features')
@@ -967,54 +1002,6 @@
                 help="List of enabled data processing plugins")
 ]
 
-
-boto_group = cfg.OptGroup(name='boto',
-                          title='EC2/S3 options')
-BotoGroup = [
-    cfg.StrOpt('ec2_url',
-               default="http://localhost:8773/services/Cloud",
-               help="EC2 URL"),
-    cfg.StrOpt('s3_url',
-               default="http://localhost:8080",
-               help="S3 URL"),
-    cfg.StrOpt('aws_secret',
-               help="AWS Secret Key",
-               secret=True),
-    cfg.StrOpt('aws_access',
-               help="AWS Access Key"),
-    cfg.StrOpt('aws_zone',
-               default="nova",
-               help="AWS Zone for EC2 tests"),
-    cfg.StrOpt('s3_materials_path',
-               default="/opt/stack/devstack/files/images/"
-                       "s3-materials/cirros-0.3.0",
-               help="S3 Materials Path"),
-    cfg.StrOpt('ari_manifest',
-               default="cirros-0.3.0-x86_64-initrd.manifest.xml",
-               help="ARI Ramdisk Image manifest"),
-    cfg.StrOpt('ami_manifest',
-               default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
-               help="AMI Machine Image manifest"),
-    cfg.StrOpt('aki_manifest',
-               default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
-               help="AKI Kernel Image manifest"),
-    cfg.StrOpt('instance_type',
-               default="m1.tiny",
-               help="Instance type"),
-    cfg.IntOpt('http_socket_timeout',
-               default=3,
-               help="boto Http socket timeout"),
-    cfg.IntOpt('num_retries',
-               default=1,
-               help="boto num_retries on error"),
-    cfg.IntOpt('build_timeout',
-               default=60,
-               help="Status Change Timeout"),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Status Change Test Interval"),
-]
-
 stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
 
 StressGroup = [
@@ -1059,7 +1046,8 @@
     cfg.StrOpt('img_dir',
                default='/opt/stack/new/devstack/files/images/'
                'cirros-0.3.1-x86_64-uec',
-               help='Directory containing image files'),
+               help='Directory containing image files',
+               deprecated_for_removal=True),
     cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
                default='cirros-0.3.1-x86_64-disk.img',
                help='Image file name'),
@@ -1073,16 +1061,16 @@
                 'Use for custom images which require them'),
     cfg.StrOpt('ami_img_file',
                default='cirros-0.3.1-x86_64-blank.img',
-               help='AMI image file name'),
+               help='AMI image file name',
+               deprecated_for_removal=True),
     cfg.StrOpt('ari_img_file',
                default='cirros-0.3.1-x86_64-initrd',
-               help='ARI image file name'),
+               help='ARI image file name',
+               deprecated_for_removal=True),
     cfg.StrOpt('aki_img_file',
                default='cirros-0.3.1-x86_64-vmlinuz',
-               help='AKI image file name'),
-    cfg.StrOpt('ssh_user',
-               default='cirros',
-               help='ssh username for the image file'),
+               help='AKI image file name',
+               deprecated_for_removal=True),
     cfg.IntOpt(
         'large_ops_number',
         default=0,
@@ -1123,6 +1111,9 @@
     cfg.BoolOpt('ceilometer',
                 default=True,
                 help="Whether or not Ceilometer is expected to be available"),
+    cfg.BoolOpt('aodh',
+                default=False,
+                help="Whether or not Aodh is expected to be available"),
     cfg.BoolOpt('horizon',
                 default=True,
                 help="Whether or not Horizon is expected to be available"),
@@ -1198,6 +1189,11 @@
                                     'live_migration, pause, rescue, resize '
                                     'shelve, snapshot, and suspend')
 
+
+# NOTE(deva): Ironic tests have been ported to tempest-lib. New config options
+#             should be added to ironic/ironic_tempest_plugin/config.py.
+#             However, these options need to remain here for testing stable
+#             branches until Liberty release reaches EOL.
 BaremetalGroup = [
     cfg.StrOpt('catalog_type',
                default='baremetal',
@@ -1269,10 +1265,10 @@
     (orchestration_group, OrchestrationGroup),
     (telemetry_group, TelemetryGroup),
     (telemetry_feature_group, TelemetryFeaturesGroup),
+    (alarming_group, AlarmingGroup),
     (dashboard_group, DashboardGroup),
     (data_processing_group, DataProcessingGroup),
     (data_processing_feature_group, DataProcessingFeaturesGroup),
-    (boto_group, BotoGroup),
     (stress_group, StressGroup),
     (scenario_group, ScenarioGroup),
     (service_available_group, ServiceAvailableGroup),
@@ -1342,7 +1338,6 @@
         self.data_processing = _CONF['data-processing']
         self.data_processing_feature_enabled = _CONF[
             'data-processing-feature-enabled']
-        self.boto = _CONF.boto
         self.stress = _CONF.stress
         self.scenario = _CONF.scenario
         self.service_available = _CONF.service_available
@@ -1356,6 +1351,7 @@
         _CONF.set_default('alt_domain_name',
                           self.auth.default_credentials_domain_name,
                           group='identity')
+        logging.tempest_set_log_file('tempest.log')
 
     def __init__(self, parse_conf=True, config_path=None):
         """Initialize a configuration from a conf directory and conf file."""
@@ -1386,6 +1382,13 @@
             _CONF([], project='tempest', default_config_files=config_files)
         else:
             _CONF([], project='tempest')
+
+        logging_cfg_path = "%s/logging.conf" % os.path.dirname(path)
+        if (not hasattr(_CONF, 'log_config_append') and
+            os.path.isfile(logging_cfg_path)):
+            # if logging conf is in place we need to set log_config_append
+            _CONF.log_config_append = logging_cfg_path
+
         logging.setup(_CONF, 'tempest')
         LOG = logging.getLogger('tempest')
         LOG.info("Using tempest config file %s" % path)
@@ -1412,6 +1415,8 @@
     def __getattr__(self, attr):
         if not self._config:
             self._fix_log_levels()
+            lock_dir = os.path.join(tempfile.gettempdir(), 'tempest-lock')
+            lockutils.set_defaults(lock_dir)
             self._config = TempestConfigPrivate(config_path=self._path)
 
         return getattr(self._config, attr)
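
Most of the relocated ssh options keep their old [compute] names alive through DeprecatedOpt entries, so existing tempest.conf files keep working. A standalone sketch of how that fallback behaves with oslo.config; the option name and groups mirror the diff, the rest is illustrative:

    from oslo_config import cfg

    opts = [
        cfg.StrOpt('image_ssh_user',
                   default='root',
                   help='User name used to authenticate to an instance.',
                   deprecated_opts=[cfg.DeprecatedOpt('image_ssh_user',
                                                      group='compute')]),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(opts, group='validation')
    conf([])  # no CLI args; config files would be passed here in real use
    print(conf.validation.image_ssh_user)  # falls back to [compute] if set there
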
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index b3d60f6..86e8460 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -17,8 +17,7 @@
 
 
 class TempestException(Exception):
-    """
-    Base Tempest Exception
+    """Base Tempest Exception
 
     To correctly use this class, inherit from it and define
     a 'message' property. That message will get printf'd
@@ -140,6 +139,13 @@
     message = "%(num)d cleanUp operation failed"
 
 
+# NOTE(andreaf) This exception is added here to facilitate the migration
+# of get_network_from_name and preprov_creds to tempest-lib, and it should
+# be migrated along with them
+class InvalidTestResource(TempestException):
+    message = "%(name) is not a valid %(type), or the name is ambiguous"
+
+
 class RFCViolation(RestClientException):
     message = "RFC Violation"
 
@@ -170,6 +176,20 @@
     message = "Invalid structure of table with details"
 
 
+class InvalidAPIVersionString(TempestException):
+    message = ("API Version String %(version)s is of invalid format. Must "
+               "be of format MajorNum.MinorNum or string 'latest'.")
+
+
+class JSONSchemaNotFound(TempestException):
+    message = ("JSON Schema for %(version)s is not found in \n"
+               " %(schema_versions_info)s")
+
+
+class InvalidAPIVersionRange(TempestException):
+    message = ("API Min Version is greater than Max version")
+
+
 class CommandFailed(Exception):
     def __init__(self, returncode, cmd, output, stderr):
         super(CommandFailed, self).__init__()
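
The new exception classes follow the existing pattern of a printf-style message template filled in from keyword arguments at raise time. A small sketch, assuming TempestException keeps interpolating the template with the kwargs as the other exceptions in this module do:

    from tempest import exceptions

    try:
        raise exceptions.InvalidAPIVersionString(version='2.x')
    except exceptions.TempestException as exc:
        # prints: API Version String 2.x is of invalid format. Must be of
        # format MajorNum.MinorNum or string 'latest'.
        print(exc)
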
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 06ca09b..88598de 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -30,6 +30,10 @@
 RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
 mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
 TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
+METHOD = re.compile(r"^    def .+")
+METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
+METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
+CLASS = re.compile(r"^class .+")
 
 
 def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
@@ -143,6 +147,82 @@
                "decorators.skip_because from tempest-lib")
 
 
+def _common_service_clients_check(logical_line, physical_line, filename,
+                                  ignored_list_file=None):
+    if 'tempest/services/' not in filename:
+        return False
+
+    if ignored_list_file is not None:
+        ignored_list = []
+        with open('tempest/hacking/' + ignored_list_file) as f:
+            for line in f:
+                ignored_list.append(line.strip())
+
+        if filename in ignored_list:
+            return False
+
+    if not METHOD.match(physical_line):
+        return False
+
+    if pep8.noqa(physical_line):
+        return False
+
+    return True
+
+
+def get_resources_on_service_clients(logical_line, physical_line, filename,
+                                     line_number, lines):
+    """Check that service client names of GET should be consistent
+
+    T110
+    """
+    if not _common_service_clients_check(logical_line, physical_line,
+                                         filename, 'ignored_list_T110.txt'):
+        return
+
+    for line in lines[line_number:]:
+        if METHOD.match(line) or CLASS.match(line):
+            # the end of a method
+            return
+
+        if 'self.get(' not in line and ('self.show_resource(' not in line and
+                                        'self.list_resources(' not in line):
+            continue
+
+        if METHOD_GET_RESOURCE.match(logical_line):
+            return
+
+        msg = ("T110: [GET /resources] methods should be list_<resource name>s"
+               " or show_<resource name>")
+        yield (0, msg)
+
+
+def delete_resources_on_service_clients(logical_line, physical_line, filename,
+                                        line_number, lines):
+    """Check that service client names of DELETE should be consistent
+
+    T111
+    """
+    if not _common_service_clients_check(logical_line, physical_line,
+                                         filename, 'ignored_list_T111.txt'):
+        return
+
+    for line in lines[line_number:]:
+        if METHOD.match(line) or CLASS.match(line):
+            # the end of a method
+            return
+
+        if 'self.delete(' not in line and 'self.delete_resource(' not in line:
+            continue
+
+        if METHOD_DELETE_RESOURCE.match(logical_line):
+            return
+
+        msg = ("T111: [DELETE /resources/<id>] methods should be "
+               "delete_<resource name>")
+        yield (0, msg)
+
+
 def factory(register):
     register(import_no_clients_in_api_and_scenario_tests)
     register(scenario_tests_need_service_tags)
@@ -152,3 +232,5 @@
     register(no_hyphen_at_end_of_rand_name)
     register(no_mutable_default_args)
     register(no_testtools_skip_decorator)
+    register(get_resources_on_service_clients)
+    register(delete_resources_on_service_clients)
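To make the T110/T111 rules concrete: for every method defined in a file under tempest/services/ (and not listed in the corresponding ignored_list file or marked with noqa), the checks scan the method body; if the body issues a GET via self.get()/self.show_resource()/self.list_resources() the method name must start with list_ or show_, and if it issues a DELETE via self.delete()/self.delete_resource() the name must start with delete_. A hypothetical client sketch, as if it lived under tempest/services/, showing what passes and what gets flagged:

    class FakeRestClient(object):
        # Stand-in for the real service client base class, for illustration.
        def get(self, url):
            return 200, {}

        def delete(self, url):
            return 204, {}


    class ExampleServiceClient(FakeRestClient):

        def list_examples(self):
            # OK for T110: a GET issued from a list_* method.
            return self.get("examples")

        def fetch_example(self, example_id):
            # Flagged by T110: a GET from a method that is neither
            # list_* nor show_*.
            return self.get("examples/%s" % example_id)

        def remove_example(self, example_id):
            # Flagged by T111: a DELETE from a method that is not delete_*.
            return self.delete("examples/%s" % example_id)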
diff --git a/tempest/hacking/ignored_list_T110.txt b/tempest/hacking/ignored_list_T110.txt
new file mode 100644
index 0000000..f1f21d1
--- /dev/null
+++ b/tempest/hacking/ignored_list_T110.txt
@@ -0,0 +1,8 @@
+./tempest/services/messaging/json/messaging_client.py
+./tempest/services/object_storage/object_client.py
+./tempest/services/telemetry/json/alarming_client.py
+./tempest/services/telemetry/json/telemetry_client.py
+./tempest/services/volume/base/base_qos_client.py
+./tempest/services/volume/base/base_backups_client.py
+./tempest/services/baremetal/base.py
+./tempest/services/network/json/network_client.py
diff --git a/tempest/hacking/ignored_list_T111.txt b/tempest/hacking/ignored_list_T111.txt
new file mode 100644
index 0000000..20d58d2
--- /dev/null
+++ b/tempest/hacking/ignored_list_T111.txt
@@ -0,0 +1,2 @@
+./tempest/services/baremetal/base.py
+./tempest/services/network/json/quotas_client.py
diff --git a/tempest/manager.py b/tempest/manager.py
index d7c3128..9904aa6 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -23,30 +23,25 @@
 
 
 class Manager(object):
-
-    """
-    Base manager class
+    """Base manager class
 
     Manager objects are responsible for providing a configuration object
     and a client object for a test case to use in performing actions.
     """
 
-    def __init__(self, credentials=None):
-        """
-        We allow overriding of the credentials used within the various
-        client classes managed by the Manager object. Left as None, the
-        standard username/password/tenant_name[/domain_name] is used.
+    def __init__(self, credentials):
+        """Initialization of base manager class
 
-        :param credentials: Override of the credentials
+        Credentials to be used within the various client classes managed by the
+        Manager object must be defined.
+
+        :param credentials: type Credentials or TestResources
         """
-        self.auth_version = CONF.identity.auth_version
-        if credentials is None:
-            self.credentials = cred_provider.get_configured_credentials('user')
-        else:
-            self.credentials = credentials
+        self.credentials = credentials
         # Check if passed or default credentials are valid
         if not self.credentials.is_valid():
             raise exceptions.InvalidCredentials()
+        self.auth_version = CONF.identity.auth_version
         # Tenant isolation creates TestResources, but
         # PreProvisionedCredentialProvider and some tests create Credentials
         if isinstance(credentials, cred_provider.TestResources):
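Since Manager no longer defaults to the configured user when credentials is None, any caller that relied on that fallback now has to resolve credentials itself before constructing the manager. A rough sketch of the new calling convention, reusing the cred_provider call that was removed from __init__ above (import paths follow the module names used in this file and may differ):

    from tempest.common import cred_provider
    from tempest import manager

    # The caller now obtains credentials explicitly; this is the same
    # lookup Manager used to perform internally when credentials was None.
    creds = cred_provider.get_configured_credentials('user')
    mgr = manager.Manager(credentials=creds)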
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index a10dc83..1962286 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -23,7 +23,7 @@
 from tempest_lib.common.utils import misc as misc_utils
 from tempest_lib import exceptions as lib_exc
 
-from tempest.common import fixed_network
+from tempest.common import compute
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
 from tempest.common import waiters
@@ -47,22 +47,30 @@
         super(ScenarioTest, cls).setup_clients()
         # Clients (in alphabetical order)
         cls.flavors_client = cls.manager.flavors_client
-        cls.floating_ips_client = cls.manager.floating_ips_client
-        # Glance image client v1
-        cls.image_client = cls.manager.image_client
+        cls.compute_floating_ips_client = (
+            cls.manager.compute_floating_ips_client)
+        if CONF.service_available.glance:
+            # Glance image client v1
+            cls.image_client = cls.manager.image_client
         # Compute image client
-        cls.images_client = cls.manager.images_client
+        cls.compute_images_client = cls.manager.compute_images_client
         cls.keypairs_client = cls.manager.keypairs_client
         # Nova security groups client
-        cls.security_groups_client = cls.manager.security_groups_client
-        cls.security_group_rules_client = (
-            cls.manager.security_group_rules_client)
+        cls.compute_security_groups_client = (
+            cls.manager.compute_security_groups_client)
+        cls.compute_security_group_rules_client = (
+            cls.manager.compute_security_group_rules_client)
         cls.servers_client = cls.manager.servers_client
         cls.interface_client = cls.manager.interfaces_client
         # Neutron network client
         cls.network_client = cls.manager.network_client
         cls.networks_client = cls.manager.networks_client
+        cls.ports_client = cls.manager.ports_client
         cls.subnets_client = cls.manager.subnets_client
+        cls.floating_ips_client = cls.manager.floating_ips_client
+        cls.security_groups_client = cls.manager.security_groups_client
+        cls.security_group_rules_client = (
+            cls.manager.security_group_rules_client)
         # Heat client
         cls.orchestration_client = cls.manager.orchestration_client
 
@@ -129,14 +137,13 @@
         self.cleanup_waits.append(wait_dict)
 
     def _wait_for_cleanups(self):
-        """To handle async delete actions, a list of waits is added
-        which will be iterated over as the last step of clearing the
-        cleanup queue. That way all the delete calls are made up front
-        and the tests won't succeed unless the deletes are eventually
-        successful. This is the same basic approach used in the api tests to
-        limit cleanup execution time except here it is multi-resource,
-        because of the nature of the scenario tests.
-        """
+        # To handle async delete actions, a list of waits is added
+        # which will be iterated over as the last step of clearing the
+        # cleanup queue. That way all the delete calls are made up front
+        # and the tests won't succeed unless the deletes are eventually
+        # successful. This is the same basic approach used in the api tests to
+        # limit cleanup execution time except here it is multi-resource,
+        # because of the nature of the scenario tests.
         for wait in self.cleanup_waits:
             waiter_callable = wait.pop('waiter_callable')
             waiter_callable(**wait)
@@ -155,62 +162,115 @@
         self.addCleanup(client.delete_keypair, name)
         return body['keypair']
 
-    def create_server(self, name=None, image=None, flavor=None,
-                      wait_on_boot=True, wait_on_delete=True,
-                      create_kwargs=None):
-        """Creates VM instance.
+    def create_server(self, name=None, image_id=None, flavor=None,
+                      validatable=False, wait_until=None,
+                      wait_on_delete=True, clients=None, **kwargs):
+        """Wrapper utility that returns a test server.
 
-        @param image: image from which to create the instance
-        @param wait_on_boot: wait for status ACTIVE before continue
-        @param wait_on_delete: force synchronous delete on cleanup
-        @param create_kwargs: additional details for instance creation
-        @return: server dict
+        This wrapper utility calls the common create test server and
+        returns a test server. The purpose of this wrapper is to minimize
+        the impact on the code of the tests already using this
+        function.
         """
-        if name is None:
-            name = data_utils.rand_name(self.__class__.__name__)
-        if image is None:
-            image = CONF.compute.image_ref
-        if flavor is None:
-            flavor = CONF.compute.flavor_ref
-        if create_kwargs is None:
-            create_kwargs = {}
-        network = self.get_tenant_network()
-        create_kwargs = fixed_network.set_networks_kwarg(network,
-                                                         create_kwargs)
 
-        LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
-                  name, image, flavor)
-        server = self.servers_client.create_server(name=name, imageRef=image,
-                                                   flavorRef=flavor,
-                                                   **create_kwargs)['server']
+        # NOTE(jlanoux): As a first step, ssh checks in the scenario
+        # tests need to be run regardless of the run_validation and
+        # validatable parameters and thus until the ssh validation job
+        # becomes voting in CI. The test resources management and IP
+        # association are taken care of in the scenario tests.
+        # Therefore, the validatable parameter is set to false in all
+        # those tests. In this way create_server just returns a standard
+        # server and the scenario tests always perform ssh checks.
+
+        # Needed for the cross_tenant_traffic test:
+        if clients is None:
+            clients = self.manager
+
+        vnic_type = CONF.network.port_vnic_type
+
+        # If vnic_type is configured create port for
+        # every network
+        if vnic_type:
+            ports = []
+            networks = []
+            create_port_body = {'binding:vnic_type': vnic_type,
+                                'namestart': 'port-smoke'}
+            if kwargs:
+                # Convert security group names to security group ids
+                # to pass to create_port
+                if 'security_groups' in kwargs:
+                    security_groups =\
+                        clients.security_groups_client.list_security_groups(
+                        ).get('security_groups')
+                    sec_dict = dict([(s['name'], s['id'])
+                                    for s in security_groups])
+
+                    sec_groups_names = [s['name'] for s in kwargs.pop(
+                        'security_groups')]
+                    security_groups_ids = [sec_dict[s]
+                                           for s in sec_groups_names]
+
+                    if security_groups_ids:
+                        create_port_body[
+                            'security_groups'] = security_groups_ids
+                networks = kwargs.pop('networks')
+
+            # If there are no networks passed to us we look up the
+            # tenant's private networks and create a port if there is
+            # only one private network. This is the same behaviour we
+            # would expect when passing the call to the clients with
+            # no networks.
+            if not networks:
+                networks = clients.networks_client.list_networks(
+                    filters={'router:external': False})
+                self.assertEqual(1, len(networks),
+                                 "There is more than one"
+                                 " network for the tenant")
+            for net in networks:
+                net_id = net['uuid']
+                port = self._create_port(network_id=net_id,
+                                         client=clients.ports_client,
+                                         **create_port_body)
+                ports.append({'port': port.id})
+            if ports:
+                kwargs['networks'] = ports
+            self.ports = ports
+
+        tenant_network = self.get_tenant_network()
+
+        body, servers = compute.create_test_server(
+            clients,
+            tenant_network=tenant_network,
+            wait_until=wait_until,
+            name=name, flavor=flavor,
+            image_id=image_id, **kwargs)
+
+        # TODO(jlanoux) Move wait_on_delete in compute.py
         if wait_on_delete:
             self.addCleanup(waiters.wait_for_server_termination,
-                            self.servers_client,
-                            server['id'])
+                            clients.servers_client,
+                            body['id'])
+
         self.addCleanup_with_wait(
             waiter_callable=waiters.wait_for_server_termination,
-            thing_id=server['id'], thing_id_param='server_id',
+            thing_id=body['id'], thing_id_param='server_id',
             cleanup_callable=self.delete_wrapper,
-            cleanup_args=[self.servers_client.delete_server, server['id']],
-            waiter_client=self.servers_client)
-        if wait_on_boot:
-            waiters.wait_for_server_status(self.servers_client,
-                                           server_id=server['id'],
-                                           status='ACTIVE')
-        # The instance retrieved on creation is missing network
-        # details, necessitating retrieval after it becomes active to
-        # ensure correct details.
-        server = self.servers_client.show_server(server['id'])['server']
-        self.assertEqual(server['name'], name)
+            cleanup_args=[clients.servers_client.delete_server, body['id']],
+            waiter_client=clients.servers_client)
+        server = clients.servers_client.show_server(body['id'])['server']
         return server
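With the wrapper delegating to compute.create_test_server, boot-time options that used to be packed into create_kwargs are now passed straight through as keyword arguments, and wait_on_boot is replaced by wait_until. A representative call inside a scenario test after this change (names are illustrative):

    keypair = self.create_keypair()
    server = self.create_server(
        name='smoke-server',       # optional
        key_name=keypair['name'],  # forwarded to the compute helper via **kwargs
        wait_until='ACTIVE')       # replaces the old wait_on_boot flag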
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
                       imageRef=None, volume_type=None, wait_on_delete=True):
         if name is None:
             name = data_utils.rand_name(self.__class__.__name__)
-        volume = self.volumes_client.create_volume(
-            size=size, display_name=name, snapshot_id=snapshot_id,
-            imageRef=imageRef, volume_type=volume_type)['volume']
+        kwargs = {'display_name': name,
+                  'snapshot_id': snapshot_id,
+                  'imageRef': imageRef,
+                  'volume_type': volume_type}
+        if size is not None:
+            kwargs.update({'size': size})
+        volume = self.volumes_client.create_volume(**kwargs)['volume']
 
         if wait_on_delete:
             self.addCleanup(self.volumes_client.wait_for_resource_deletion,
@@ -236,8 +296,8 @@
         return volume
 
     def _create_loginable_secgroup_rule(self, secgroup_id=None):
-        _client = self.security_groups_client
-        _client_rules = self.security_group_rules_client
+        _client = self.compute_security_groups_client
+        _client_rules = self.compute_security_group_rules_client
         if secgroup_id is None:
             sgs = _client.list_security_groups()['security_groups']
             for sg in sgs:
@@ -268,9 +328,6 @@
         for ruleset in rulesets:
             sg_rule = _client_rules.create_security_group_rule(
                 parent_group_id=secgroup_id, **ruleset)['security_group_rule']
-            self.addCleanup(self.delete_wrapper,
-                            _client_rules.delete_security_group_rule,
-                            sg_rule['id'])
             rules.append(sg_rule)
         return rules
 
@@ -278,70 +335,55 @@
         # Create security group
         sg_name = data_utils.rand_name(self.__class__.__name__)
         sg_desc = sg_name + " description"
-        secgroup = self.security_groups_client.create_security_group(
+        secgroup = self.compute_security_groups_client.create_security_group(
             name=sg_name, description=sg_desc)['security_group']
         self.assertEqual(secgroup['name'], sg_name)
         self.assertEqual(secgroup['description'], sg_desc)
-        self.addCleanup(self.delete_wrapper,
-                        self.security_groups_client.delete_security_group,
-                        secgroup['id'])
+        self.addCleanup(
+            self.delete_wrapper,
+            self.compute_security_groups_client.delete_security_group,
+            secgroup['id'])
 
         # Add rules to the security group
         self._create_loginable_secgroup_rule(secgroup['id'])
 
         return secgroup
 
-    def get_remote_client(self, server_or_ip, username=None, private_key=None,
-                          log_console_of_servers=None):
+    def get_remote_client(self, ip_address, username=None, private_key=None):
         """Get a SSH client to a remote server
 
-        @param server_or_ip a server object as returned by Tempest compute
-            client or an IP address to connect to
+        @param ip_address the server floating or fixed IP address to use
+                          for ssh validation
         @param username name of the Linux account on the remote server
         @param private_key the SSH private key to use
-        @param log_console_of_servers a list of server objects. Each server
-            in the list will have its console printed in the logs in case the
-            SSH connection failed to be established
         @return a RemoteClient object
         """
-        if isinstance(server_or_ip, six.string_types):
-            ip = server_or_ip
-        else:
-            addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
-            try:
-                ip = (addr['addr'] for addr in addrs if
-                      netaddr.valid_ipv4(addr['addr'])).next()
-            except StopIteration:
-                raise lib_exc.NotFound("No IPv4 addresses to use for SSH to "
-                                       "remote server.")
 
         if username is None:
-            username = CONF.scenario.ssh_user
+            username = CONF.validation.image_ssh_user
         # Set this with 'keypair' or others to log in with keypair or
         # username/password.
-        if CONF.compute.ssh_auth_method == 'keypair':
+        if CONF.validation.auth_method == 'keypair':
             password = None
             if private_key is None:
                 private_key = self.keypair['private_key']
         else:
-            password = CONF.compute.image_ssh_password
+            password = CONF.validation.image_ssh_password
             private_key = None
-        linux_client = remote_client.RemoteClient(ip, username,
+        linux_client = remote_client.RemoteClient(ip_address, username,
                                                   pkey=private_key,
                                                   password=password)
         try:
             linux_client.validate_authentication()
         except Exception as e:
             message = ('Initializing SSH connection to %(ip)s failed. '
-                       'Error: %(error)s' % {'ip': ip, 'error': e})
+                       'Error: %(error)s' % {'ip': ip_address,
+                                             'error': e})
             caller = misc_utils.find_test_caller()
             if caller:
                 message = '(%s) %s' % (caller, message)
             LOG.exception(message)
-            # If we don't explicitly set for which servers we want to
-            # log the console output then all the servers will be logged.
-            # See the definition of _log_console_output()
-            self._log_console_output(log_console_of_servers)
+            self._log_console_output()
             raise
 
         return linux_client
@@ -379,20 +421,22 @@
                   (img_path, img_container_format, img_disk_format,
                    img_properties, ami_img_path, ari_img_path, aki_img_path))
         try:
-            self.image = self._image_create('scenario-img',
-                                            img_container_format,
-                                            img_path,
-                                            disk_format=img_disk_format,
-                                            properties=img_properties)
+            image = self._image_create('scenario-img',
+                                       img_container_format,
+                                       img_path,
+                                       disk_format=img_disk_format,
+                                       properties=img_properties)
         except IOError:
             LOG.debug("A qcow2 image was not found. Try to get a uec image.")
             kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
             ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
             properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
-            self.image = self._image_create('scenario-ami', 'ami',
-                                            path=ami_img_path,
-                                            properties=properties)
-        LOG.debug("image:%s" % self.image)
+            image = self._image_create('scenario-ami', 'ami',
+                                       path=ami_img_path,
+                                       properties=properties)
+        LOG.debug("image:%s" % image)
+
+        return image
 
     def _log_console_output(self, servers=None):
         if not CONF.compute_feature_enabled.console_output:
@@ -403,7 +447,7 @@
             servers = servers['servers']
         for server in servers:
             console_output = self.servers_client.get_console_output(
-                server['id'], length=None)['output']
+                server['id'])['output']
             LOG.debug('Console output for %s\nbody=\n%s',
                       server['id'], console_output)
 
@@ -416,7 +460,7 @@
         # Glance client
         _image_client = self.image_client
         # Compute client
-        _images_client = self.images_client
+        _images_client = self.compute_images_client
         if name is None:
             name = data_utils.rand_name('scenario-snapshot')
         LOG.debug("Creating a snapshot image for server: %s", server['name'])
@@ -487,7 +531,7 @@
 
     def ping_ip_address(self, ip_address, should_succeed=True,
                         ping_timeout=None):
-        timeout = ping_timeout or CONF.compute.ping_timeout
+        timeout = ping_timeout or CONF.validation.ping_timeout
         cmd = ['ping', '-c1', '-w1', ip_address]
 
         def ping():
@@ -517,7 +561,8 @@
                               username=None,
                               private_key=None,
                               should_connect=True):
-        """
+        """Check server connectivity
+
         :param ip_address: server to test against
         :param username: server's ssh username
         :param private_key: server's ssh private key to be used
@@ -560,22 +605,20 @@
             raise
 
     def create_floating_ip(self, thing, pool_name=None):
-        """Creates a floating IP and associates to a server using
-        Nova clients
-        """
+        """Create a floating IP and associates to a server on Nova"""
 
-        floating_ip = (self.floating_ips_client.create_floating_ip(pool_name)
-                       ['floating_ip'])
+        floating_ip = (self.compute_floating_ips_client.
+                       create_floating_ip(pool=pool_name)['floating_ip'])
         self.addCleanup(self.delete_wrapper,
-                        self.floating_ips_client.delete_floating_ip,
+                        self.compute_floating_ips_client.delete_floating_ip,
                         floating_ip['id'])
-        self.floating_ips_client.associate_floating_ip_to_server(
+        self.compute_floating_ips_client.associate_floating_ip_to_server(
             floating_ip['ip'], thing['id'])
         return floating_ip
 
-    def create_timestamp(self, server_or_ip, dev_name=None, mount_path='/mnt',
+    def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                          private_key=None):
-        ssh_client = self.get_remote_client(server_or_ip,
+        ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key)
         if dev_name is not None:
             ssh_client.make_fs(dev_name)
@@ -588,9 +631,9 @@
             ssh_client.umount(mount_path)
         return timestamp
 
-    def get_timestamp(self, server_or_ip, dev_name=None, mount_path='/mnt',
+    def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                       private_key=None):
-        ssh_client = self.get_remote_client(server_or_ip,
+        ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key)
         if dev_name is not None:
             ssh_client.mount(dev_name, mount_path)
@@ -600,9 +643,30 @@
             ssh_client.umount(mount_path)
         return timestamp
 
+    def get_server_ip(self, server):
+        """Get the server fixed or floating IP.
+
+        Based on the configuration we're in, return the correct IP
+        address for validating that a guest is up.
+        """
+        if CONF.validation.connect_method == 'floating':
+            # The tests calling this method don't have a floating IP
+            # and can't make use of the validation resources. So the
+            # method is creating the floating IP here.
+            return self.create_floating_ip(server)['ip']
+        elif CONF.validation.connect_method == 'fixed':
+            addresses = server['addresses'][CONF.validation.network_for_ssh]
+            for address in addresses:
+                if address['version'] == CONF.validation.ip_version_for_ssh:
+                    return address['addr']
+            raise exceptions.ServerUnreachable()
+        else:
+            raise exceptions.InvalidConfiguration()
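Scenario tests are now expected to resolve the guest address once with get_server_ip() and hand a plain IP string to the ssh helpers, as the reworked baremetal test later in this change does. A condensed sketch of that flow inside a scenario test (assuming self.keypair was created earlier):

    # Floating or fixed IP, depending on CONF.validation.connect_method.
    ip_address = self.get_server_ip(server)
    linux_client = self.get_remote_client(
        ip_address, private_key=self.keypair['private_key'])
    timestamp = self.create_timestamp(
        ip_address, private_key=self.keypair['private_key'])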
+
 
 class NetworkScenarioTest(ScenarioTest):
     """Base class for network scenario tests.
+
     This class provides helpers for network scenario tests, using the neutron
     API. Helpers from the ancestor class which use the nova network API are
     overridden with the neutron API.
@@ -661,21 +725,21 @@
 
     def _list_ports(self, *args, **kwargs):
         """List ports using admin creds """
-        ports_list = self.admin_manager.network_client.list_ports(
+        ports_list = self.admin_manager.ports_client.list_ports(
             *args, **kwargs)
         return ports_list['ports']
 
     def _list_agents(self, *args, **kwargs):
         """List agents using admin creds """
-        agents_list = self.admin_manager.network_client.list_agents(
+        agents_list = self.admin_manager.network_agents_client.list_agents(
             *args, **kwargs)
         return agents_list['agents']
 
     def _create_subnet(self, network, client=None, subnets_client=None,
                        namestart='subnet-smoke', **kwargs):
-        """
-        Create a subnet for the given network within the cidr block
-        configured for tenant networks.
+        """Create a subnet for the given network
+
+        within the cidr block configured for tenant networks.
         """
         if not client:
             client = self.network_client
@@ -683,9 +747,10 @@
             subnets_client = self.subnets_client
 
         def cidr_in_use(cidr, tenant_id):
-            """
-            :return True if subnet with cidr already exist in tenant
-                False else
+            """Check cidr existence
+
+            :returns: True if a subnet with the cidr already exists in
+                  the tenant, False otherwise
             """
             cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
             return len(cidr_in_use) != 0
@@ -735,14 +800,14 @@
     def _create_port(self, network_id, client=None, namestart='port-quotatest',
                      **kwargs):
         if not client:
-            client = self.network_client
+            client = self.ports_client
         name = data_utils.rand_name(namestart)
         result = client.create_port(
             name=name,
             network_id=network_id,
             **kwargs)
         self.assertIsNotNone(result, 'Unable to allocate port')
-        port = net_resources.DeletablePort(client=client,
+        port = net_resources.DeletablePort(ports_client=client,
                                            **result['port'])
         self.addCleanup(self.delete_wrapper, port.delete)
         return port
@@ -750,13 +815,16 @@
     def _get_server_port_id_and_ip4(self, server, ip_addr=None):
         ports = self._list_ports(device_id=server['id'], status='ACTIVE',
                                  fixed_ip=ip_addr)
-        # it might happen here that this port has more then one ip address
-        # as in case of dual stack- when this port is created on 2 subnets
+        # A port can have more than one IP address in some cases.
+        # If the network is dual-stack (IPv4 + IPv6), this port is associated
+        # with 2 subnets
         port_map = [(p["id"], fxip["ip_address"])
                     for p in ports
                     for fxip in p["fixed_ips"]
                     if netaddr.valid_ipv4(fxip["ip_address"])]
 
+        self.assertNotEqual(0, len(port_map),
+                            "No IPv4 addresses found in: %s" % ports)
         self.assertEqual(len(port_map), 1,
                          "Found multiple IPv4 addresses: %s. "
                          "Unable to determine which port to target."
@@ -771,13 +839,11 @@
 
     def create_floating_ip(self, thing, external_network_id=None,
                            port_id=None, client=None):
-        """Creates a floating IP and associates to a resource/port using
-        Neutron client
-        """
+        """Create a floating IP and associates to a resource/port on Neutron"""
         if not external_network_id:
             external_network_id = CONF.network.public_network_id
         if not client:
-            client = self.network_client
+            client = self.floating_ips_client
         if not port_id:
             port_id, ip4 = self._get_server_port_id_and_ip4(thing)
         else:
@@ -801,9 +867,7 @@
         return floating_ip
 
     def _disassociate_floating_ip(self, floating_ip):
-        """
-        :param floating_ip: type DeletableFloatingIp
-        """
+        """:param floating_ip: type DeletableFloatingIp"""
         floating_ip.update(port_id=None)
         self.assertIsNone(floating_ip.port_id)
         return floating_ip
@@ -855,42 +919,49 @@
             self._log_net_info(e)
             raise
 
-    def _check_remote_connectivity(self, source, dest, should_succeed=True):
-        """
-        check ping server via source ssh connection
+    def _check_remote_connectivity(self, source, dest, should_succeed=True,
+                                   nic=None):
+        """check ping server via source ssh connection
 
         :param source: RemoteClient: an ssh connection from which to ping
         :param dest: an IP to ping against
         :param should_succeed: boolean should ping succeed or not
+        :param nic: specific network interface to ping from
         :returns: boolean -- should_succeed == ping
         :returns: ping is false if ping failed
         """
         def ping_remote():
             try:
-                source.ping_host(dest)
+                source.ping_host(dest, nic=nic)
             except lib_exc.SSHExecCommandFailed:
-                LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
-                         % (dest, source.ssh_client.host))
+                LOG.warning('Failed to ping IP: %s via a ssh connection '
+                            'from: %s.' % (dest, source.ssh_client.host))
                 return not should_succeed
             return should_succeed
 
         return tempest.test.call_until_true(ping_remote,
-                                            CONF.compute.ping_timeout,
+                                            CONF.validation.ping_timeout,
                                             1)
 
-    def _create_security_group(self, client=None, tenant_id=None,
-                               namestart='secgroup-smoke'):
-        if client is None:
-            client = self.network_client
+    def _create_security_group(self, security_group_rules_client=None,
+                               tenant_id=None,
+                               namestart='secgroup-smoke',
+                               security_groups_client=None):
+        if security_group_rules_client is None:
+            security_group_rules_client = self.security_group_rules_client
+        if security_groups_client is None:
+            security_groups_client = self.security_groups_client
         if tenant_id is None:
-            tenant_id = client.tenant_id
-        secgroup = self._create_empty_security_group(namestart=namestart,
-                                                     client=client,
-                                                     tenant_id=tenant_id)
+            tenant_id = security_groups_client.tenant_id
+        secgroup = self._create_empty_security_group(
+            namestart=namestart, client=security_groups_client,
+            tenant_id=tenant_id)
 
         # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule(client=client,
-                                                     secgroup=secgroup)
+        rules = self._create_loginable_secgroup_rule(
+            security_group_rules_client=security_group_rules_client,
+            secgroup=secgroup,
+            security_groups_client=security_groups_client)
         for rule in rules:
             self.assertEqual(tenant_id, rule.tenant_id)
             self.assertEqual(secgroup.id, rule.security_group_id)
@@ -908,7 +979,7 @@
         :returns: DeletableSecurityGroup -- containing the secgroup created
         """
         if client is None:
-            client = self.network_client
+            client = self.security_groups_client
         if not tenant_id:
             tenant_id = client.tenant_id
         sg_name = data_utils.rand_name(namestart)
@@ -933,7 +1004,7 @@
         :returns: DeletableSecurityGroup -- default secgroup for given tenant
         """
         if client is None:
-            client = self.network_client
+            client = self.security_groups_client
         if not tenant_id:
             tenant_id = client.tenant_id
         sgs = [
@@ -945,8 +1016,10 @@
         return net_resources.DeletableSecurityGroup(client=client,
                                                     **sgs[0])
 
-    def _create_security_group_rule(self, secgroup=None, client=None,
-                                    tenant_id=None, **kwargs):
+    def _create_security_group_rule(self, secgroup=None,
+                                    sec_group_rules_client=None,
+                                    tenant_id=None,
+                                    security_groups_client=None, **kwargs):
         """Create a rule from a dictionary of rule parameters.
 
         Create a rule in a secgroup. if secgroup not defined will search for
@@ -964,38 +1037,45 @@
                     port_range_max: 22
                     }
         """
-        if client is None:
-            client = self.network_client
+        if sec_group_rules_client is None:
+            sec_group_rules_client = self.security_group_rules_client
+        if security_groups_client is None:
+            security_groups_client = self.security_groups_client
         if not tenant_id:
-            tenant_id = client.tenant_id
+            tenant_id = security_groups_client.tenant_id
         if secgroup is None:
-            secgroup = self._default_security_group(client=client,
-                                                    tenant_id=tenant_id)
+            secgroup = self._default_security_group(
+                client=security_groups_client, tenant_id=tenant_id)
 
         ruleset = dict(security_group_id=secgroup.id,
                        tenant_id=secgroup.tenant_id)
         ruleset.update(kwargs)
 
-        sg_rule = client.create_security_group_rule(**ruleset)
+        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
         sg_rule = net_resources.DeletableSecurityGroupRule(
-            client=client,
+            client=sec_group_rules_client,
             **sg_rule['security_group_rule']
         )
-        self.addCleanup(self.delete_wrapper, sg_rule.delete)
         self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
         self.assertEqual(secgroup.id, sg_rule.security_group_id)
 
         return sg_rule
 
-    def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
-        """These rules are intended to permit inbound ssh and icmp
+    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
+                                        secgroup=None,
+                                        security_groups_client=None):
+        """Create loginable security group rule
+
+        These rules are intended to permit inbound ssh and icmp
         traffic from all sources, so no group_id is provided.
         Setting a group_id would only permit traffic from ports
         belonging to the same security group.
         """
 
-        if client is None:
-            client = self.network_client
+        if security_group_rules_client is None:
+            security_group_rules_client = self.security_group_rules_client
+        if security_groups_client is None:
+            security_groups_client = self.security_groups_client
         rules = []
         rulesets = [
             dict(
@@ -1014,12 +1094,16 @@
                 ethertype='IPv6',
             )
         ]
+        sec_group_rules_client = security_group_rules_client
         for ruleset in rulesets:
             for r_direction in ['ingress', 'egress']:
                 ruleset['direction'] = r_direction
                 try:
                     sg_rule = self._create_security_group_rule(
-                        client=client, secgroup=secgroup, **ruleset)
+                        sec_group_rules_client=sec_group_rules_client,
+                        secgroup=secgroup,
+                        security_groups_client=security_groups_client,
+                        **ruleset)
                 except lib_exc.Conflict as ex:
                     # if rule already exist - skip rule and continue
                     msg = 'Security group rule already exists'
@@ -1031,12 +1115,6 @@
 
         return rules
 
-    def _ssh_to_server(self, server, private_key):
-        ssh_login = CONF.compute.image_ssh_user
-        return self.get_remote_client(server,
-                                      username=ssh_login,
-                                      private_key=private_key)
-
     def _get_router(self, client=None, tenant_id=None):
         """Retrieve a router for the given tenant id.
 
@@ -1124,69 +1202,6 @@
             subnet.add_to_router(router.id)
         return network, subnet, router
 
-    def create_server(self, name=None, image=None, flavor=None,
-                      wait_on_boot=True, wait_on_delete=True,
-                      network_client=None, networks_client=None,
-                      create_kwargs=None):
-        if network_client is None:
-            network_client = self.network_client
-        if networks_client is None:
-            networks_client = self.networks_client
-
-        vnic_type = CONF.network.port_vnic_type
-
-        # If vnic_type is configured create port for
-        # every network
-        if vnic_type:
-            ports = []
-            networks = []
-            create_port_body = {'binding:vnic_type': vnic_type,
-                                'namestart': 'port-smoke'}
-            if create_kwargs:
-                # Convert security group names to security group ids
-                # to pass to create_port
-                if create_kwargs.get('security_groups'):
-                    security_groups = network_client.list_security_groups(
-                        ).get('security_groups')
-                    sec_dict = dict([(s['name'], s['id'])
-                                    for s in security_groups])
-
-                    sec_groups_names = [s['name'] for s in create_kwargs.get(
-                        'security_groups')]
-                    security_groups_ids = [sec_dict[s]
-                                           for s in sec_groups_names]
-
-                    if security_groups_ids:
-                        create_port_body[
-                            'security_groups'] = security_groups_ids
-                networks = create_kwargs.get('networks')
-
-            # If there are no networks passed to us we look up
-            # for the tenant's private networks and create a port
-            # if there is only one private network. The same behaviour
-            # as we would expect when passing the call to the clients
-            # with no networks
-            if not networks:
-                networks = networks_client.list_networks(filters={
-                    'router:external': False})
-                self.assertEqual(1, len(networks),
-                                 "There is more than one"
-                                 " network for the tenant")
-            for net in networks:
-                net_id = net['uuid']
-                port = self._create_port(network_id=net_id,
-                                         client=network_client,
-                                         **create_port_body)
-                ports.append({'port': port.id})
-            if ports:
-                create_kwargs['networks'] = ports
-            self.ports = ports
-
-        return super(NetworkScenarioTest, self).create_server(
-            name=name, image=image, flavor=flavor,
-            wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete,
-            create_kwargs=create_kwargs)
-
 
 # power/provision states as of icehouse
 class BaremetalPowerStates(object):
@@ -1301,19 +1316,9 @@
     def add_keypair(self):
         self.keypair = self.create_keypair()
 
-    def verify_connectivity(self, ip=None):
-        if ip:
-            dest = self.get_remote_client(ip)
-        else:
-            dest = self.get_remote_client(self.instance)
-        dest.validate_authentication()
-
     def boot_instance(self):
-        create_kwargs = {
-            'key_name': self.keypair['name']
-        }
         self.instance = self.create_server(
-            wait_on_boot=False, create_kwargs=create_kwargs)
+            key_name=self.keypair['name'])
 
         self.wait_node(self.instance['id'])
         self.node = self.get_node(instance_id=self.instance['id'])
@@ -1347,9 +1352,7 @@
 
 
 class EncryptionScenarioTest(ScenarioTest):
-    """
-    Base class for encryption scenario tests
-    """
+    """Base class for encryption scenario tests"""
 
     credentials = ['primary', 'admin']
 
@@ -1361,16 +1364,6 @@
         else:
             cls.admin_volume_types_client = cls.os_adm.volume_types_v2_client
 
-    def _wait_for_volume_status(self, status):
-        self.status_timeout(
-            self.volume_client.volumes, self.volume.id, status)
-
-    def nova_boot(self):
-        self.keypair = self.create_keypair()
-        create_kwargs = {'key_name': self.keypair['name']}
-        self.server = self.create_server(image=self.image,
-                                         create_kwargs=create_kwargs)
-
     def create_volume_type(self, client=None, name=None):
         if not client:
             client = self.admin_volume_types_client
@@ -1379,7 +1372,7 @@
         randomized_name = data_utils.rand_name('scenario-type-' + name)
         LOG.debug("Creating a volume type: %s", randomized_name)
         body = client.create_volume_type(
-            randomized_name)['volume_type']
+            name=randomized_name)['volume_type']
         self.assertIn('id', body)
         self.addCleanup(client.delete_volume_type, body['id'])
         return body
@@ -1399,8 +1392,7 @@
 
 
 class ObjectStorageScenarioTest(ScenarioTest):
-    """
-    Provide harness to do Object Storage scenario tests.
+    """Provide harness to do Object Storage scenario tests.
 
     Subclasses implement the tests that use the methods provided by this
     class.
@@ -1468,10 +1460,8 @@
     def list_and_check_container_objects(self, container_name,
                                          present_obj=None,
                                          not_present_obj=None):
-        """
-        List objects for a given container and assert which are present and
-        which are not.
-        """
+        # List objects for a given container and assert which are present and
+        # which are not.
         if present_obj is None:
             present_obj = []
         if not_present_obj is None:
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 22d2603..cace90b 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -13,20 +13,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest.common import tempest_fixtures as fixtures
 from tempest.common.utils import data_utils
 from tempest.scenario import manager
 from tempest import test
 
 
-LOG = logging.getLogger(__name__)
-
-
 class TestAggregatesBasicOps(manager.ScenarioTest):
-    """
-    Creates an aggregate within an availability zone
+    """Creates an aggregate within an availability zone
+
     Adds a host to the aggregate
     Checks aggregate details
     Updates aggregate's name
@@ -48,16 +43,14 @@
     def _create_aggregate(self, **kwargs):
         aggregate = (self.aggregates_client.create_aggregate(**kwargs)
                      ['aggregate'])
-        self.addCleanup(self._delete_aggregate, aggregate)
+        self.addCleanup(self.aggregates_client.delete_aggregate,
+                        aggregate['id'])
         aggregate_name = kwargs['name']
         availability_zone = kwargs['availability_zone']
         self.assertEqual(aggregate['name'], aggregate_name)
         self.assertEqual(aggregate['availability_zone'], availability_zone)
         return aggregate
 
-    def _delete_aggregate(self, aggregate):
-        self.aggregates_client.delete_aggregate(aggregate['id'])
-
     def _get_host_name(self):
         hosts = self.hosts_client.list_hosts()['hosts']
         self.assertTrue(len(hosts) >= 1)
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index c0b5a44..15d9b66 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -26,9 +26,9 @@
 
 
 class BaremetalBasicOps(manager.BaremetalScenarioTest):
-    """
-    This smoke test tests the pxe_ssh Ironic driver.  It follows this basic
-    set of operations:
+    """This smoke test tests the pxe_ssh Ironic driver.
+
+    It follows this basic set of operations:
         * Creates a keypair
         * Boots an instance using the keypair
         * Monitors the associated Ironic node for power and
@@ -63,15 +63,6 @@
             server_id=self.instance['id'],
             status='ACTIVE')
 
-    def create_remote_file(self, client, filename):
-        """Create a file on the remote client connection.
-
-        After creating the file, force a filesystem sync. Otherwise,
-        if we issue a rebuild too quickly, the file may not exist.
-        """
-        client.exec_command('sudo touch ' + filename)
-        client.exec_command('sync')
-
     def verify_partition(self, client, label, mount, gib_size):
         """Verify a labeled partition's mount point and size."""
         LOG.info("Looking for partition %s mounted on %s" % (label, mount))
@@ -107,17 +98,10 @@
             return None
         return int(ephemeral)
 
-    def add_floating_ip(self):
-        floating_ip = (self.floating_ips_client.create_floating_ip()
-                       ['floating_ip'])
-        self.floating_ips_client.associate_floating_ip_to_server(
-            floating_ip['ip'], self.instance['id'])
-        return floating_ip['ip']
-
     def validate_ports(self):
         for port in self.get_ports(self.node['uuid']):
             n_port_id = port['extra']['vif_port_id']
-            body = self.network_client.show_port(n_port_id)
+            body = self.ports_client.show_port(n_port_id)
             n_port = body['port']
             self.assertEqual(n_port['device_id'], self.instance['id'])
             self.assertEqual(n_port['mac_address'], port['address'])
@@ -125,37 +109,20 @@
     @test.idempotent_id('549173a5-38ec-42bb-b0e2-c8b9f4a08943')
     @test.services('baremetal', 'compute', 'image', 'network')
     def test_baremetal_server_ops(self):
-        test_filename = '/mnt/rebuild_test.txt'
         self.add_keypair()
         self.boot_instance()
         self.validate_ports()
-        self.verify_connectivity()
-        if CONF.compute.ssh_connect_method == 'floating':
-            floating_ip = self.add_floating_ip()
-            self.verify_connectivity(ip=floating_ip)
-
-        vm_client = self.get_remote_client(self.instance)
+        ip_address = self.get_server_ip(self.instance)
+        self.get_remote_client(ip_address).validate_authentication()
+        vm_client = self.get_remote_client(ip_address)
 
         # We expect the ephemeral partition to be mounted on /mnt and to have
         # the same size as our flavor definition.
         eph_size = self.get_flavor_ephemeral_size()
-        if eph_size > 0:
-            preserve_ephemeral = True
-
+        if eph_size:
             self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
             # Create the test file
-            self.create_remote_file(vm_client, test_filename)
-        else:
-            preserve_ephemeral = False
-
-        # Rebuild and preserve the ephemeral partition if it exists
-        self.rebuild_instance(preserve_ephemeral)
-        self.verify_connectivity()
-
-        # Check that we maintained our data
-        if eph_size > 0:
-            vm_client = self.get_remote_client(self.instance)
-            self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
-            vm_client.exec_command('ls ' + test_filename)
+            self.create_timestamp(
+                ip_address, private_key=self.keypair['private_key'])
 
         self.terminate_instance()
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index f6d9f88..5d4f7b3 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -58,7 +58,8 @@
 
 class TestDashboardBasicOps(manager.ScenarioTest):
 
-    """
+    """The test suite for dashboard basic operations
+
     This is a basic scenario test:
     * checks that the login page is available
     * logs in as a regular user
@@ -96,9 +97,13 @@
         req = request.Request(login_url)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         req.add_header('Referer', CONF.dashboard.dashboard_url)
+
+        # Pass the default domain name regardless of the auth version in order
+        # to test the scenario where horizon is running with keystone v3
         params = {'username': username,
                   'password': password,
                   'region': parser.region,
+                  'domain': CONF.auth.default_credentials_domain_name,
                   'csrfmiddlewaretoken': parser.csrf_token}
         self.opener.open(req, parse.urlencode(params))
 
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 3f0123d..dcd77ad 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -22,7 +22,8 @@
 
 class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest):
 
-    """
+    """The test suite for encrypted cinder volumes
+
     This test is for verifying the functionality of encrypted cinder volumes.
 
     For both LUKS and cryptsetup encryption types, this test performs
@@ -41,8 +42,12 @@
             raise cls.skipException('Encrypted volume attach is not supported')
 
     def launch_instance(self):
-        self.glance_image_create()
-        self.nova_boot()
+        image = self.glance_image_create()
+        keypair = self.create_keypair()
+
+        return self.create_server(image_id=image,
+                                  key_name=keypair['name'],
+                                  wait_until='ACTIVE')
 
     def create_encrypted_volume(self, encryption_provider, volume_type):
         volume_type = self.create_volume_type(name=volume_type)
@@ -51,26 +56,26 @@
                                     key_size=512,
                                     cipher='aes-xts-plain64',
                                     control_location='front-end')
-        self.volume = self.create_volume(volume_type=volume_type['name'])
+        return self.create_volume(volume_type=volume_type['name'])
 
-    def attach_detach_volume(self):
-        self.volume = self.nova_volume_attach(self.server, self.volume)
-        self.nova_volume_detach(self.server, self.volume)
+    def attach_detach_volume(self, server, volume):
+        attached_volume = self.nova_volume_attach(server, volume)
+        self.nova_volume_detach(server, attached_volume)
 
     @test.idempotent_id('79165fb4-5534-4b9d-8429-97ccffb8f86e')
     @test.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luks(self):
-        self.launch_instance()
-        self.create_encrypted_volume('nova.volume.encryptors.'
-                                     'luks.LuksEncryptor',
-                                     volume_type='luks')
-        self.attach_detach_volume()
+        server = self.launch_instance()
+        volume = self.create_encrypted_volume('nova.volume.encryptors.'
+                                              'luks.LuksEncryptor',
+                                              volume_type='luks')
+        self.attach_detach_volume(server, volume)
 
     @test.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
     @test.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_cryptsetup(self):
-        self.launch_instance()
-        self.create_encrypted_volume('nova.volume.encryptors.'
-                                     'cryptsetup.CryptsetupEncryptor',
-                                     volume_type='cryptsetup')
-        self.attach_detach_volume()
+        server = self.launch_instance()
+        volume = self.create_encrypted_volume('nova.volume.encryptors.'
+                                              'cryptsetup.CryptsetupEncryptor',
+                                              volume_type='cryptsetup')
+        self.attach_detach_volume(server, volume)
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 63dd4f0..402077f 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from tempest_lib import exceptions as lib_exc
 
 from tempest.common import fixed_network
@@ -26,13 +25,9 @@
 CONF = config.CONF
 
 
-LOG = logging.getLogger(__name__)
-
-
 class TestLargeOpsScenario(manager.ScenarioTest):
 
-    """
-    Test large operations.
+    """Test large operations.
 
     This test below:
     * Spin up multiple instances in one nova call, and repeat three times
@@ -80,17 +75,18 @@
             waiters.wait_for_server_status(self.servers_client,
                                            server['id'], status)
 
-    def nova_boot(self):
+    def nova_boot(self, image):
         name = data_utils.rand_name('scenario-server')
         flavor_id = CONF.compute.flavor_ref
         # Explicitly create secgroup to avoid cleanup at the end of testcases.
         # Since no traffic is tested, we don't need to actually add rules to
         # secgroup
-        secgroup = self.security_groups_client.create_security_group(
+        secgroup = self.compute_security_groups_client.create_security_group(
             name='secgroup-%s' % name,
             description='secgroup-desc-%s' % name)['security_group']
-        self.addCleanupClass(self.security_groups_client.delete_security_group,
-                             secgroup['id'])
+        self.addCleanupClass(
+            self.compute_security_groups_client.delete_security_group,
+            secgroup['id'])
         create_kwargs = {
             'min_count': CONF.scenario.large_ops_number,
             'security_groups': [{'name': secgroup['name']}]
@@ -100,7 +96,7 @@
                                                          create_kwargs)
         self.servers_client.create_server(
             name=name,
-            imageRef=self.image,
+            imageRef=image,
             flavorRef=flavor_id,
             **create_kwargs)
         # needed because of bug 1199788
@@ -119,8 +115,8 @@
         self._wait_for_server_status('ACTIVE')
 
     def _large_ops_scenario(self):
-        self.glance_image_create()
-        self.nova_boot()
+        image = self.glance_image_create()
+        self.nova_boot(image)
 
     @test.idempotent_id('14ba0e78-2ed9-4d17-9659-a48f4756ecb3')
     @test.services('compute', 'image')
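
The class docstring above notes that multiple instances are spun up in one nova call; the min_count argument in the hunk is what makes that happen. A minimal sketch of that boot-and-wait pattern follows, assuming a tempest compute servers client; the list_servers name filter and the placeholder ids are illustrative assumptions, not values from this change.

# Sketch only: boot `count` servers in a single create request, then wait
# for each of them to reach ACTIVE.  `servers_client` is assumed to be a
# tempest compute servers client; image/flavor ids are placeholders.
from tempest.common import waiters
from tempest.common.utils import data_utils


def boot_servers_in_one_call(servers_client, image_id, flavor_id, count,
                             secgroup_name):
    name = data_utils.rand_name('scenario-server')
    servers_client.create_server(
        name=name,
        imageRef=image_id,
        flavorRef=flavor_id,
        min_count=count,  # one API call creates `count` instances
        security_groups=[{'name': secgroup_name}])
    # All instances share the requested name, so filter on it.
    servers = servers_client.list_servers(detail=True, name=name)['servers']
    for server in servers:
        waiters.wait_for_server_status(servers_client, server['id'], 'ACTIVE')
    return servers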
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 22aa06c..f7c7434 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest.common import custom_matchers
 from tempest.common import waiters
 from tempest import config
@@ -24,13 +22,10 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class TestMinimumBasicScenario(manager.ScenarioTest):
 
-    """
-    This is a basic minimum scenario test.
+    """This is a basic minimum scenario test.
 
     This test below:
     * across the multiple components
@@ -38,20 +33,20 @@
     * with and without optional parameters
     * check command outputs
 
+    Steps:
+    1. Create image
+    2. Create keypair
+    3. Boot instance with keypair and get list of instances
+    4. Create volume and show list of volumes
+    5. Attach volume to instance and get list of volumes
+    6. Add IP to instance
+    7. Create and add security group to instance
+    8. Check SSH connection to instance
+    9. Reboot instance
+    10. Check SSH connection to instance after reboot
+
     """
 
-    def _wait_for_server_status(self, server, status):
-        server_id = server['id']
-        # Raise on error defaults to True, which is consistent with the
-        # original function from scenario tests here
-        waiters.wait_for_server_status(self.servers_client,
-                                       server_id, status)
-
-    def nova_boot(self, keypair):
-        create_kwargs = {'key_name': keypair['name']}
-        return self.create_server(image=self.image,
-                                  create_kwargs=create_kwargs)
-
     def nova_list(self):
         servers = self.servers_client.list_servers()
         # The list servers in the compute client is inconsistent...
@@ -67,19 +62,14 @@
             server, custom_matchers.MatchesDictExceptForKeys(
                 got_server, excluded_keys=excluded_keys))
 
-    def cinder_create(self):
-        return self.create_volume()
-
-    def cinder_list(self):
-        return self.volumes_client.list_volumes()['volumes']
-
     def cinder_show(self, volume):
         got_volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual(volume, got_volume)
 
     def nova_reboot(self, server):
-        self.servers_client.reboot_server(server['id'], 'SOFT')
-        self._wait_for_server_status(server, 'ACTIVE')
+        self.servers_client.reboot_server(server['id'], type='SOFT')
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
 
     def check_partitions(self):
         # NOTE(andreaf) The device name may be different on different guest OS
@@ -89,9 +79,9 @@
     def create_and_add_security_group_to_server(self, server):
         secgroup = self._create_security_group()
         self.servers_client.add_security_group(server['id'],
-                                               secgroup['name'])
+                                               name=secgroup['name'])
         self.addCleanup(self.servers_client.remove_security_group,
-                        server['id'], secgroup['name'])
+                        server['id'], name=secgroup['name'])
 
         def wait_for_secgroup_add():
             body = (self.servers_client.show_server(server['id'])
@@ -108,18 +98,19 @@
     @test.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @test.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
-        self.glance_image_create()
-
+        image = self.glance_image_create()
         keypair = self.create_keypair()
 
-        server = self.nova_boot(keypair)
+        server = self.create_server(image_id=image,
+                                    key_name=keypair['name'],
+                                    wait_until='ACTIVE')
         servers = self.nova_list()
         self.assertIn(server['id'], [x['id'] for x in servers])
 
         self.nova_show(server)
 
-        volume = self.cinder_create()
-        volumes = self.cinder_list()
+        volume = self.create_volume()
+        volumes = self.volumes_client.list_volumes()['volumes']
         self.assertIn(volume['id'], [x['id'] for x in volumes])
 
         self.cinder_show(volume)
@@ -131,10 +122,15 @@
         floating_ip = self.create_floating_ip(server)
         self.create_and_add_security_group_to_server(server)
 
+        # check that we can SSH to the server before reboot
         self.linux_client = self.get_remote_client(
             floating_ip['ip'], private_key=keypair['private_key'])
+
         self.nova_reboot(server)
 
+        # check that we can SSH to the server after reboot
+        # (both connections are part of the scenario)
         self.linux_client = self.get_remote_client(
             floating_ip['ip'], private_key=keypair['private_key'])
+
         self.check_partitions()
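
The wait_for_secgroup_add closure defined in the hunk above returns a boolean, but the polling call that consumes it lies outside this excerpt. A minimal sketch of how such a check is typically driven, assuming tempest's test.call_until_true helper and the compute build_timeout/build_interval options; the helper name and the message are illustrative.

# Sketch only: poll a boolean check such as wait_for_secgroup_add() until it
# returns True or the (assumed) compute build timeout expires.
from tempest import config
from tempest import exceptions
from tempest import test

CONF = config.CONF


def wait_or_timeout(check, description):
    if not test.call_until_true(check,
                                CONF.compute.build_timeout,
                                CONF.compute.build_interval):
        raise exceptions.TimeoutException(
            'Timed out waiting for %s' % description)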
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 704342f..2cbe6dc 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 import testtools
 
 from tempest.common.utils import data_utils
@@ -23,14 +22,10 @@
 from tempest import test
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
-
-    """
-    This test case checks VM connectivity after some advanced
-    instance operations executed:
+    """Check VM connectivity after some advanced instance operations executed:
 
      * Stop/Start an instance
      * Reboot an instance
@@ -60,16 +55,13 @@
         security_group = self._create_security_group()
         network, subnet, router = self.create_networks()
         public_network_id = CONF.network.public_network_id
-        create_kwargs = {
-            'networks': [
-                {'uuid': network.id},
-            ],
-            'key_name': keypair['name'],
-            'security_groups': [{'name': security_group['name']}],
-        }
         server_name = data_utils.rand_name('server-smoke')
-        server = self.create_server(name=server_name,
-                                    create_kwargs=create_kwargs)
+        server = self.create_server(
+            name=server_name,
+            networks=[{'uuid': network.id}],
+            key_name=keypair['name'],
+            security_groups=[{'name': security_group['name']}],
+            wait_until='ACTIVE')
         floating_ip = self.create_floating_ip(server, public_network_id)
         # Verify that we can indeed connect to the server before we mess with
         # it's state
@@ -80,7 +72,7 @@
 
     def _check_network_connectivity(self, server, keypair, floating_ip,
                                     should_connect=True):
-        username = CONF.compute.image_ssh_user
+        username = CONF.validation.image_ssh_user
         private_key = keypair['private_key']
         self._check_tenant_network_connectivity(
             server, username, private_key,
@@ -118,7 +110,7 @@
     @test.services('compute', 'network')
     def test_server_connectivity_reboot(self):
         server, keypair, floating_ip = self._setup_network_and_servers()
-        self.servers_client.reboot_server(server['id'], reboot_type='SOFT')
+        self.servers_client.reboot_server(server['id'], type='SOFT')
         self._wait_server_status_and_check_network_connectivity(
             server, keypair, floating_ip)
 
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 31ccd5b..79a5099 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -36,7 +36,8 @@
 
 class TestNetworkBasicOps(manager.NetworkScenarioTest):
 
-    """
+    """The test suite of network basic operations
+
     This smoke test suite assumes that Nova has been configured to
     boot VM's with Neutron-managed networking, and attempts to
     verify network connectivity as follows:
@@ -123,9 +124,9 @@
         self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
 
     def check_networks(self):
-        """
-        Checks that we see the newly created network/subnet/router via
-        checking the result of list_[networks,routers,subnets]
+        """Checks that we see the newly created network/subnet/router
+
+        by checking the result of list_[networks,routers,subnets]
         """
 
         seen_nets = self._list_networks()
@@ -154,16 +155,16 @@
         keypair = self.create_keypair()
         self.keypairs[keypair['name']] = keypair
         security_groups = [{'name': self.security_group['name']}]
-        create_kwargs = {
-            'networks': [
-                {'uuid': network.id},
-            ],
-            'key_name': keypair['name'],
-            'security_groups': security_groups,
-        }
+        network = {'uuid': network.id}
         if port_id is not None:
-            create_kwargs['networks'][0]['port'] = port_id
-        server = self.create_server(name=name, create_kwargs=create_kwargs)
+            network['port'] = port_id
+
+        server = self.create_server(
+            name=name,
+            networks=[network],
+            key_name=keypair['name'],
+            security_groups=security_groups,
+            wait_until='ACTIVE')
         self.servers.append(server)
         return server
 
@@ -171,7 +172,7 @@
         return self.keypairs[server['key_name']]['private_key']
 
     def _check_tenant_network_connectivity(self):
-        ssh_login = CONF.compute.image_ssh_user
+        ssh_login = CONF.validation.image_ssh_user
         for server in self.servers:
             # call the common method in the parent class
             super(TestNetworkBasicOps, self).\
@@ -182,7 +183,8 @@
     def check_public_network_connectivity(
             self, should_connect=True, msg=None,
             should_check_floating_ip_status=True):
-        """Verifies connectivty to a VM via public network and floating IP,
+        """Verifies connectivty to a VM via public network and floating IP
+
         and verifies floating IP has resource status is correct.
 
         :param should_connect: bool. determines if connectivity check is
@@ -193,7 +195,7 @@
         :param should_check_floating_ip_status: bool. should status of
         floating_ip be checked or not
         """
-        ssh_login = CONF.compute.image_ssh_user
+        ssh_login = CONF.validation.image_ssh_user
         floating_ip, server = self.floating_ip_tuple
         ip_address = floating_ip.floating_ip_address
         private_key = None
@@ -238,8 +240,8 @@
         old_floating_ip, server = self.floating_ip_tuple
         ip_address = old_floating_ip.floating_ip_address
         private_key = self._get_server_key(server)
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key)
+        ssh_client = self.get_remote_client(
+            ip_address, private_key=private_key)
         old_nic_list = self._get_server_nics(ssh_client)
         # get a port from a list of one item
         port_list = self._list_ports(device_id=server['id'])
@@ -250,7 +252,7 @@
             net_id=self.new_net.id)['interfaceAttachment']
         self.addCleanup(self.network_client.wait_for_resource_deletion,
                         'port',
-                        interface['port_id'])
+                        interface['port_id'], client=self.ports_client)
         self.addCleanup(self.delete_wrapper,
                         self.interface_client.delete_interface,
                         server['id'], interface['port_id'])
@@ -268,7 +270,7 @@
                 "Old port: %s. Number of new ports: %d" % (
                     CONF.network.build_timeout, old_port,
                     len(self.new_port_list)))
-        new_port = net_resources.DeletablePort(client=self.network_client,
+        new_port = net_resources.DeletablePort(ports_client=self.ports_client,
                                                **self.new_port_list[0])
 
         def check_new_nic():
@@ -285,7 +287,7 @@
         num, new_nic = self.diff_list[0]
         ssh_client.assign_static_ip(nic=new_nic,
                                     addr=new_port.fixed_ips[0]['ip_address'])
-        ssh_client.turn_nic_on(nic=new_nic)
+        ssh_client.set_nic_state(nic=new_nic)
 
     def _get_server_nics(self, ssh_client):
         reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
@@ -294,8 +296,8 @@
 
     def _check_network_internal_connectivity(self, network,
                                              should_connect=True):
-        """
-        via ssh check VM internal connectivity:
+        """via ssh check VM internal connectivity:
+
         - ping internal gateway and DHCP port, implying in-tenant connectivity
         pinging both, because L3 and DHCP agents might be on different nodes
         """
@@ -312,10 +314,7 @@
                                         should_connect)
 
     def _check_network_external_connectivity(self):
-        """
-        ping public network default gateway to imply external connectivity
-
-        """
+        """ping default gateway to imply external connectivity"""
         if not CONF.network.public_network_id:
             msg = 'public network not defined.'
             LOG.info(msg)
@@ -337,7 +336,8 @@
                                    should_connect=True):
         ip_address = floating_ip.floating_ip_address
         private_key = self._get_server_key(self.floating_ip_tuple.server)
-        ssh_source = self._ssh_to_server(ip_address, private_key)
+        ssh_source = self.get_remote_client(
+            ip_address, private_key=private_key)
 
         for remote_ip in address_list:
             if should_connect:
@@ -359,7 +359,8 @@
     @test.idempotent_id('f323b3ba-82f8-4db7-8ea6-6a895869ec49')
     @test.services('compute', 'network')
     def test_network_basic_ops(self):
-        """
+        """Basic network operation test
+
         For a freshly-booted VM with an IP address ("port") on a given
             network:
 
@@ -412,7 +413,8 @@
                       'Baremetal relies on a shared physical network.')
     @test.services('compute', 'network')
     def test_connectivity_between_vms_on_different_networks(self):
-        """
+        """Test connectivity between VMs on different networks
+
         For a freshly-booted VM with an IP address ("port") on a given
             network:
 
@@ -460,7 +462,8 @@
                       'vnic_type direct or macvtap')
     @test.services('compute', 'network')
     def test_hotplug_nic(self):
-        """
+        """Test hotplug network interface
+
         1. create a new network, with no gateway (to prevent overwriting VM's
             gateway)
         2. connect VM to new network
@@ -480,7 +483,8 @@
                       'network')
     @test.services('compute', 'network')
     def test_update_router_admin_state(self):
-        """
+        """Test to update admin state up of router
+
         1. Check public connectivity before updating
                 admin_state_up attribute of router to False
         2. Check public connectivity after updating
@@ -512,8 +516,9 @@
                           "DHCP client is not available.")
     @test.services('compute', 'network')
     def test_subnet_details(self):
-        """Tests that subnet's extra configuration details are affecting
-        the VMs. This test relies on non-shared, isolated tenant networks.
+        """Tests that subnet's extra configuration details are affecting VMs.
+
+         This test relies on non-shared, isolated tenant networks.
 
          NOTE: Neutron subnets push data to servers via dhcp-agent, so any
          update in subnet requires server to actively renew its DHCP lease.
@@ -549,7 +554,8 @@
         floating_ip, server = self.floating_ip_tuple
         ip_address = floating_ip.floating_ip_address
         private_key = self._get_server_key(server)
-        ssh_client = self._ssh_to_server(ip_address, private_key)
+        ssh_client = self.get_remote_client(
+            ip_address, private_key=private_key)
 
         dns_servers = [initial_dns_server]
         servers = ssh_client.get_dns_servers()
@@ -567,12 +573,11 @@
                          "Failed to update subnet's nameservers")
 
         def check_new_dns_server():
-            """Server needs to renew its dhcp lease in order to get the new dns
-            definitions from subnet
-            NOTE(amuller): we are renewing the lease as part of the retry
-            because Neutron updates dnsmasq asynchronously after the
-            subnet-update API call returns.
-            """
+            # NOTE: Server needs to renew its dhcp lease in order to get the
+            # new dns definitions from subnet
+            # NOTE(amuller): we are renewing the lease as part of the retry
+            # because Neutron updates dnsmasq asynchronously after the
+            # subnet-update API call returns.
             ssh_client.renew_lease(fixed_ip=floating_ip['fixed_ip_address'])
             if ssh_client.get_dns_servers() != [alt_dns_server]:
                 LOG.debug("Failed to update DNS nameservers")
@@ -594,7 +599,8 @@
                           "by the test environment")
     @test.services('compute', 'network')
     def test_update_instance_port_admin_state(self):
-        """
+        """Test to update admin_state_up attribute of instance port
+
         1. Check public connectivity before updating
                 admin_state_up attribute of instance port to False
         2. Check public connectivity after updating
@@ -609,12 +615,12 @@
         self.check_public_network_connectivity(
             should_connect=True, msg="before updating "
             "admin_state_up of instance port to False")
-        self.network_client.update_port(port_id, admin_state_up=False)
+        self.ports_client.update_port(port_id, admin_state_up=False)
         self.check_public_network_connectivity(
             should_connect=False, msg="after updating "
             "admin_state_up of instance port to False",
             should_check_floating_ip_status=False)
-        self.network_client.update_port(port_id, admin_state_up=True)
+        self.ports_client.update_port(port_id, admin_state_up=True)
         self.check_public_network_connectivity(
             should_connect=True, msg="after updating "
             "admin_state_up of instance port to True")
@@ -625,8 +631,10 @@
                           'supported in the version of Nova being tested.')
     @test.services('compute', 'network')
     def test_preserve_preexisting_port(self):
-        """Tests that a pre-existing port provided on server boot is not
-        deleted if the server is deleted.
+        """Test preserve pre-existing port
+
+        Tests that a pre-existing port provided on server boot is not deleted
+        if the server is deleted.
 
         Nova should unbind the port from the instance on delete if the port was
         not created by Nova as part of the boot request.
@@ -653,7 +661,7 @@
         waiters.wait_for_server_termination(self.servers_client, server['id'])
         # Assert the port still exists on the network but is unbound from
         # the deleted server.
-        port = self.network_client.show_port(port_id)['port']
+        port = self.ports_client.show_port(port_id)['port']
         self.assertEqual(self.network['id'], port['network_id'])
         self.assertEqual('', port['device_id'])
         self.assertEqual('', port['device_owner'])
@@ -677,10 +685,10 @@
 
         list_hosts = (self.admin_manager.network_client.
                       list_l3_agents_hosting_router)
-        schedule_router = (self.admin_manager.network_client.
-                           add_router_to_l3_agent)
-        unschedule_router = (self.admin_manager.network_client.
-                             remove_router_from_l3_agent)
+        schedule_router = (self.admin_manager.network_agents_client.
+                           create_router_on_l3_agent)
+        unschedule_router = (self.admin_manager.network_agents_client.
+                             delete_router_from_l3_agent)
 
         agent_list = set(a["id"] for a in
                          self._list_agents(agent_type="L3 agent"))
@@ -720,7 +728,7 @@
         target_agent = list(hosting_agents if no_migration else
                             agent_list - hosting_agents)[0]
         schedule_router(target_agent,
-                        self.router['id'])
+                        router_id=self.router['id'])
         self.assertEqual(
             target_agent,
             list_hosts(self.router.id)['agents'][0]['id'],
@@ -731,3 +739,57 @@
         self.check_public_network_connectivity(
             should_connect=True,
             msg='After router rescheduling')
+
+    @test.requires_ext(service='network', extension='port-security')
+    @test.idempotent_id('7c0bb1a2-d053-49a4-98f9-ca1a1d849f63')
+    @test.services('compute', 'network')
+    def test_port_security_macspoofing_port(self):
+        """Tests port_security extension enforces mac spoofing
+
+        Neutron security groups always apply anti-spoof rules on the VMs. This
+        allows traffic to originate and terminate at the VM as expected, but
+        prevents traffic from passing through the VM. Anti-spoof rules are
+        not required in cases where the VM routes traffic through it.
+
+        The test steps are:
+        1. Create a new network.
+        2. Connect (hotplug) the VM to a new network.
+        3. Check the VM can ping the DHCP interface of this network.
+        4. Spoof the mac address of the new VM interface.
+        5. Check the Security Group enforces mac anti-spoofing and blocks
+           pings via the spoofed interface (cannot ping the DHCP interface).
+        6. Disable port-security on the spoofed port (set the flag to False).
+        7. Retest step 3 and check that the Security Group now allows pings
+           via the spoofed interface.
+        """
+
+        spoof_mac = "00:00:00:00:00:01"
+
+        # Create server
+        self._setup_network_and_servers()
+        self.check_public_network_connectivity(should_connect=True)
+        self._create_new_network()
+        self._hotplug_server()
+        fip, server = self.floating_ip_tuple
+        new_ports = self._list_ports(device_id=server["id"],
+                                     network_id=self.new_net["id"])
+        spoof_port = new_ports[0]
+        private_key = self._get_server_key(server)
+        ssh_client = self.get_remote_client(fip.floating_ip_address,
+                                            private_key=private_key)
+        spoof_nic = ssh_client.get_nic_name(spoof_port["mac_address"])
+        dhcp_ports = self._list_ports(device_owner="network:dhcp",
+                                      network_id=self.new_net["id"])
+        new_net_dhcp = dhcp_ports[0]["fixed_ips"][0]["ip_address"]
+        self._check_remote_connectivity(ssh_client, dest=new_net_dhcp,
+                                        nic=spoof_nic, should_succeed=True)
+        ssh_client.set_mac_address(spoof_nic, spoof_mac)
+        new_mac = ssh_client.get_mac_address(nic=spoof_nic)
+        self.assertEqual(spoof_mac, new_mac)
+        self._check_remote_connectivity(ssh_client, dest=new_net_dhcp,
+                                        nic=spoof_nic, should_succeed=False)
+        self.ports_client.update_port(spoof_port["id"],
+                                      port_security_enabled=False,
+                                      security_groups=[])
+        self._check_remote_connectivity(ssh_client, dest=new_net_dhcp,
+                                        nic=spoof_nic, should_succeed=True)
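
The NOTE added to check_new_dns_server above explains why the lease renewal has to happen inside the retry: Neutron updates dnsmasq asynchronously after the subnet-update call returns. The code that drives that retry is outside this excerpt; below is a minimal sketch of the pattern, with the helper name and the CONF option names as assumptions.

# Sketch only: renew the DHCP lease on every attempt, then re-read the
# guest's nameservers, until the update shows up or the timeout expires.
from tempest import config
from tempest import test

CONF = config.CONF


def wait_for_nameservers(ssh_client, fixed_ip, expected_servers):
    def _renew_and_check():
        # dnsmasq is updated asynchronously, so renew before every check
        ssh_client.renew_lease(fixed_ip=fixed_ip)
        return ssh_client.get_dns_servers() == expected_servers

    return test.call_until_true(_renew_and_check,
                                CONF.network.build_timeout,
                                CONF.network.build_interval)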
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index f82e7e4..fc33dd9 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -14,7 +14,6 @@
 #    under the License.
 import functools
 
-from oslo_log import log as logging
 import six
 
 from tempest import config
@@ -23,7 +22,6 @@
 
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class TestGettingAddress(manager.NetworkScenarioTest):
@@ -65,17 +63,15 @@
         super(TestGettingAddress, self).setUp()
         self.keypair = self.create_keypair()
         self.sec_grp = self._create_security_group(tenant_id=self.tenant_id)
-        self.srv_kwargs = {
-            'key_name': self.keypair['name'],
-            'security_groups': [{'name': self.sec_grp['name']}]}
 
     def prepare_network(self, address6_mode, n_subnets6=1, dualnet=False):
-        """Creates network with
-         given number of IPv6 subnets in the given mode and
-         one IPv4 subnet
-         Creates router with ports on all subnets
-         if dualnet - create IPv6 subnets on a different network
-         :return: list of created networks
+        """Prepare network
+
+        Creates network with given number of IPv6 subnets in the given mode and
+        one IPv4 subnet.
+        Creates router with ports on all subnets.
+        If dualnet is set, IPv6 subnets are created on a different network.
+        :return: list of created networks
         """
         self.network = self._create_network(tenant_id=self.tenant_id)
         if dualnet:
@@ -116,17 +112,19 @@
         return ips
 
     def prepare_server(self, networks=None):
-        username = CONF.compute.image_ssh_user
+        username = CONF.validation.image_ssh_user
 
-        create_kwargs = self.srv_kwargs
         networks = networks or [self.network]
-        create_kwargs['networks'] = [{'uuid': n.id} for n in networks]
 
-        srv = self.create_server(create_kwargs=create_kwargs)
+        srv = self.create_server(
+            key_name=self.keypair['name'],
+            security_groups=[{'name': self.sec_grp['name']}],
+            networks=[{'uuid': n.id} for n in networks],
+            wait_until='ACTIVE')
         fip = self.create_floating_ip(thing=srv)
         ips = self.define_server_ips(srv=srv)
         ssh = self.get_remote_client(
-            server_or_ip=fip.floating_ip_address,
+            ip_address=fip.floating_ip_address,
             username=username)
         return ssh, ips, srv["id"]
 
@@ -147,7 +145,7 @@
                                   "ports: %s")
                          % (self.network_v6, ports))
         mac6 = ports[0]
-        ssh.turn_nic_on(ssh.get_nic_name(mac6))
+        ssh.set_nic_state(ssh.get_nic_name(mac6))
 
     def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
         net_list = self.prepare_network(address6_mode=address6_mode,
@@ -180,10 +178,10 @@
                 guest_has_address, sshv4_2, ips_from_api_2['6'][i])
 
             self.assertTrue(test.call_until_true(srv1_v6_addr_assigned,
-                                                 CONF.compute.ping_timeout, 1))
+                            CONF.validation.ping_timeout, 1))
 
             self.assertTrue(test.call_until_true(srv2_v6_addr_assigned,
-                                                 CONF.compute.ping_timeout, 1))
+                            CONF.validation.ping_timeout, 1))
 
         self._check_connectivity(sshv4_1, ips_from_api_2['4'])
         self._check_connectivity(sshv4_2, ips_from_api_1['4'])
@@ -205,31 +203,37 @@
             (dest, source.ssh_client.host)
         )
 
+    @test.attr(type='slow')
     @test.idempotent_id('2c92df61-29f0-4eaa-bee3-7c65bef62a43')
     @test.services('compute', 'network')
     def test_slaac_from_os(self):
         self._prepare_and_test(address6_mode='slaac')
 
+    @test.attr(type='slow')
     @test.idempotent_id('d7e1f858-187c-45a6-89c9-bdafde619a9f')
     @test.services('compute', 'network')
     def test_dhcp6_stateless_from_os(self):
         self._prepare_and_test(address6_mode='dhcpv6-stateless')
 
+    @test.attr(type='slow')
     @test.idempotent_id('7ab23f41-833b-4a16-a7c9-5b42fe6d4123')
     @test.services('compute', 'network')
     def test_multi_prefix_dhcpv6_stateless(self):
         self._prepare_and_test(address6_mode='dhcpv6-stateless', n_subnets6=2)
 
+    @test.attr(type='slow')
     @test.idempotent_id('dec222b1-180c-4098-b8c5-cc1b8342d611')
     @test.services('compute', 'network')
     def test_multi_prefix_slaac(self):
         self._prepare_and_test(address6_mode='slaac', n_subnets6=2)
 
+    @test.attr(type='slow')
     @test.idempotent_id('b6399d76-4438-4658-bcf5-0d6c8584fde2')
     @test.services('compute', 'network')
     def test_dualnet_slaac_from_os(self):
         self._prepare_and_test(address6_mode='slaac', dualnet=True)
 
+    @test.attr(type='slow')
     @test.idempotent_id('76f26acd-9688-42b4-bc3e-cd134c4cb09e')
     @test.services('compute', 'network')
     def test_dualnet_dhcp6_stateless_from_os(self):
diff --git a/tempest/scenario/test_object_storage_basic_ops.py b/tempest/scenario/test_object_storage_basic_ops.py
index 49768c5..63ffa0b 100644
--- a/tempest/scenario/test_object_storage_basic_ops.py
+++ b/tempest/scenario/test_object_storage_basic_ops.py
@@ -13,20 +13,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest import config
 from tempest.scenario import manager
 from tempest import test
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class TestObjectStorageBasicOps(manager.ObjectStorageScenarioTest):
-    """
-    Test swift basic ops.
+    """Test swift basic ops.
+
      * get swift stat.
      * create container.
      * upload a file to the created container.
@@ -57,6 +53,7 @@
     @test.services('object_storage')
     def test_swift_acl_anonymous_download(self):
         """This test will cover below steps:
+
         1. Create container
         2. Upload object to the new container
         3. Change the ACL of the container
diff --git a/tempest/scenario/test_object_storage_telemetry_middleware.py b/tempest/scenario/test_object_storage_telemetry_middleware.py
index 3376a7c..eee4d3d 100644
--- a/tempest/scenario/test_object_storage_telemetry_middleware.py
+++ b/tempest/scenario/test_object_storage_telemetry_middleware.py
@@ -35,8 +35,8 @@
 
 
 class TestObjectStorageTelemetry(manager.ObjectStorageScenarioTest):
-    """
-    Test that swift uses the ceilometer middleware.
+    """Test that swift uses the ceilometer middleware.
+
      * create container.
      * upload a file to the created container.
      * retrieve the file from the created container.
@@ -57,19 +57,15 @@
         cls.telemetry_client = cls.os_operator.telemetry_client
 
     def _confirm_notifications(self, container_name, obj_name):
-        """
-        Loop seeking for appropriate notifications about the containers
-        and objects sent to swift.
-        """
+        # NOTE: Loop, looking for the appropriate notifications about the
+        # containers and objects sent to swift.
 
         def _check_samples():
-            """
-            Return True only if we have notifications about some
-            containers and some objects and the notifications are about
-            the expected containers and objects.
-            Otherwise returning False will case _check_samples to be
-            called again.
-            """
+            # NOTE: Return True only if we have notifications about some
+            # containers and some objects and the notifications are about
+            # the expected containers and objects.
+            # Otherwise returning False will cause _check_samples to be
+            # called again.
             results = self.telemetry_client.list_samples(
                 'storage.objects.incoming.bytes')
             LOG.debug('got samples %s', results)
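
The rewritten comments above describe _check_samples only in prose: it must return True once notifications for both the expected container and the expected object have arrived, and returning False makes the poller call it again. A minimal sketch of that filtering step, assuming ceilometer samples expose a resource_id field; the field name and helper are illustrative, not taken from this change.

# Sketch only: decide whether the collected samples already mention the
# expected container and object.  'resource_id' is an assumed field name.
def samples_cover(samples, container_name, obj_name):
    resource_ids = [s.get('resource_id', '') for s in samples]
    has_container = any(container_name in rid for rid in resource_ids)
    has_object = any(obj_name in rid for rid in resource_ids)
    return has_container and has_object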
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 3c11c22..18bd764 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
@@ -23,12 +21,11 @@
 
 CONF = config.CONF
 
-LOG = logging.getLogger(__name__)
-
 
 class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
 
-    """
+    """The test suite for security groups
+
     This test suite assumes that Nova has been configured to
     boot VM's with Neutron-managed networking, and attempts to
     verify cross tenant connectivity as follows
@@ -95,8 +92,8 @@
     credentials = ['primary', 'alt', 'admin']
 
     class TenantProperties(object):
-        """
-        helper class to save tenant details
+        """helper class to save tenant details
+
             id
             credentials
             network
@@ -136,6 +133,9 @@
             msg = ('Either tenant_networks_reachable must be "true", or '
                    'public_network_id must be defined.')
             raise cls.skipException(msg)
+        if not test.is_extension_enabled('security-group', 'network'):
+            msg = "security-group extension not enabled."
+            raise cls.skipException(msg)
 
     @classmethod
     def setup_credentials(cls):
@@ -175,14 +175,14 @@
         access_sg = self._create_empty_security_group(
             namestart='secgroup_access-',
             tenant_id=tenant.creds.tenant_id,
-            client=tenant.manager.network_client
+            client=tenant.manager.security_groups_client
         )
 
         # don't use default secgroup since it allows in-tenant traffic
         def_sg = self._create_empty_security_group(
             namestart='secgroup_general-',
             tenant_id=tenant.creds.tenant_id,
-            client=tenant.manager.network_client
+            client=tenant.manager.security_groups_client
         )
         tenant.security_groups.update(access=access_sg, default=def_sg)
         ssh_rule = dict(
@@ -191,9 +191,11 @@
             port_range_max=22,
             direction='ingress',
         )
-        self._create_security_group_rule(secgroup=access_sg,
-                                         client=tenant.manager.network_client,
-                                         **ssh_rule)
+        sec_group_rules_client = tenant.manager.security_group_rules_client
+        self._create_security_group_rule(
+            secgroup=access_sg,
+            sec_group_rules_client=sec_group_rules_client,
+            **ssh_rule)
 
     def _verify_network_details(self, tenant):
         # Checks that we see the newly created network/subnet/router via
@@ -232,25 +234,17 @@
         return port['device_owner'].startswith('network:router_interface')
 
     def _create_server(self, name, tenant, security_groups=None):
-        """
-        creates a server and assigns to security group
-        """
-        self._set_compute_context(tenant)
+        """creates a server and assigns to security group"""
         if security_groups is None:
             security_groups = [tenant.security_groups['default']]
         security_groups_names = [{'name': s['name']} for s in security_groups]
-        create_kwargs = {
-            'networks': [
-                {'uuid': tenant.network.id},
-            ],
-            'key_name': tenant.keypair['name'],
-            'security_groups': security_groups_names
-        }
         server = self.create_server(
             name=name,
-            network_client=tenant.manager.network_client,
-            networks_client=tenant.manager.networks_client,
-            create_kwargs=create_kwargs)
+            networks=[{'uuid': tenant.network.id}],
+            key_name=tenant.keypair['name'],
+            security_groups=security_groups_names,
+            wait_until='ACTIVE',
+            clients=tenant.manager)
         self.assertEqual(
             sorted([s['name'] for s in security_groups]),
             sorted([s['name'] for s in server['security_groups']]))
@@ -267,11 +261,9 @@
             tenant.servers.append(server)
 
     def _set_access_point(self, tenant):
-        """
-        creates a server in a secgroup with rule allowing external ssh
-        in order to access tenant internal network
-        workaround ip namespace
-        """
+        # creates a server in a secgroup with rule allowing external ssh
+        # in order to access tenant internal network
+        # workaround ip namespace
         secgroups = tenant.security_groups.values()
         name = 'server-{tenant}-access_point'.format(
             tenant=tenant.creds.tenant_name)
@@ -285,7 +277,7 @@
         public_network_id = CONF.network.public_network_id
         floating_ip = self.create_floating_ip(
             server, public_network_id,
-            client=tenant.manager.network_client)
+            client=tenant.manager.floating_ips_client)
         self.floating_ips.setdefault(server['id'], floating_ip)
 
     def _create_tenant_network(self, tenant):
@@ -295,13 +287,9 @@
             subnets_client=tenant.manager.subnets_client)
         tenant.set_network(network, subnet, router)
 
-    def _set_compute_context(self, tenant):
-        self.servers_client = tenant.manager.servers_client
-        return self.servers_client
-
     def _deploy_tenant(self, tenant_or_id):
-        """
-        creates:
+        """creates:
+
             network
             subnet
             router (if public not defined)
@@ -312,16 +300,13 @@
             tenant = self.tenants[tenant_or_id]
         else:
             tenant = tenant_or_id
-        self._set_compute_context(tenant)
         self._create_tenant_keypairs(tenant)
         self._create_tenant_network(tenant)
         self._create_tenant_security_groups(tenant)
         self._set_access_point(tenant)
 
     def _get_server_ip(self, server, floating=False):
-        """
-        returns the ip (floating/internal) of a server
-        """
+        """returns the ip (floating/internal) of a server"""
         if floating:
             server_ip = self.floating_ips[server['id']].floating_ip_address
         else:
@@ -332,14 +317,12 @@
         return server_ip
 
     def _connect_to_access_point(self, tenant):
-        """
-        create ssh connection to tenant access point
-        """
+        """create ssh connection to tenant access point"""
         access_point_ssh = \
             self.floating_ips[tenant.access_point['id']].floating_ip_address
         private_key = tenant.keypair['private_key']
-        access_point_ssh = self._ssh_to_server(access_point_ssh,
-                                               private_key=private_key)
+        access_point_ssh = self.get_remote_client(
+            access_point_ssh, private_key=private_key)
         return access_point_ssh
 
     def _check_connectivity(self, access_point, ip, should_succeed=True):
@@ -373,10 +356,8 @@
                                      ip=self._get_server_ip(server))
 
     def _test_cross_tenant_block(self, source_tenant, dest_tenant):
-        """
-        if public router isn't defined, then dest_tenant access is via
-        floating-ip
-        """
+        # if public router isn't defined, then dest_tenant access is via
+        # floating-ip
         access_point_ssh = self._connect_to_access_point(source_tenant)
         ip = self._get_server_ip(dest_tenant.access_point,
                                  floating=self.floating_ip_access)
@@ -384,17 +365,19 @@
                                  should_succeed=False)
 
     def _test_cross_tenant_allow(self, source_tenant, dest_tenant):
-        """
-        check for each direction:
+        """check for each direction:
+
         creating rule for tenant incoming traffic enables only 1way traffic
         """
         ruleset = dict(
             protocol='icmp',
             direction='ingress'
         )
+        sec_group_rules_client = (
+            dest_tenant.manager.security_group_rules_client)
         self._create_security_group_rule(
             secgroup=dest_tenant.security_groups['default'],
-            client=dest_tenant.manager.network_client,
+            sec_group_rules_client=sec_group_rules_client,
             **ruleset
         )
         access_point_ssh = self._connect_to_access_point(source_tenant)
@@ -406,9 +389,11 @@
         self._test_cross_tenant_block(dest_tenant, source_tenant)
 
         # allow reverse traffic and check
+        sec_group_rules_client = (
+            source_tenant.manager.security_group_rules_client)
         self._create_security_group_rule(
             secgroup=source_tenant.security_groups['default'],
-            client=source_tenant.manager.network_client,
+            sec_group_rules_client=sec_group_rules_client,
             **ruleset
         )
 
@@ -418,10 +403,8 @@
         self._check_connectivity(access_point_ssh_2, ip)
 
     def _verify_mac_addr(self, tenant):
-        """
-        verify that VM (tenant's access point) has the same ip,mac as listed in
-        port list
-        """
+        """Verify that VM has the same ip, mac as listed in port"""
+
         access_point_ssh = self._connect_to_access_point(tenant)
         mac_addr = access_point_ssh.get_mac_address()
         mac_addr = mac_addr.strip().lower()
@@ -476,9 +459,9 @@
     @test.idempotent_id('f4d556d7-1526-42ad-bafb-6bebf48568f6')
     @test.services('compute', 'network')
     def test_port_update_new_security_group(self):
-        """
-        This test verifies the traffic after updating the vm port with new
-        security group having appropriate rule.
+        """Verifies the traffic after updating the vm port
+
+        With a new security group that has the appropriate rule.
         """
         new_tenant = self.primary_tenant
 
@@ -486,14 +469,15 @@
         new_sg = self._create_empty_security_group(
             namestart='secgroup_new-',
             tenant_id=new_tenant.creds.tenant_id,
-            client=new_tenant.manager.network_client)
+            client=new_tenant.manager.security_groups_client)
         icmp_rule = dict(
             protocol='icmp',
             direction='ingress',
         )
+        sec_group_rules_client = new_tenant.manager.security_group_rules_client
         self._create_security_group_rule(
             secgroup=new_sg,
-            client=new_tenant.manager.network_client,
+            sec_group_rules_client=sec_group_rules_client,
             **icmp_rule)
         new_tenant.security_groups.update(new_sg=new_sg)
 
@@ -514,7 +498,7 @@
             port_id = self._list_ports(device_id=server_id)[0]['id']
 
             # update port with new security group and check connectivity
-            self.network_client.update_port(port_id, security_groups=[
+            self.ports_client.update_port(port_id, security_groups=[
                 new_tenant.security_groups['new_sg'].id])
             self._check_connectivity(
                 access_point=access_point_ssh,
@@ -527,14 +511,14 @@
     @test.idempotent_id('d2f77418-fcc4-439d-b935-72eca704e293')
     @test.services('compute', 'network')
     def test_multiple_security_groups(self):
-        """
-        This test verifies multiple security groups and checks that rules
+        """Verify multiple security groups and checks that rules
+
         provided in the both the groups is applied onto VM
         """
         tenant = self.primary_tenant
         ip = self._get_server_ip(tenant.access_point,
                                  floating=self.floating_ip_access)
-        ssh_login = CONF.compute.image_ssh_user
+        ssh_login = CONF.validation.image_ssh_user
         private_key = tenant.keypair['private_key']
         self.check_vm_connectivity(ip,
                                    should_connect=False)
@@ -546,13 +530,11 @@
             secgroup=tenant.security_groups['default'],
             **ruleset
         )
-        """
-        Vm now has 2 security groups one with ssh rule(
-        already added in setUp() method),and other with icmp rule
-        (added in the above step).The check_vm_connectivity tests
-        -that vm ping test is successful
-        -ssh to vm is successful
-        """
+        # NOTE: The VM now has 2 security groups: one with an ssh rule
+        # (already added in the setUp() method) and another with an icmp
+        # rule (added in the step above). check_vm_connectivity tests that:
+        # - the vm ping test is successful
+        # - ssh to the vm is successful
         self.check_vm_connectivity(ip,
                                    username=ssh_login,
                                    private_key=private_key,
@@ -562,10 +544,7 @@
     @test.idempotent_id('7c811dcc-263b-49a3-92d2-1b4d8405f50c')
     @test.services('compute', 'network')
     def test_port_security_disable_security_group(self):
-        """
-        This test verifies port_security_enabled=False disables
-        the default security group rules.
-        """
+        """Verify the default security group rules is disabled."""
         new_tenant = self.primary_tenant
 
         # Create server
@@ -581,16 +560,16 @@
 
         # Flip the port's port security and check connectivity
         try:
-            self.network_client.update_port(port_id,
-                                            port_security_enabled=True,
-                                            security_groups=[])
+            self.ports_client.update_port(port_id,
+                                          port_security_enabled=True,
+                                          security_groups=[])
             self._check_connectivity(access_point=access_point_ssh,
                                      ip=self._get_server_ip(server),
                                      should_succeed=False)
 
-            self.network_client.update_port(port_id,
-                                            port_security_enabled=False,
-                                            security_groups=[])
+            self.ports_client.update_port(port_id,
+                                          port_security_enabled=False,
+                                          security_groups=[])
             self._check_connectivity(
                 access_point=access_point_ssh,
                 ip=self._get_server_ip(server))
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index c83dbb1..4b932ce 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -28,9 +28,9 @@
 
 class TestServerAdvancedOps(manager.ScenarioTest):
 
-    """
-    This test case stresses some advanced server instance operations:
+    """The test suite for server advanced operations
 
+    This test case stresses some advanced server instance operations:
      * Resizing an instance
      * Sequence suspend resume
     """
@@ -53,7 +53,7 @@
     @test.services('compute')
     def test_resize_server_confirm(self):
         # We create an instance for use in this test
-        instance = self.create_server()
+        instance = self.create_server(wait_until='ACTIVE')
         instance_id = instance['id']
         resize_flavor = CONF.compute.flavor_ref_alt
         LOG.debug("Resizing instance %s from flavor %s to flavor %s",
@@ -74,7 +74,7 @@
     @test.services('compute')
     def test_server_sequence_suspend_resume(self):
         # We create an instance for use in this test
-        instance = self.create_server()
+        instance = self.create_server(wait_until='ACTIVE')
         instance_id = instance['id']
         LOG.debug("Suspending instance %s. Current status: %s",
                   instance_id, instance['status'])
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index e2f8adb..dcb095b 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -32,9 +32,9 @@
 
 class TestServerBasicOps(manager.ScenarioTest):
 
-    """
-    This smoke test case follows this basic set of operations:
+    """The test suite for server basic operations
 
+    This smoke test case follows this basic set of operations:
      * Create a keypair for use in launching an instance
      * Create a security group to control network access in instance
      * Add simple permissive rules to the security group
@@ -69,39 +69,15 @@
                       image=self.image_ref, flavor=self.flavor_ref,
                       ssh=self.run_ssh, ssh_user=self.ssh_user))
 
-    def add_keypair(self):
-        self.keypair = self.create_keypair()
-
-    def boot_instance(self):
-        # Create server with image and flavor from input scenario
-        security_groups = [{'name': self.security_group['name']}]
-        self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
-        create_kwargs = {
-            'key_name': self.keypair['name'],
-            'security_groups': security_groups,
-            'config_drive': CONF.compute_feature_enabled.config_drive,
-            'metadata': self.md
-        }
-        self.instance = self.create_server(image=self.image_ref,
-                                           flavor=self.flavor_ref,
-                                           create_kwargs=create_kwargs)
-
-    def verify_ssh(self):
+    def verify_ssh(self, keypair):
         if self.run_ssh:
             # Obtain a floating IP
-            self.floating_ip = (self.floating_ips_client.create_floating_ip()
-                                ['floating_ip'])
-            self.addCleanup(self.delete_wrapper,
-                            self.floating_ips_client.delete_floating_ip,
-                            self.floating_ip['id'])
-            # Attach a floating IP
-            self.floating_ips_client.associate_floating_ip_to_server(
-                self.floating_ip['ip'], self.instance['id'])
+            self.fip = self.create_floating_ip(self.instance)['ip']
             # Check ssh
             self.ssh_client = self.get_remote_client(
-                server_or_ip=self.floating_ip['ip'],
+                ip_address=self.fip,
                 username=self.image_utils.ssh_user(self.image_ref),
-                private_key=self.keypair['private_key'])
+                private_key=keypair['private_key'])
 
     def verify_metadata(self):
         if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
@@ -110,12 +86,11 @@
 
             def exec_cmd_and_verify_output():
                 cmd = 'curl ' + md_url
-                floating_ip = self.floating_ip['ip']
                 result = self.ssh_client.exec_command(cmd)
                 if result:
                     msg = ('Failed while verifying metadata on server. Result '
-                           'of command "%s" is NOT "%s".' % (cmd, floating_ip))
-                    self.assertEqual(floating_ip, result, msg)
+                           'of command "%s" is NOT "%s".' % (cmd, self.fip))
+                    self.assertEqual(self.fip, result, msg)
                     return 'Verification is successful!'
 
             if not test.call_until_true(exec_cmd_and_verify_output,
@@ -145,10 +120,19 @@
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_server_basicops(self):
-        self.add_keypair()
+        keypair = self.create_keypair()
         self.security_group = self._create_security_group()
-        self.boot_instance()
-        self.verify_ssh()
+        security_groups = [{'name': self.security_group['name']}]
+        self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
+        self.instance = self.create_server(
+            image_id=self.image_ref,
+            flavor=self.flavor_ref,
+            key_name=keypair['name'],
+            security_groups=security_groups,
+            config_drive=CONF.compute_feature_enabled.config_drive,
+            metadata=self.md,
+            wait_until='ACTIVE')
+        self.verify_ssh(keypair)
         self.verify_metadata()
         self.verify_metadata_on_config_drive()
         self.servers_client.delete_server(self.instance['id'])
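
verify_metadata above curls a metadata URL from inside the guest and expects the instance's floating IP back, but the md_url assignment itself is outside this excerpt. Purely as an illustration, a hypothetical check against the conventional EC2-style metadata endpoint could look like the following; the URL is an assumption, not a value taken from this change.

# Hypothetical illustration only: ask the metadata service for the
# instance's public address and compare it to the known floating IP.
def metadata_reports_floating_ip(ssh_client, floating_ip):
    md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
    result = ssh_client.exec_command('curl ' + md_url)
    return result == floating_ip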
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
new file mode 100644
index 0000000..0cf72c3
--- /dev/null
+++ b/tempest/scenario/test_server_multinode.py
@@ -0,0 +1,84 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from tempest import config
+from tempest import exceptions
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+
+class TestServerMultinode(manager.ScenarioTest):
+    """This is a set of tests specific to multinode testing."""
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestServerMultinode, cls).skip_checks()
+
+        if CONF.compute.min_compute_nodes < 2:
+            raise cls.skipException(
+                "Less than 2 compute nodes, skipping multinode tests.")
+
+    @classmethod
+    def setup_clients(cls):
+        super(TestServerMultinode, cls).setup_clients()
+        # Use admin client by default
+        cls.manager = cls.admin_manager
+        # this is needed so that we can use the availability_zone:host
+        # scheduler hint, which is admin_only by default
+        cls.servers_client = cls.admin_manager.servers_client
+        super(TestServerMultinode, cls).resource_setup()
+
+    @test.idempotent_id('9cecbe35-b9d4-48da-a37e-7ce70aa43d30')
+    @test.attr(type='smoke')
+    @test.services('compute', 'network')
+    def test_schedule_to_all_nodes(self):
+        host_client = self.manager.hosts_client
+        hosts = host_client.list_hosts()['hosts']
+        hosts = [x for x in hosts if x['service'] == 'compute']
+
+        # ensure we have at least as many compute hosts as we expect
+        if len(hosts) < CONF.compute.min_compute_nodes:
+            raise exceptions.InvalidConfiguration(
+                "Host list %s is shorter than min_compute_nodes. "
+                "Did a compute worker not boot correctly?" % hosts)
+
+        # create 1 compute for each node, up to the min_compute_nodes
+        # threshold (so that things don't get crazy if you have 1000
+        # compute nodes but set min to 3).
+        servers = []
+
+        for host in hosts[:CONF.compute.min_compute_nodes]:
+            # by getting to active state here, this means this has
+            # landed on the host in question.
+            inst = self.create_server(
+                availability_zone='%(zone)s:%(host_name)s' % host,
+                wait_until='ACTIVE')
+            server = self.servers_client.show_server(inst['id'])['server']
+            servers.append(server)
+
+        # make sure we really have the number of servers we think we should
+        self.assertEqual(
+            len(servers), CONF.compute.min_compute_nodes,
+            "Incorrect number of servers built %s" % servers)
+
+        # ensure that every server ended up on a different host
+        host_ids = [x['hostId'] for x in servers]
+        self.assertEqual(
+            len(set(host_ids)), len(servers),
+            "Incorrect number of distinct host_ids scheduled to %s" % servers)
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index bc80412..77de47e 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log
 import testtools
 
 from tempest.common import waiters
@@ -23,12 +22,10 @@
 
 CONF = config.CONF
 
-LOG = log.getLogger(__name__)
-
 
 class TestShelveInstance(manager.ScenarioTest):
-    """
-    This test shelves then unshelves a Nova instance
+    """This test shelves then unshelves a Nova instance
+
     The following is the scenario outline:
      * boot an instance and create a timestamp file in it
      * shelve the instance
@@ -59,10 +56,6 @@
 
         security_group = self._create_security_group()
         security_groups = [{'name': security_group['name']}]
-        create_kwargs = {
-            'key_name': keypair['name'],
-            'security_groups': security_groups
-        }
 
         if boot_from_volume:
             volume = self.create_volume(size=CONF.volume.volume_size,
@@ -72,36 +65,29 @@
                 'volume_id': volume['id'],
                 'delete_on_termination': '0'}]
 
-            create_kwargs['block_device_mapping'] = bd_map
-            server = self.create_server(create_kwargs=create_kwargs)
+            server = self.create_server(
+                key_name=keypair['name'],
+                security_groups=security_groups,
+                block_device_mapping=bd_map,
+                wait_until='ACTIVE')
         else:
-            server = self.create_server(image=CONF.compute.image_ref,
-                                        create_kwargs=create_kwargs)
+            server = self.create_server(
+                image_id=CONF.compute.image_ref,
+                key_name=keypair['name'],
+                security_groups=security_groups,
+                wait_until='ACTIVE')
 
-        if CONF.compute.use_floatingip_for_ssh:
-            floating_ip = (self.floating_ips_client.create_floating_ip()
-                           ['floating_ip'])
-            self.addCleanup(self.delete_wrapper,
-                            self.floating_ips_client.delete_floating_ip,
-                            floating_ip['id'])
-            self.floating_ips_client.associate_floating_ip_to_server(
-                floating_ip['ip'], server['id'])
-            timestamp = self.create_timestamp(
-                floating_ip['ip'], private_key=keypair['private_key'])
-        else:
-            timestamp = self.create_timestamp(
-                server, private_key=keypair['private_key'])
+        instance_ip = self.get_server_ip(server)
+        timestamp = self.create_timestamp(instance_ip,
+                                          private_key=keypair['private_key'])
 
         # Prevent bug #1257594 from coming back
         # Unshelve used to boot the instance with the original image, not
         # with the instance snapshot
         self._shelve_then_unshelve_server(server)
-        if CONF.compute.use_floatingip_for_ssh:
-            timestamp2 = self.get_timestamp(floating_ip['ip'],
-                                            private_key=keypair['private_key'])
-        else:
-            timestamp2 = self.get_timestamp(server,
-                                            private_key=keypair['private_key'])
+
+        timestamp2 = self.get_timestamp(instance_ip,
+                                        private_key=keypair['private_key'])
         self.assertEqual(timestamp, timestamp2)
 
     @test.idempotent_id('1164e700-0af0-4a4c-8792-35909a88743c')
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 5ac3a7e..d6528a3 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log
 import testtools
 
 from tempest import config
@@ -22,12 +21,10 @@
 
 CONF = config.CONF
 
-LOG = log.getLogger(__name__)
-
 
 class TestSnapshotPattern(manager.ScenarioTest):
-    """
-    This test is for snapshotting an instance and booting with it.
+    """This test is for snapshotting an instance and booting with it.
+
     The following is the scenario outline:
      * boot an instance and create a timestamp file in it
      * snapshot the instance
@@ -36,14 +33,6 @@
 
     """
 
-    def _boot_image(self, image_id, keypair, security_group):
-        security_groups = [{'name': security_group['name']}]
-        create_kwargs = {
-            'key_name': keypair['name'],
-            'security_groups': security_groups
-        }
-        return self.create_server(image=image_id, create_kwargs=create_kwargs)
-
     @test.idempotent_id('608e604b-1d63-4a82-8e3e-91bc665c90b4')
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
@@ -54,29 +43,28 @@
         security_group = self._create_security_group()
 
         # boot an instance and create a timestamp file in it
-        server = self._boot_image(CONF.compute.image_ref, keypair,
-                                  security_group)
-        if CONF.compute.use_floatingip_for_ssh:
-            fip_for_server = self.create_floating_ip(server)
-            timestamp = self.create_timestamp(
-                fip_for_server['ip'], private_key=keypair['private_key'])
-        else:
-            timestamp = self.create_timestamp(
-                server, private_key=keypair['private_key'])
+        server = self.create_server(
+            image_id=CONF.compute.image_ref,
+            key_name=keypair['name'],
+            security_groups=[{'name': security_group['name']}],
+            wait_until='ACTIVE')
+
+        instance_ip = self.get_server_ip(server)
+        timestamp = self.create_timestamp(instance_ip,
+                                          private_key=keypair['private_key'])
 
         # snapshot the instance
         snapshot_image = self.create_server_snapshot(server=server)
 
         # boot a second instance from the snapshot
-        server_from_snapshot = self._boot_image(snapshot_image['id'],
-                                                keypair, security_group)
+        server_from_snapshot = self.create_server(
+            image_id=snapshot_image['id'],
+            key_name=keypair['name'],
+            security_groups=[{'name': security_group['name']}],
+            wait_until='ACTIVE')
 
         # check the existence of the timestamp file in the second instance
-        if CONF.compute.use_floatingip_for_ssh:
-            fip_for_snapshot = self.create_floating_ip(server_from_snapshot)
-            timestamp2 = self.get_timestamp(fip_for_snapshot['ip'],
-                                            private_key=keypair['private_key'])
-        else:
-            timestamp2 = self.get_timestamp(server_from_snapshot,
-                                            private_key=keypair['private_key'])
+        server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
+        timestamp2 = self.get_timestamp(server_from_snapshot_ip,
+                                        private_key=keypair['private_key'])
         self.assertEqual(timestamp, timestamp2)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 6eceeb2..1d09fe7 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -25,15 +25,14 @@
 from tempest import exceptions
 from tempest.scenario import manager
 from tempest import test
-import tempest.test
 
 CONF = config.CONF
-
 LOG = logging.getLogger(__name__)
 
 
 class TestStampPattern(manager.ScenarioTest):
-    """
+    """The test suite for both snapshoting and attaching of volume
+
     This test is for snapshotting an instance/volume and attaching the volume
     created from snapshot to the instance booted from snapshot.
     The following is the scenario outline:
@@ -59,22 +58,10 @@
         if not CONF.volume_feature_enabled.snapshot:
             raise cls.skipException("Cinder volume snapshots are disabled")
 
-    def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
-        self.snapshots_client.wait_for_snapshot_status(volume_snapshot['id'],
-                                                       status)
-
-    def _boot_image(self, image_id, keypair, security_group):
-        security_groups = [{'name': security_group['name']}]
-        create_kwargs = {
-            'key_name': keypair['name'],
-            'security_groups': security_groups
-        }
-        return self.create_server(image=image_id, create_kwargs=create_kwargs)
-
     def _create_volume_snapshot(self, volume):
         snapshot_name = data_utils.rand_name('scenario-snapshot')
         snapshot = self.snapshots_client.create_snapshot(
-            volume['id'], display_name=snapshot_name)['snapshot']
+            volume_id=volume['id'], display_name=snapshot_name)['snapshot']
 
         def cleaner():
             self.snapshots_client.delete_snapshot(snapshot['id'])
@@ -85,72 +72,54 @@
             except lib_exc.NotFound:
                 pass
         self.addCleanup(cleaner)
-        self._wait_for_volume_status(volume, 'available')
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
         self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
                                                        'available')
         self.assertEqual(snapshot_name, snapshot['display_name'])
         return snapshot
 
-    def _wait_for_volume_status(self, volume, status):
-        self.volumes_client.wait_for_volume_status(volume['id'], status)
-
-    def _create_volume(self, snapshot_id=None):
-        return self.create_volume(snapshot_id=snapshot_id)
-
-    def _attach_volume(self, server, volume):
-        attached_volume = self.servers_client.attach_volume(
-            server['id'], volumeId=volume['id'], device='/dev/%s'
-            % CONF.compute.volume_device_name)['volumeAttachment']
-        self.assertEqual(volume['id'], attached_volume['id'])
-        self._wait_for_volume_status(attached_volume, 'in-use')
-
-    def _detach_volume(self, server, volume):
-        self.servers_client.detach_volume(server['id'], volume['id'])
-        self._wait_for_volume_status(volume, 'available')
-
-    def _wait_for_volume_available_on_the_system(self, server_or_ip,
+    def _wait_for_volume_available_on_the_system(self, ip_address,
                                                  private_key):
-        ssh = self.get_remote_client(server_or_ip, private_key=private_key)
+        ssh = self.get_remote_client(ip_address, private_key=private_key)
 
         def _func():
             part = ssh.get_partitions()
             LOG.debug("Partitions:%s" % part)
             return CONF.compute.volume_device_name in part
 
-        if not tempest.test.call_until_true(_func,
-                                            CONF.compute.build_timeout,
-                                            CONF.compute.build_interval):
+        if not test.call_until_true(_func,
+                                    CONF.compute.build_timeout,
+                                    CONF.compute.build_interval):
             raise exceptions.TimeoutException
 
     @decorators.skip_because(bug="1205344")
     @test.idempotent_id('10fd234a-515c-41e5-b092-8323060598c5')
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
-    @tempest.test.services('compute', 'network', 'volume', 'image')
+    @test.services('compute', 'network', 'volume', 'image')
     def test_stamp_pattern(self):
         # prepare for booting an instance
         keypair = self.create_keypair()
         security_group = self._create_security_group()
 
         # boot an instance and create a timestamp file in it
-        volume = self._create_volume()
-        server = self._boot_image(CONF.compute.image_ref, keypair,
-                                  security_group)
+        volume = self.create_volume()
+        server = self.create_server(
+            image_id=CONF.compute.image_ref,
+            key_name=keypair['name'],
+            security_groups=security_group,
+            wait_until='ACTIVE')
 
         # create and add floating IP to server1
-        if CONF.compute.use_floatingip_for_ssh:
-            floating_ip_for_server = self.create_floating_ip(server)
-            ip_for_server = floating_ip_for_server['ip']
-        else:
-            ip_for_server = server
+        ip_for_server = self.get_server_ip(server)
 
-        self._attach_volume(server, volume)
+        self.nova_volume_attach(server, volume)
         self._wait_for_volume_available_on_the_system(ip_for_server,
                                                       keypair['private_key'])
         timestamp = self.create_timestamp(ip_for_server,
                                           CONF.compute.volume_device_name,
                                           private_key=keypair['private_key'])
-        self._detach_volume(server, volume)
+        self.nova_volume_detach(server, volume)
 
         # snapshot the volume
         volume_snapshot = self._create_volume_snapshot(volume)
@@ -159,23 +128,20 @@
         snapshot_image = self.create_server_snapshot(server=server)
 
         # create second volume from the snapshot(volume2)
-        volume_from_snapshot = self._create_volume(
+        volume_from_snapshot = self.create_volume(
             snapshot_id=volume_snapshot['id'])
 
         # boot second instance from the snapshot(instance2)
-        server_from_snapshot = self._boot_image(snapshot_image['id'],
-                                                keypair, security_group)
+        server_from_snapshot = self.create_server(
+            image_id=snapshot_image['id'],
+            key_name=keypair['name'],
+            security_groups=security_group)
 
         # create and add floating IP to server_from_snapshot
-        if CONF.compute.use_floatingip_for_ssh:
-            floating_ip_for_snapshot = self.create_floating_ip(
-                server_from_snapshot)
-            ip_for_snapshot = floating_ip_for_snapshot['ip']
-        else:
-            ip_for_snapshot = server_from_snapshot
+        ip_for_snapshot = self.get_server_ip(server_from_snapshot)
 
         # attach volume2 to instance2
-        self._attach_volume(server_from_snapshot, volume_from_snapshot)
+        self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
         self._wait_for_volume_available_on_the_system(ip_for_snapshot,
                                                       keypair['private_key'])
 
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 414305d..4ce57db 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,8 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log
-
 from tempest.common.utils import data_utils
 from tempest.common import waiters
 from tempest import config
@@ -20,13 +18,10 @@
 
 CONF = config.CONF
 
-LOG = log.getLogger(__name__)
-
 
 class TestVolumeBootPattern(manager.ScenarioTest):
 
-    """
-    This test case attempts to reproduce the following steps:
+    """This test case attempts to reproduce the following steps:
 
      * Create in Cinder some bootable volume importing a Glance image
      * Boot an instance from the bootable volume
@@ -70,7 +65,10 @@
                 {'name': security_group['name']}]
         create_kwargs.update(self._get_bdm(
             vol_id, delete_on_termination=delete_on_termination))
-        return self.create_server(image='', create_kwargs=create_kwargs)
+        return self.create_server(
+            image_id='',
+            wait_until='ACTIVE',
+            **create_kwargs)
 
     def _create_snapshot_from_volume(self, vol_id):
         snap_name = data_utils.rand_name('snapshot')
@@ -95,13 +93,6 @@
         vol_name = data_utils.rand_name('volume')
         return self.create_volume(name=vol_name, snapshot_id=snap_id)
 
-    def _get_server_ip(self, server):
-        if CONF.compute.use_floatingip_for_ssh:
-            ip = self.create_floating_ip(server)['ip']
-        else:
-            ip = server
-        return ip
-
     def _delete_server(self, server):
         self.servers_client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.servers_client, server['id'])
@@ -119,7 +110,7 @@
                                                        keypair, security_group)
 
         # write content to volume on instance
-        ip_instance_1st = self._get_server_ip(instance_1st)
+        ip_instance_1st = self.get_server_ip(instance_1st)
         timestamp = self.create_timestamp(ip_instance_1st,
                                           private_key=keypair['private_key'])
 
@@ -131,7 +122,7 @@
                                                        keypair, security_group)
 
         # check the content of written file
-        ip_instance_2nd = self._get_server_ip(instance_2nd)
+        ip_instance_2nd = self.get_server_ip(instance_2nd)
         timestamp2 = self.get_timestamp(ip_instance_2nd,
                                         private_key=keypair['private_key'])
         self.assertEqual(timestamp, timestamp2)
@@ -141,13 +132,13 @@
 
         # create a 3rd instance from snapshot
         volume = self._create_volume_from_snapshot(snapshot['id'])
-        instance_from_snapshot = (
+        server_from_snapshot = (
             self._boot_instance_from_volume(volume['id'],
                                             keypair, security_group))
 
         # check the content of written file
-        ip_instance_from_snapshot = self._get_server_ip(instance_from_snapshot)
-        timestamp3 = self.get_timestamp(ip_instance_from_snapshot,
+        server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
+        timestamp3 = self.get_timestamp(server_from_snapshot_ip,
                                         private_key=keypair['private_key'])
         self.assertEqual(timestamp, timestamp3)
 
@@ -166,7 +157,8 @@
         self._delete_server(instance)
 
         # boot instance from EBS image
-        instance = self.create_server(image=image['id'])
+        instance = self.create_server(
+            image_id=image['id'])
         # just ensure that instance booted
 
         # delete instance
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index 12509f7..3cbb3bc 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -24,7 +24,7 @@
 import testtools
 
 from tempest import clients
-from tempest.common import credentials
+from tempest.common import credentials_factory as credentials
 from tempest import config
 
 CONF = config.CONF
@@ -40,11 +40,11 @@
         self.non_ssh_image_pattern = \
             CONF.input_scenario.non_ssh_image_regex
         # Setup clients
-        self.images_client = os.images_client
+        self.compute_images_client = os.compute_images_client
         self.flavors_client = os.flavors_client
 
     def ssh_user(self, image_id):
-        _image = self.images_client.show_image(image_id)['image']
+        _image = self.compute_images_client.show_image(image_id)['image']
         for regex, user in self.ssh_users:
             # First match wins
             if re.match(regex, _image['name']) is not None:
@@ -57,14 +57,14 @@
                              string=str(image['name']))
 
     def is_sshable_image(self, image_id):
-        _image = self.images_client.show_image(image_id)['image']
+        _image = self.compute_images_client.show_image(image_id)['image']
         return self._is_sshable_image(_image)
 
     def _is_flavor_enough(self, flavor, image):
         return image['minDisk'] <= flavor['disk']
 
     def is_flavor_enough(self, flavor_id, image_id):
-        _image = self.images_client.show_image(image_id)['image']
+        _image = self.compute_images_client.show_image(image_id)['image']
         _flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
         return self._is_flavor_enough(_flavor, _image)
 
@@ -72,8 +72,7 @@
 @misc.singleton
 class InputScenarioUtils(object):
 
-    """
-    Example usage:
+    """Example usage:
 
     import testscenarios
     (...)
@@ -109,7 +108,7 @@
             identity_version=CONF.identity.auth_version,
             network_resources=network_resources)
         os = clients.Manager(self.cred_provider.get_primary_creds())
-        self.images_client = os.images_client
+        self.compute_images_client = os.compute_images_client
         self.flavors_client = os.flavors_client
         self.image_pattern = CONF.input_scenario.image_regex
         self.flavor_pattern = CONF.input_scenario.flavor_regex
@@ -124,14 +123,12 @@
 
     @property
     def scenario_images(self):
-        """
-        :return: a scenario with name and uuid of images
-        """
+        """:return: a scenario with name and uuid of images"""
         if not CONF.service_available.glance:
             return []
         if not hasattr(self, '_scenario_images'):
             try:
-                images = self.images_client.list_images()['images']
+                images = self.compute_images_client.list_images()['images']
                 self._scenario_images = [
                     (self._normalize_name(i['name']), dict(image_ref=i['id']))
                     for i in images if re.search(self.image_pattern,
@@ -143,9 +140,7 @@
 
     @property
     def scenario_flavors(self):
-        """
-        :return: a scenario with name and uuid of flavors
-        """
+        """:return: a scenario with name and uuid of flavors"""
         if not hasattr(self, '_scenario_flavors'):
             try:
                 flavors = self.flavors_client.list_flavors()['flavors']
@@ -160,10 +155,11 @@
 
 
 def load_tests_input_scenario_utils(*args):
+    """Wrapper for testscenarios to set the scenarios
+
+    The purpose is to avoid running a getattr on the CONF object at import.
     """
-    Wrapper for testscenarios to set the scenarios to avoid running a getattr
-    on the CONF object at import.
-    """
+
     if getattr(args[0], 'suiteClass', None) is not None:
         loader, standard_tests, pattern = args
     else:
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 2ac3fb2..d8cb99d 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -40,10 +40,7 @@
 
 
 class BaremetalClient(service_client.ServiceClient):
-    """
-    Base Tempest REST client for Ironic API.
-
-    """
+    """Base Tempest REST client for Ironic API."""
 
     uri_prefix = ''
 
@@ -58,12 +55,11 @@
         return json.loads(object_str)
 
     def _get_uri(self, resource_name, uuid=None, permanent=False):
-        """
-        Get URI for a specific resource or object.
+        """Get URI for a specific resource or object.
 
         :param resource_name: The name of the REST resource, e.g., 'nodes'.
         :param uuid: The unique identifier of an object in UUID format.
-        :return: Relative URI for the resource or object.
+        :returns: Relative URI for the resource or object.
 
         """
         prefix = self.uri_prefix if not permanent else ''
@@ -72,19 +68,18 @@
                                            res=resource_name,
                                            uuid='/%s' % uuid if uuid else '')
 
-    def _make_patch(self, allowed_attributes, **kw):
-        """
-        Create a JSON patch according to RFC 6902.
+    def _make_patch(self, allowed_attributes, **kwargs):
+        """Create a JSON patch according to RFC 6902.
 
         :param allowed_attributes: An iterable object that contains a set of
             allowed attributes for an object.
-        :param **kw: Attributes and new values for them.
-        :return: A JSON path that sets values of the specified attributes to
+        :param **kwargs: Attributes and new values for them.
+        :returns: A JSON patch that sets values of the specified attributes to
             the new ones.
 
         """
-        def get_change(kw, path='/'):
-            for name, value in six.iteritems(kw):
+        def get_change(kwargs, path='/'):
+            for name, value in six.iteritems(kwargs):
                 if isinstance(value, dict):
                     for ch in get_change(value, path + '%s/' % name):
                         yield ch
@@ -97,18 +92,17 @@
                                'value': value,
                                'op': 'replace'}
 
-        patch = [ch for ch in get_change(kw)
+        patch = [ch for ch in get_change(kwargs)
                  if ch['path'].lstrip('/') in allowed_attributes]
 
         return patch
 
     def _list_request(self, resource, permanent=False, **kwargs):
-        """
-        Get the list of objects of the specified type.
+        """Get the list of objects of the specified type.
 
         :param resource: The name of the REST resource, e.g., 'nodes'.
-        "param **kw: Parameters for the request.
-        :return: A tuple with the server response and deserialized JSON list
+        :param **kwargs: Parameters for the request.
+        :returns: A tuple with the server response and deserialized JSON list
                  of objects
 
         """
@@ -122,11 +116,10 @@
         return resp, self.deserialize(body)
 
     def _show_request(self, resource, uuid, permanent=False, **kwargs):
-        """
-        Gets a specific object of the specified type.
+        """Gets a specific object of the specified type.
 
         :param uuid: Unique identifier of the object in UUID format.
-        :return: Serialized object as a dictionary.
+        :returns: Serialized object as a dictionary.
 
         """
         if 'uri' in kwargs:
@@ -139,13 +132,12 @@
         return resp, self.deserialize(body)
 
     def _create_request(self, resource, object_dict):
-        """
-        Create an object of the specified type.
+        """Create an object of the specified type.
 
         :param resource: The name of the REST resource, e.g., 'nodes'.
         :param object_dict: A Python dict that represents an object of the
                             specified type.
-        :return: A tuple with the server response and the deserialized created
+        :returns: A tuple with the server response and the deserialized created
                  object.
 
         """
@@ -158,12 +150,11 @@
         return resp, self.deserialize(body)
 
     def _delete_request(self, resource, uuid):
-        """
-        Delete specified object.
+        """Delete specified object.
 
         :param resource: The name of the REST resource, e.g., 'nodes'.
         :param uuid: The unique identifier of an object in UUID format.
-        :return: A tuple with the server response and the response body.
+        :returns: A tuple with the server response and the response body.
 
         """
         uri = self._get_uri(resource, uuid)
@@ -173,12 +164,11 @@
         return resp, body
 
     def _patch_request(self, resource, uuid, patch_object):
-        """
-        Update specified object with JSON-patch.
+        """Update specified object with JSON-patch.
 
         :param resource: The name of the REST resource, e.g., 'nodes'.
         :param uuid: The unique identifier of an object in UUID format.
-        :return: A tuple with the server response and the serialized patched
+        :returns: A tuple with the server response and the serialized patched
                  object.
 
         """
@@ -197,20 +187,16 @@
 
     @handle_errors
     def get_version_description(self, version='v1'):
-        """
-        Retrieves the desctription of the API.
+        """Retrieves the desctription of the API.
 
         :param version: The version of the API. Default: 'v1'.
-        :return: Serialized description of API resources.
+        :returns: Serialized description of API resources.
 
         """
         return self._list_request(version, permanent=True)
 
     def _put_request(self, resource, put_object):
-        """
-        Update specified object with JSON-patch.
-
-        """
+        """Update specified object with JSON-patch."""
         uri = self._get_uri(resource)
         put_body = json.dumps(put_object)
 
diff --git a/tempest/services/baremetal/v1/json/baremetal_client.py b/tempest/services/baremetal/v1/json/baremetal_client.py
index 479402a..f24ef68 100644
--- a/tempest/services/baremetal/v1/json/baremetal_client.py
+++ b/tempest/services/baremetal/v1/json/baremetal_client.py
@@ -14,9 +14,7 @@
 
 
 class BaremetalClient(base.BaremetalClient):
-    """
-    Base Tempest REST client for Ironic API v1.
-    """
+    """Base Tempest REST client for Ironic API v1."""
     version = '1'
     uri_prefix = 'v1'
 
@@ -62,8 +60,7 @@
 
     @base.handle_errors
     def show_node(self, uuid):
-        """
-        Gets a specific node.
+        """Gets a specific node.
 
         :param uuid: Unique identifier of the node in UUID format.
         :return: Serialized node as a dictionary.
@@ -73,8 +70,7 @@
 
     @base.handle_errors
     def show_node_by_instance_uuid(self, instance_uuid):
-        """
-        Gets a node associated with given instance uuid.
+        """Gets a node associated with given instance uuid.
 
         :param uuid: Unique identifier of the node in UUID format.
         :return: Serialized node as a dictionary.
@@ -88,8 +84,7 @@
 
     @base.handle_errors
     def show_chassis(self, uuid):
-        """
-        Gets a specific chassis.
+        """Gets a specific chassis.
 
         :param uuid: Unique identifier of the chassis in UUID format.
         :return: Serialized chassis as a dictionary.
@@ -99,8 +94,7 @@
 
     @base.handle_errors
     def show_port(self, uuid):
-        """
-        Gets a specific port.
+        """Gets a specific port.
 
         :param uuid: Unique identifier of the port in UUID format.
         :return: Serialized port as a dictionary.
@@ -110,8 +104,7 @@
 
     @base.handle_errors
     def show_port_by_address(self, address):
-        """
-        Gets a specific port by address.
+        """Gets a specific port by address.
 
         :param address: MAC address of the port.
         :return: Serialized port as a dictionary.
@@ -122,8 +115,7 @@
         return self._show_request('ports', uuid=None, uri=uri)
 
     def show_driver(self, driver_name):
-        """
-        Gets a specific driver.
+        """Gets a specific driver.
 
         :param driver_name: Name of driver.
         :return: Serialized driver as a dictionary.
@@ -132,8 +124,7 @@
 
     @base.handle_errors
     def create_node(self, chassis_id=None, **kwargs):
-        """
-        Create a baremetal node with the specified parameters.
+        """Create a baremetal node with the specified parameters.
 
         :param cpu_arch: CPU architecture of the node. Default: x86_64.
         :param cpus: Number of CPUs. Default: 8.
@@ -154,8 +145,7 @@
 
     @base.handle_errors
     def create_chassis(self, **kwargs):
-        """
-        Create a chassis with the specified parameters.
+        """Create a chassis with the specified parameters.
 
         :param description: The description of the chassis.
             Default: test-chassis
@@ -168,8 +158,7 @@
 
     @base.handle_errors
     def create_port(self, node_id, **kwargs):
-        """
-        Create a port with the specified parameters.
+        """Create a port with the specified parameters.
 
         :param node_id: The ID of the node which owns the port.
         :param address: MAC address of the port.
@@ -191,8 +180,7 @@
 
     @base.handle_errors
     def delete_node(self, uuid):
-        """
-        Deletes a node having the specified UUID.
+        """Deletes a node having the specified UUID.
 
         :param uuid: The unique identifier of the node.
         :return: A tuple with the server response and the response body.
@@ -202,8 +190,7 @@
 
     @base.handle_errors
     def delete_chassis(self, uuid):
-        """
-        Deletes a chassis having the specified UUID.
+        """Deletes a chassis having the specified UUID.
 
         :param uuid: The unique identifier of the chassis.
         :return: A tuple with the server response and the response body.
@@ -213,8 +200,7 @@
 
     @base.handle_errors
     def delete_port(self, uuid):
-        """
-        Deletes a port having the specified UUID.
+        """Deletes a port having the specified UUID.
 
         :param uuid: The unique identifier of the port.
         :return: A tuple with the server response and the response body.
@@ -224,8 +210,7 @@
 
     @base.handle_errors
     def update_node(self, uuid, **kwargs):
-        """
-        Update the specified node.
+        """Update the specified node.
 
         :param uuid: The unique identifier of the node.
         :return: A tuple with the server response and the updated node.
@@ -244,8 +229,7 @@
 
     @base.handle_errors
     def update_chassis(self, uuid, **kwargs):
-        """
-        Update the specified chassis.
+        """Update the specified chassis.
 
         :param uuid: The unique identifier of the chassis.
         :return: A tuple with the server response and the updated chassis.
@@ -258,8 +242,7 @@
 
     @base.handle_errors
     def update_port(self, uuid, patch):
-        """
-        Update the specified port.
+        """Update the specified port.
 
         :param uuid: The unique identifier of the port.
         :param patch: List of dicts representing json patches.
@@ -271,8 +254,7 @@
 
     @base.handle_errors
     def set_node_power_state(self, node_uuid, state):
-        """
-        Set power state of the specified node.
+        """Set power state of the specified node.
 
         :param node_uuid: The unique identifier of the node.
         :state: desired state to set (on/off/reboot).
@@ -284,8 +266,7 @@
 
     @base.handle_errors
     def validate_driver_interface(self, node_uuid):
-        """
-        Get all driver interfaces of a specific node.
+        """Get all driver interfaces of a specific node.
 
         :param uuid: Unique identifier of the node in UUID format.
 
@@ -300,8 +281,7 @@
 
     @base.handle_errors
     def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
-        """
-        Set the boot device of the specified node.
+        """Set the boot device of the specified node.
 
         :param node_uuid: The unique identifier of the node.
         :param boot_device: The boot device name.
@@ -318,8 +298,7 @@
 
     @base.handle_errors
     def get_node_boot_device(self, node_uuid):
-        """
-        Get the current boot device of the specified node.
+        """Get the current boot device of the specified node.
 
         :param node_uuid: The unique identifier of the node.
 
@@ -331,8 +310,7 @@
 
     @base.handle_errors
     def get_node_supported_boot_devices(self, node_uuid):
-        """
-        Get the supported boot devices of the specified node.
+        """Get the supported boot devices of the specified node.
 
         :param node_uuid: The unique identifier of the node.
 
@@ -344,8 +322,7 @@
 
     @base.handle_errors
     def get_console(self, node_uuid):
-        """
-        Get connection information about the console.
+        """Get connection information about the console.
 
         :param node_uuid: Unique identifier of the node in UUID format.
 
@@ -357,8 +334,7 @@
 
     @base.handle_errors
     def set_console_mode(self, node_uuid, enabled):
-        """
-        Start and stop the node console.
+        """Start and stop the node console.
 
         :param node_uuid: Unique identifier of the node in UUID format.
         :param enabled: Boolean value; whether to enable or disable the
diff --git a/tempest/services/base_microversion_client.py b/tempest/services/base_microversion_client.py
new file mode 100644
index 0000000..4c750f5
--- /dev/null
+++ b/tempest/services/base_microversion_client.py
@@ -0,0 +1,54 @@
+# Copyright 2016 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest_lib.common import rest_client
+
+
+class BaseMicroversionClient(rest_client.RestClient):
+    """Base class to support microversion in service clients
+
+    This class adds microversion support to service clients: it allows
+    service clients to make API requests with a specific microversion.
+    Service clients derived from this class can send API requests to the
+    server with or without a microversion.
+    If api_microversion is not set on the service client, the API request
+    is sent as a normal request without a microversion header.
+
+    """
+    def __init__(self, auth_provider, service, region,
+                 api_microversion_header_name, **kwargs):
+        """Base Microversion Client __init__
+
+        :param auth_provider: an auth provider object used to wrap requests in
+                              auth
+        :param str service: The service name to use for the catalog lookup
+        :param str region: The region to use for the catalog lookup
+        :param str api_microversion_header_name: The microversion header name
+                                                 to use for sending API
+                                                 request with microversion
+        :param kwargs: kwargs required by rest_client.RestClient
+        """
+        super(BaseMicroversionClient, self).__init__(
+            auth_provider, service, region, **kwargs)
+        self.api_microversion_header_name = api_microversion_header_name
+        self.api_microversion = None
+
+    def get_headers(self):
+        headers = super(BaseMicroversionClient, self).get_headers()
+        if self.api_microversion:
+            headers[self.api_microversion_header_name] = self.api_microversion
+        return headers
+
+    def set_api_microversion(self, microversion):
+        self.api_microversion = microversion
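A minimal sketch of how the new client injects the microversion header once set_api_microversion() has been called; the base class below is a stand-in for tempest_lib's rest_client.RestClient, and the header value is illustrative:

class FakeRestClient(object):
    # Stand-in for rest_client.RestClient; only get_headers() matters here.
    def get_headers(self):
        return {'Accept': 'application/json'}

class MicroversionClient(FakeRestClient):
    def __init__(self, header_name):
        self.api_microversion_header_name = header_name
        self.api_microversion = None

    def set_api_microversion(self, microversion):
        self.api_microversion = microversion

    def get_headers(self):
        # Same logic as BaseMicroversionClient.get_headers() above.
        headers = super(MicroversionClient, self).get_headers()
        if self.api_microversion:
            headers[self.api_microversion_header_name] = self.api_microversion
        return headers

client = MicroversionClient('X-OpenStack-Nova-API-Version')
client.set_api_microversion('2.2')
print(client.get_headers())
# -> {'Accept': 'application/json', 'X-OpenStack-Nova-API-Version': '2.2'}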
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
deleted file mode 100644
index bedb9ec..0000000
--- a/tempest/services/botoclients.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import types
-
-import boto
-import boto.ec2
-import boto.s3.connection
-from six.moves import configparser as ConfigParser
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-from tempest import config
-
-CONF = config.CONF
-
-
-class BotoClientBase(object):
-
-    ALLOWED_METHODS = set()
-
-    def __init__(self, identity_client):
-        self.identity_client = identity_client
-
-        self.ca_cert = CONF.identity.ca_certificates_file
-        self.connection_timeout = str(CONF.boto.http_socket_timeout)
-        self.num_retries = str(CONF.boto.num_retries)
-        self.build_timeout = CONF.boto.build_timeout
-
-        self.connection_data = {}
-
-    def _config_boto_timeout(self, timeout, retries):
-        try:
-            boto.config.add_section("Boto")
-        except ConfigParser.DuplicateSectionError:
-            pass
-        boto.config.set("Boto", "http_socket_timeout", timeout)
-        boto.config.set("Boto", "num_retries", retries)
-
-    def _config_boto_ca_certificates_file(self, ca_cert):
-        if ca_cert is None:
-            return
-
-        try:
-            boto.config.add_section("Boto")
-        except ConfigParser.DuplicateSectionError:
-            pass
-        boto.config.set("Boto", "ca_certificates_file", ca_cert)
-
-    def __getattr__(self, name):
-        """Automatically creates methods for the allowed methods set."""
-        if name in self.ALLOWED_METHODS:
-            def func(self, *args, **kwargs):
-                with contextlib.closing(self.get_connection()) as conn:
-                    return getattr(conn, name)(*args, **kwargs)
-
-            func.__name__ = name
-            setattr(self, name, types.MethodType(func, self, self.__class__))
-            setattr(self.__class__, name,
-                    types.MethodType(func, None, self.__class__))
-            return getattr(self, name)
-        else:
-            raise AttributeError(name)
-
-    def get_connection(self):
-        self._config_boto_timeout(self.connection_timeout, self.num_retries)
-        self._config_boto_ca_certificates_file(self.ca_cert)
-
-        ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
-                           'aws_secret_access_key': CONF.boto.aws_secret}
-        if not all(ec2_client_args.values()):
-            ec2_client_args = self.get_aws_credentials(self.identity_client)
-
-        self.connection_data.update(ec2_client_args)
-        return self.connect_method(**self.connection_data)
-
-    def get_aws_credentials(self, identity_client):
-        """
-        Obtain existing, or create new AWS credentials
-        :param identity_client: identity client with embedded credentials
-        :return: EC2 credentials
-        """
-        ec2_cred_list = identity_client.list_user_ec2_credentials(
-            identity_client.user_id)['credentials']
-        for cred in ec2_cred_list:
-            if cred['tenant_id'] == identity_client.tenant_id:
-                ec2_cred = cred
-                break
-        else:
-            ec2_cred = (identity_client.create_user_ec2_credentials(
-                identity_client.user_id, identity_client.tenant_id)
-                ['credential'])
-        if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
-            raise lib_exc.NotFound("Unable to get access and secret keys")
-        else:
-            ec2_cred_aws = {}
-            ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
-            ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
-        return ec2_cred_aws
-
-
-class APIClientEC2(BotoClientBase):
-
-    def connect_method(self, *args, **kwargs):
-        return boto.connect_ec2(*args, **kwargs)
-
-    def __init__(self, identity_client):
-        super(APIClientEC2, self).__init__(identity_client)
-        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
-        purl = urlparse.urlparse(CONF.boto.ec2_url)
-
-        region_name = CONF.compute.region
-        if not region_name:
-            region_name = CONF.identity.region
-        region = boto.ec2.regioninfo.RegionInfo(name=region_name,
-                                                endpoint=purl.hostname)
-        port = purl.port
-        if port is None:
-            if purl.scheme is not "https":
-                port = 80
-            else:
-                port = 443
-        else:
-            port = int(port)
-        self.connection_data.update({"is_secure": purl.scheme == "https",
-                                     "validate_certs": not insecure_ssl,
-                                     "region": region,
-                                     "host": purl.hostname,
-                                     "port": port,
-                                     "path": purl.path})
-
-    ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
-                           'delete_key_pair', 'import_key_pair',
-                           'get_all_key_pairs',
-                           'get_all_tags',
-                           'create_image', 'get_image',
-                           'register_image', 'deregister_image',
-                           'get_all_images', 'get_image_attribute',
-                           'modify_image_attribute', 'reset_image_attribute',
-                           'get_all_kernels',
-                           'create_volume', 'delete_volume',
-                           'get_all_volume_status', 'get_all_volumes',
-                           'get_volume_attribute', 'modify_volume_attribute'
-                           'bundle_instance', 'cancel_spot_instance_requests',
-                           'confirm_product_instanc',
-                           'get_all_instance_status', 'get_all_instances',
-                           'get_all_reserved_instances',
-                           'get_all_spot_instance_requests',
-                           'get_instance_attribute', 'monitor_instance',
-                           'monitor_instances', 'unmonitor_instance',
-                           'unmonitor_instances',
-                           'purchase_reserved_instance_offering',
-                           'reboot_instances', 'request_spot_instances',
-                           'reset_instance_attribute', 'run_instances',
-                           'start_instances', 'stop_instances',
-                           'terminate_instances',
-                           'attach_network_interface', 'attach_volume',
-                           'detach_network_interface', 'detach_volume',
-                           'get_console_output',
-                           'delete_network_interface', 'create_subnet',
-                           'create_network_interface', 'delete_subnet',
-                           'get_all_network_interfaces',
-                           'allocate_address', 'associate_address',
-                           'disassociate_address', 'get_all_addresses',
-                           'release_address',
-                           'create_snapshot', 'delete_snapshot',
-                           'get_all_snapshots', 'get_snapshot_attribute',
-                           'modify_snapshot_attribute',
-                           'reset_snapshot_attribute', 'trim_snapshots',
-                           'get_all_regions', 'get_all_zones',
-                           'get_all_security_groups', 'create_security_group',
-                           'delete_security_group', 'authorize_security_group',
-                           'authorize_security_group_egress',
-                           'revoke_security_group',
-                           'revoke_security_group_egress'))
-
-
-class ObjectClientS3(BotoClientBase):
-
-    def connect_method(self, *args, **kwargs):
-        return boto.connect_s3(*args, **kwargs)
-
-    def __init__(self, identity_client):
-        super(ObjectClientS3, self).__init__(identity_client)
-        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
-        purl = urlparse.urlparse(CONF.boto.s3_url)
-        port = purl.port
-        if port is None:
-            if purl.scheme is not "https":
-                port = 80
-            else:
-                port = 443
-        else:
-            port = int(port)
-        self.connection_data.update({"is_secure": purl.scheme == "https",
-                                     "validate_certs": not insecure_ssl,
-                                     "host": purl.hostname,
-                                     "port": port,
-                                     "calling_format": boto.s3.connection.
-                                     OrdinaryCallingFormat()})
-
-    ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
-                           'get_all_buckets', 'get_bucket', 'delete_key',
-                           'lookup'))
diff --git a/tempest/services/compute/json/base_compute_client.py b/tempest/services/compute/json/base_compute_client.py
new file mode 100644
index 0000000..5349af6
--- /dev/null
+++ b/tempest/services/compute/json/base_compute_client.py
@@ -0,0 +1,72 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import api_version_request
+from tempest.common import api_version_utils
+from tempest import exceptions
+from tempest.services import base_microversion_client
+
+
+class BaseComputeClient(base_microversion_client.BaseMicroversionClient):
+
+    def __init__(self, auth_provider, service, region,
+                 api_microversion_header_name='X-OpenStack-Nova-API-Version',
+                 **kwargs):
+        super(BaseComputeClient, self).__init__(
+            auth_provider, service, region,
+            api_microversion_header_name, **kwargs)
+
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
+        resp, resp_body = super(BaseComputeClient, self).request(
+            method, url, extra_headers, headers, body)
+        if self.api_microversion and self.api_microversion != 'latest':
+            api_version_utils.assert_version_header_matches_request(
+                self.api_microversion_header_name,
+                self.api_microversion,
+                resp)
+        return resp, resp_body
+
+    def get_schema(self, schema_versions_info):
+        """Get JSON schema
+
+        This method provides the matching schema for requested
+        microversion (self.api_microversion).
+        :param schema_versions_info: List of dicts which provide schema
+        information with the range of valid versions.
+        Example -
+        schema_versions_info = [
+            {'min': None, 'max': '2.1', 'schema': schemav21},
+            {'min': '2.2', 'max': '2.9', 'schema': schemav22},
+            {'min': '2.10', 'max': None, 'schema': schemav210}]
+        """
+        schema = None
+        version = api_version_request.APIVersionRequest(self.api_microversion)
+        for items in schema_versions_info:
+            min_version = api_version_request.APIVersionRequest(items['min'])
+            max_version = api_version_request.APIVersionRequest(items['max'])
+            # This is the case where self.api_microversion is None, meaning
+            # a request without microversion. Select the base v2.1 schema.
+            if version.is_null() and items['min'] is None:
+                schema = items['schema']
+                break
+            # else select appropriate schema as per self.api_microversion
+            elif version.matches(min_version, max_version):
+                schema = items['schema']
+                break
+        if schema is None:
+            raise exceptions.JSONSchemaNotFound(
+                version=version.get_string(),
+                schema_versions_info=schema_versions_info)
+        return schema
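A simplified, standalone illustration of the schema selection that get_schema() performs; version matching below uses plain tuple comparison instead of tempest's APIVersionRequest, and the schema objects are placeholders:

def _v(ver):
    # Parse '2.10' -> (2, 10); None stays None (meaning "unbounded"/"unset").
    return tuple(int(p) for p in ver.split('.')) if ver else None

def pick_schema(schema_versions_info, microversion):
    for item in schema_versions_info:
        if microversion is None and item['min'] is None:
            return item['schema']  # request without a microversion -> base schema
        if microversion is None:
            continue
        v = _v(microversion)
        lo = _v(item['min']) or (0,)
        hi = _v(item['max']) or (999,)
        if lo <= v <= hi:
            return item['schema']
    raise LookupError('no schema matches %s' % microversion)

versions = [{'min': None, 'max': '2.1', 'schema': 'schemav21'},
            {'min': '2.2', 'max': '2.9', 'schema': 'schemav22'},
            {'min': '2.10', 'max': None, 'schema': 'schemav210'}]
print(pick_schema(versions, '2.10'))  # -> 'schemav210'
print(pick_schema(versions, None))    # -> 'schemav21'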
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
deleted file mode 100644
index 69d06a3..0000000
--- a/tempest/services/compute/json/floating_ips_client.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import floating_ips as schema
-from tempest.common import service_client
-
-
-class FloatingIPsClient(service_client.ServiceClient):
-
-    def list_floating_ips(self, **params):
-        """Returns a list of all floating IPs filtered by any parameters."""
-        url = 'os-floating-ips'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_floating_ips, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_floating_ip(self, floating_ip_id):
-        """Get the details of a floating IP."""
-        url = "os-floating-ips/%s" % floating_ip_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_floating_ip(self, pool_name=None):
-        """Allocate a floating IP to the project."""
-        url = 'os-floating-ips'
-        post_body = {'pool': pool_name}
-        post_body = json.dumps(post_body)
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_floating_ip(self, floating_ip_id):
-        """Deletes the provided floating IP from the project."""
-        url = "os-floating-ips/%s" % floating_ip_id
-        resp, body = self.delete(url)
-        self.validate_response(schema.add_remove_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def associate_floating_ip_to_server(self, floating_ip, server_id):
-        """Associate the provided floating IP to a specific server."""
-        url = "servers/%s/action" % server_id
-        post_body = {
-            'addFloatingIp': {
-                'address': floating_ip,
-            }
-        }
-
-        post_body = json.dumps(post_body)
-        resp, body = self.post(url, post_body)
-        self.validate_response(schema.add_remove_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def disassociate_floating_ip_from_server(self, floating_ip, server_id):
-        """Disassociate the provided floating IP from a specific server."""
-        url = "servers/%s/action" % server_id
-        post_body = {
-            'removeFloatingIp': {
-                'address': floating_ip,
-            }
-        }
-
-        post_body = json.dumps(post_body)
-        resp, body = self.post(url, post_body)
-        self.validate_response(schema.add_remove_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_floating_ip(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'floating_ip'
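
The floating IPs client above, like several of the compute clients removed below, exposes the is_resource_deleted()/resource_type pair that generic cleanup code polls after issuing a delete. A minimal, self-contained sketch of such a polling helper follows; the helper name, timeout defaults, and RuntimeError are illustrative assumptions rather than Tempest's actual waiter implementation.

import time

def wait_for_resource_deletion(client, resource_id, timeout=60, interval=1):
    # Poll client.is_resource_deleted() until the backend reports the
    # resource as gone (the method returns True) or the timeout expires.
    deadline = time.time() + timeout
    while not client.is_resource_deleted(resource_id):
        if time.time() > deadline:
            raise RuntimeError('%s %s was not deleted within %s seconds'
                               % (client.resource_type, resource_id, timeout))
        time.sleep(interval)
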
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
deleted file mode 100644
index 99fdfe6..0000000
--- a/tempest/services/compute/json/images_client.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import images as schema
-from tempest.common import service_client
-
-
-class ImagesClient(service_client.ServiceClient):
-
-    def create_image(self, server_id, **kwargs):
-        """Creates an image of the original server."""
-
-        post_body = {'createImage': kwargs}
-        post_body = json.dumps(post_body)
-        resp, body = self.post('servers/%s/action' % server_id,
-                               post_body)
-        self.validate_response(schema.create_image, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_images(self, detail=False, **params):
-        """Returns a list of all images filtered by any parameters."""
-        url = 'images'
-        _schema = schema.list_images
-        if detail:
-            url += '/detail'
-            _schema = schema.list_images_details
-
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(_schema, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_image(self, image_id):
-        """Returns the details of a single image."""
-        resp, body = self.get("images/%s" % image_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        self.validate_response(schema.get_image, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_image(self, image_id):
-        """Deletes the provided image."""
-        resp, body = self.delete("images/%s" % image_id)
-        self.validate_response(schema.delete, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_image_metadata(self, image_id):
-        """Lists all metadata items for an image."""
-        resp, body = self.get("images/%s/metadata" % image_id)
-        body = json.loads(body)
-        self.validate_response(schema.image_metadata, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def set_image_metadata(self, image_id, meta):
-        """Sets the metadata for an image."""
-        post_body = json.dumps({'metadata': meta})
-        resp, body = self.put('images/%s/metadata' % image_id, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.image_metadata, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_image_metadata(self, image_id, meta):
-        """Updates the metadata for an image."""
-        post_body = json.dumps({'metadata': meta})
-        resp, body = self.post('images/%s/metadata' % image_id, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.image_metadata, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_image_metadata_item(self, image_id, key):
-        """Returns the value for a specific image metadata key."""
-        resp, body = self.get("images/%s/metadata/%s" % (image_id, key))
-        body = json.loads(body)
-        self.validate_response(schema.image_meta_item, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def set_image_metadata_item(self, image_id, key, meta):
-        """Sets the value for a specific image metadata key."""
-        post_body = json.dumps({'meta': meta})
-        resp, body = self.put('images/%s/metadata/%s' % (image_id, key),
-                              post_body)
-        body = json.loads(body)
-        self.validate_response(schema.image_meta_item, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_image_metadata_item(self, image_id, key):
-        """Deletes a single image metadata key/value pair."""
-        resp, body = self.delete("images/%s/metadata/%s" %
-                                 (image_id, key))
-        self.validate_response(schema.delete, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_image(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'image'
diff --git a/tempest/services/compute/json/instance_usage_audit_log_client.py b/tempest/services/compute/json/instance_usage_audit_log_client.py
deleted file mode 100644
index 4d9625e..0000000
--- a/tempest/services/compute/json/instance_usage_audit_log_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2013 IBM Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import \
-    instance_usage_audit_logs as schema
-from tempest.common import service_client
-
-
-class InstanceUsagesAuditLogClient(service_client.ServiceClient):
-
-    def list_instance_usage_audit_logs(self):
-        url = 'os-instance_usage_audit_log'
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_instance_usage_audit_log,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_instance_usage_audit_log(self, time_before):
-        url = 'os-instance_usage_audit_log/%s' % time_before
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.get_instance_usage_audit_log, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
deleted file mode 100644
index fe076d8..0000000
--- a/tempest/services/compute/json/interfaces_client.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import interfaces as schema
-from tempest.common import service_client
-
-
-class InterfacesClient(service_client.ServiceClient):
-
-    def list_interfaces(self, server_id):
-        resp, body = self.get('servers/%s/os-interface' % server_id)
-        body = json.loads(body)
-        self.validate_response(schema.list_interfaces, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_interface(self, server_id, **kwargs):
-        post_body = {'interfaceAttachment': kwargs}
-        post_body = json.dumps(post_body)
-        resp, body = self.post('servers/%s/os-interface' % server_id,
-                               body=post_body)
-        body = json.loads(body)
-        self.validate_response(schema.get_create_interfaces, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_interface(self, server_id, port_id):
-        resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
-                                                              port_id))
-        body = json.loads(body)
-        self.validate_response(schema.get_create_interfaces, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_interface(self, server_id, port_id):
-        resp, body = self.delete('servers/%s/os-interface/%s' % (server_id,
-                                                                 port_id))
-        self.validate_response(schema.delete_interface, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 2e22bc6..ec9b1e0 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -15,21 +15,28 @@
 
 from oslo_serialization import jsonutils as json
 
-from tempest.api_schema.response.compute.v2_1 import keypairs as schema
+from tempest.api_schema.response.compute.v2_1 import keypairs as schemav21
+from tempest.api_schema.response.compute.v2_2 import keypairs as schemav22
 from tempest.common import service_client
+from tempest.services.compute.json import base_compute_client
 
 
-class KeyPairsClient(service_client.ServiceClient):
+class KeyPairsClient(base_compute_client.BaseComputeClient):
+
+    schema_versions_info = [{'min': None, 'max': '2.1', 'schema': schemav21},
+                            {'min': '2.2', 'max': None, 'schema': schemav22}]
 
     def list_keypairs(self):
         resp, body = self.get("os-keypairs")
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.list_keypairs, resp, body)
         return service_client.ResponseBody(resp, body)
 
     def show_keypair(self, keypair_name):
         resp, body = self.get("os-keypairs/%s" % keypair_name)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.get_keypair, resp, body)
         return service_client.ResponseBody(resp, body)
 
@@ -37,10 +44,12 @@
         post_body = json.dumps({'keypair': kwargs})
         resp, body = self.post("os-keypairs", body=post_body)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.create_keypair, resp, body)
         return service_client.ResponseBody(resp, body)
 
     def delete_keypair(self, keypair_name):
         resp, body = self.delete("os-keypairs/%s" % keypair_name)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.delete_keypair, resp, body)
         return service_client.ResponseBody(resp, body)
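
The keypairs hunk above replaces the single schema import with a schema_versions_info table and a per-call get_schema() lookup, so each response is validated against the schema that matches the negotiated compute microversion. A rough, standalone sketch of how such a lookup can work is below; the function names and the (major, minor) tuple comparison are assumptions for illustration, not the BaseComputeClient implementation.

def _to_tuple(version):
    # '2.2' -> (2, 2), so that '2.10' compares greater than '2.2'.
    return tuple(int(part) for part in version.split('.'))

def pick_schema(schema_versions_info, requested_version):
    # Return the schema whose [min, max] range (None meaning an open bound)
    # covers the requested microversion.
    req = _to_tuple(requested_version)
    for entry in schema_versions_info:
        low = _to_tuple(entry['min']) if entry['min'] else None
        high = _to_tuple(entry['max']) if entry['max'] else None
        if (low is None or req >= low) and (high is None or req <= high):
            return entry['schema']
    raise ValueError('no schema covers version %s' % requested_version)

# Using the table from KeyPairsClient, with stand-in strings for the modules:
versions = [{'min': None, 'max': '2.1', 'schema': 'schemav21'},
            {'min': '2.2', 'max': None, 'schema': 'schemav22'}]
assert pick_schema(versions, '2.1') == 'schemav21'
assert pick_schema(versions, '2.10') == 'schemav22'
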
diff --git a/tempest/services/compute/json/limits_client.py b/tempest/services/compute/json/limits_client.py
deleted file mode 100644
index b64b4a5..0000000
--- a/tempest/services/compute/json/limits_client.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import limits as schema
-from tempest.common import service_client
-
-
-class LimitsClient(service_client.ServiceClient):
-
-    def show_limits(self):
-        resp, body = self.get("limits")
-        body = json.loads(body)
-        self.validate_response(schema.get_limit, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
deleted file mode 100644
index b302539..0000000
--- a/tempest/services/compute/json/migrations_client.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2014 NEC Corporation.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.api_schema.response.compute.v2_1 import migrations as schema
-from tempest.common import service_client
-
-
-class MigrationsClient(service_client.ServiceClient):
-
-    def list_migrations(self, **params):
-        """Lists all migrations."""
-
-        url = 'os-migrations'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_migrations, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/networks_client.py b/tempest/services/compute/json/networks_client.py
deleted file mode 100644
index dd20ee5..0000000
--- a/tempest/services/compute/json/networks_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.common import service_client
-
-
-class NetworksClient(service_client.ServiceClient):
-
-    def list_networks(self):
-        resp, body = self.get("os-networks")
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def show_network(self, network_id):
-        resp, body = self.get("os-networks/%s" % network_id)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/quota_classes_client.py b/tempest/services/compute/json/quota_classes_client.py
deleted file mode 100644
index d55c3f1..0000000
--- a/tempest/services/compute/json/quota_classes_client.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2012 NTT Data
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1\
-    import quota_classes as classes_schema
-from tempest.common import service_client
-
-
-class QuotaClassesClient(service_client.ServiceClient):
-
-    def show_quota_class_set(self, quota_class_id):
-        """List the quota class set for a quota class."""
-
-        url = 'os-quota-class-sets/%s' % quota_class_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(classes_schema.get_quota_class_set, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_quota_class_set(self, quota_class_id, **kwargs):
-        """
-        Updates the quota class's limits for one or more resources.
-        """
-        post_body = json.dumps({'quota_class_set': kwargs})
-
-        resp, body = self.put('os-quota-class-sets/%s' % quota_class_id,
-                              post_body)
-
-        body = json.loads(body)
-        self.validate_response(classes_schema.update_quota_class_set,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
deleted file mode 100644
index 4a1b909..0000000
--- a/tempest/services/compute/json/quotas_client.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2012 NTT Data
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import quotas as schema
-from tempest.common import service_client
-
-
-class QuotasClient(service_client.ServiceClient):
-
-    def show_quota_set(self, tenant_id, user_id=None):
-        """List the quota set for a tenant."""
-
-        url = 'os-quota-sets/%s' % tenant_id
-        if user_id:
-            url += '?user_id=%s' % user_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.get_quota_set, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_default_quota_set(self, tenant_id):
-        """List the default quota set for a tenant."""
-
-        url = 'os-quota-sets/%s/defaults' % tenant_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.get_quota_set, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_quota_set(self, tenant_id, user_id=None, **kwargs):
-        """
-        Updates the tenant's quota limits for one or more resources.
-        """
-        post_body = json.dumps({'quota_set': kwargs})
-
-        if user_id:
-            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
-                                  (tenant_id, user_id), post_body)
-        else:
-            resp, body = self.put('os-quota-sets/%s' % tenant_id,
-                                  post_body)
-
-        body = json.loads(body)
-        self.validate_response(schema.update_quota_set, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_quota_set(self, tenant_id):
-        """Delete the tenant's quota set."""
-        resp, body = self.delete('os-quota-sets/%s' % tenant_id)
-        self.validate_response(schema.delete_quota, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
deleted file mode 100644
index 6e4d1e4..0000000
--- a/tempest/services/compute/json/security_group_default_rules_client.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2014 NEC Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import \
-    security_group_default_rule as schema
-from tempest.common import service_client
-
-
-class SecurityGroupDefaultRulesClient(service_client.ServiceClient):
-
-    def create_security_default_group_rule(self, **kwargs):
-        """
-        Create a security group default rule.
-        ip_protocol : ip_protocol (icmp, tcp, udp).
-        from_port: Port at start of range.
-        to_port  : Port at end of range.
-        cidr     : CIDR for address range.
-        """
-        post_body = json.dumps({'security_group_default_rule': kwargs})
-        url = 'os-security-group-default-rules'
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_security_group_default_rule,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_security_group_default_rule(self,
-                                           security_group_default_rule_id):
-        """Deletes the provided Security Group default rule."""
-        resp, body = self.delete('os-security-group-default-rules/%s' % (
-            security_group_default_rule_id))
-        self.validate_response(schema.delete_security_group_default_rule,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_security_group_default_rules(self):
-        """List all Security Group default rules."""
-        resp, body = self.get('os-security-group-default-rules')
-        body = json.loads(body)
-        self.validate_response(schema.list_security_group_default_rules,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_security_group_default_rule(self, security_group_default_rule_id):
-        """Return the details of the provided Security Group default rule."""
-        resp, body = self.get('os-security-group-default-rules/%s' %
-                              security_group_default_rule_id)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_security_group_default_rule,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/security_group_rules_client.py b/tempest/services/compute/json/security_group_rules_client.py
deleted file mode 100644
index 9626f60..0000000
--- a/tempest/services/compute/json/security_group_rules_client.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import security_groups as schema
-from tempest.common import service_client
-
-
-class SecurityGroupRulesClient(service_client.ServiceClient):
-
-    def create_security_group_rule(self, **kwargs):
-        """
-        Create a new security group rule.
-        parent_group_id :ID of Security group
-        ip_protocol : ip_proto (icmp, tcp, udp).
-        from_port: Port at start of range.
-        to_port  : Port at end of range.
-        Following optional keyword arguments are accepted:
-        cidr     : CIDR for address range.
-        group_id : ID of the Source group
-        """
-        post_body = json.dumps({'security_group_rule': kwargs})
-        url = 'os-security-group-rules'
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_security_group_rule, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_security_group_rule(self, group_rule_id):
-        """Deletes the provided Security Group rule."""
-        resp, body = self.delete('os-security-group-rules/%s' %
-                                 group_rule_id)
-        self.validate_response(schema.delete_security_group_rule, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
deleted file mode 100644
index 083d03a..0000000
--- a/tempest/services/compute/json/security_groups_client.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import security_groups as schema
-from tempest.common import service_client
-
-
-class SecurityGroupsClient(service_client.ServiceClient):
-
-    def list_security_groups(self, **params):
-        """List all security groups for a user."""
-
-        url = 'os-security-groups'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_security_groups, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_security_group(self, security_group_id):
-        """Get the details of a Security Group."""
-        url = "os-security-groups/%s" % security_group_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.get_security_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_security_group(self, **kwargs):
-        """
-        Creates a new security group.
-        name (Required): Name of security group.
-        description (Required): Description of security group.
-        """
-        post_body = json.dumps({'security_group': kwargs})
-        resp, body = self.post('os-security-groups', post_body)
-        body = json.loads(body)
-        self.validate_response(schema.get_security_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_security_group(self, security_group_id, **kwargs):
-        """
-        Update a security group.
-        security_group_id: a security_group to update
-        name: new name of security group
-        description: new description of security group
-        """
-        post_body = json.dumps({'security_group': kwargs})
-        resp, body = self.put('os-security-groups/%s' % security_group_id,
-                              post_body)
-        body = json.loads(body)
-        self.validate_response(schema.update_security_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_security_group(self, security_group_id):
-        """Deletes the provided Security Group."""
-        resp, body = self.delete(
-            'os-security-groups/%s' % security_group_id)
-        self.validate_response(schema.delete_security_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_security_group(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'security_group'
diff --git a/tempest/services/compute/json/server_groups_client.py b/tempest/services/compute/json/server_groups_client.py
deleted file mode 100644
index 62258d3..0000000
--- a/tempest/services/compute/json/server_groups_client.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import servers as schema
-from tempest.common import service_client
-
-
-class ServerGroupsClient(service_client.ServiceClient):
-
-    def create_server_group(self, **kwargs):
-        """
-        Create the server group
-        name : Name of the server-group
-        policies : List of the policies - affinity/anti-affinity
-        """
-        post_body = json.dumps({'server_group': kwargs})
-        resp, body = self.post('os-server-groups', post_body)
-
-        body = json.loads(body)
-        self.validate_response(schema.create_get_server_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_server_group(self, server_group_id):
-        """Delete the given server-group."""
-        resp, body = self.delete("os-server-groups/%s" % server_group_id)
-        self.validate_response(schema.delete_server_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_server_groups(self):
-        """List the server-groups."""
-        resp, body = self.get("os-server-groups")
-        body = json.loads(body)
-        self.validate_response(schema.list_server_groups, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_server_group(self, server_group_id):
-        """Get the details of the given server_group."""
-        resp, body = self.get("os-server-groups/%s" % server_group_id)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_server_group, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
deleted file mode 100644
index e54cfe4..0000000
--- a/tempest/services/compute/json/servers_client.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.api_schema.response.compute.v2_1 import servers as schema
-from tempest.common import service_client
-
-
-class ServersClient(service_client.ServiceClient):
-
-    def __init__(self, auth_provider, service, region,
-                 enable_instance_password=True, **kwargs):
-        super(ServersClient, self).__init__(
-            auth_provider, service, region, **kwargs)
-        self.enable_instance_password = enable_instance_password
-
-    def create_server(self, **kwargs):
-        """Create server
-
-        Most parameters except the following are passed to the API without
-        any changes.
-        :param disk_config: The name is changed to OS-DCF:diskConfig
-        :param scheduler_hints: The name is changed to os:scheduler_hints and
-        the parameter is set at the same level as the parameter 'server'.
-        """
-        body = copy.deepcopy(kwargs)
-        if body.get('disk_config'):
-            body['OS-DCF:diskConfig'] = body.pop('disk_config')
-
-        hints = None
-        if body.get('scheduler_hints'):
-            hints = {'os:scheduler_hints': body.pop('scheduler_hints')}
-
-        post_body = {'server': body}
-
-        if hints:
-            post_body = dict(post_body.items() + hints.items())
-
-        post_body = json.dumps(post_body)
-        resp, body = self.post('servers', post_body)
-
-        body = json.loads(body)
-        # NOTE(maurosr): this deals with the case of multiple server create
-        # with return reservation id set True
-        if 'reservation_id' in body:
-            return service_client.ResponseBody(resp, body)
-        if self.enable_instance_password:
-            create_schema = schema.create_server_with_admin_pass
-        else:
-            create_schema = schema.create_server
-        self.validate_response(create_schema, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_server(self, server_id, **kwargs):
-        """Update server
-
-        Most parameters except the following are passed to the API without
-        any changes.
-        :param disk_config: The name is changed to OS-DCF:diskConfig
-        """
-        if kwargs.get('disk_config'):
-            kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
-
-        post_body = json.dumps({'server': kwargs})
-        resp, body = self.put("servers/%s" % server_id, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.update_server, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_server(self, server_id):
-        """Get server details"""
-        resp, body = self.get("servers/%s" % server_id)
-        body = json.loads(body)
-        self.validate_response(schema.get_server, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_server(self, server_id):
-        """Delete server"""
-        resp, body = self.delete("servers/%s" % server_id)
-        self.validate_response(schema.delete_server, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_servers(self, detail=False, **params):
-        """List servers"""
-
-        url = 'servers'
-        _schema = schema.list_servers
-
-        if detail:
-            url += '/detail'
-            _schema = schema.list_servers_detail
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(_schema, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_addresses(self, server_id):
-        """Lists all addresses for a server."""
-        resp, body = self.get("servers/%s/ips" % server_id)
-        body = json.loads(body)
-        self.validate_response(schema.list_addresses, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_addresses_by_network(self, server_id, network_id):
-        """Lists all addresses of a specific network type for a server."""
-        resp, body = self.get("servers/%s/ips/%s" %
-                              (server_id, network_id))
-        body = json.loads(body)
-        self.validate_response(schema.list_addresses_by_network, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def action(self, server_id, action_name,
-               schema=schema.server_actions_common_schema,
-               **kwargs):
-        post_body = json.dumps({action_name: kwargs})
-        resp, body = self.post('servers/%s/action' % server_id,
-                               post_body)
-        if body:
-            body = json.loads(body)
-        self.validate_response(schema, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_backup(self, server_id, backup_type, rotation, name):
-        """Backup a server instance."""
-        return self.action(server_id, "createBackup",
-                           backup_type=backup_type,
-                           rotation=rotation,
-                           name=name)
-
-    def change_password(self, server_id, adminPass):
-        """Changes the root password for the server."""
-        return self.action(server_id, 'changePassword',
-                           adminPass=adminPass)
-
-    def get_password(self, server_id):
-        resp, body = self.get("servers/%s/os-server-password" %
-                              server_id)
-        body = json.loads(body)
-        self.validate_response(schema.get_password, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_password(self, server_id):
-        """
-        Removes the encrypted server password from the metadata server.
-        Note that this does not actually change the instance server
-        password.
-        """
-        resp, body = self.delete("servers/%s/os-server-password" %
-                                 server_id)
-        self.validate_response(schema.server_actions_delete_password,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def reboot_server(self, server_id, reboot_type):
-        """Reboots a server."""
-        return self.action(server_id, 'reboot', type=reboot_type)
-
-    def rebuild_server(self, server_id, image_ref, **kwargs):
-        """Rebuilds a server with a new image.
-        Most parameters except the following are passed to the API without
-        any changes.
-        :param disk_config: The name is changed to OS-DCF:diskConfig
-        """
-        kwargs['imageRef'] = image_ref
-        if 'disk_config' in kwargs:
-            kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
-        if self.enable_instance_password:
-            rebuild_schema = schema.rebuild_server_with_admin_pass
-        else:
-            rebuild_schema = schema.rebuild_server
-        return self.action(server_id, 'rebuild',
-                           rebuild_schema, **kwargs)
-
-    def resize_server(self, server_id, flavor_ref, **kwargs):
-        """Changes the flavor of a server.
-        Most parameters except the following are passed to the API without
-        any changes.
-        :param disk_config: The name is changed to OS-DCF:diskConfig
-        """
-        kwargs['flavorRef'] = flavor_ref
-        if 'disk_config' in kwargs:
-            kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
-        return self.action(server_id, 'resize', **kwargs)
-
-    def confirm_resize_server(self, server_id, **kwargs):
-        """Confirms the flavor change for a server."""
-        return self.action(server_id, 'confirmResize',
-                           schema.server_actions_confirm_resize,
-                           **kwargs)
-
-    def revert_resize_server(self, server_id, **kwargs):
-        """Reverts a server back to its original flavor."""
-        return self.action(server_id, 'revertResize', **kwargs)
-
-    def list_server_metadata(self, server_id):
-        resp, body = self.get("servers/%s/metadata" % server_id)
-        body = json.loads(body)
-        self.validate_response(schema.list_server_metadata, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def set_server_metadata(self, server_id, meta, no_metadata_field=False):
-        if no_metadata_field:
-            post_body = ""
-        else:
-            post_body = json.dumps({'metadata': meta})
-        resp, body = self.put('servers/%s/metadata' % server_id,
-                              post_body)
-        body = json.loads(body)
-        self.validate_response(schema.set_server_metadata, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_server_metadata(self, server_id, meta):
-        post_body = json.dumps({'metadata': meta})
-        resp, body = self.post('servers/%s/metadata' % server_id,
-                               post_body)
-        body = json.loads(body)
-        self.validate_response(schema.update_server_metadata,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_server_metadata_item(self, server_id, key):
-        resp, body = self.get("servers/%s/metadata/%s" % (server_id, key))
-        body = json.loads(body)
-        self.validate_response(schema.set_get_server_metadata_item,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def set_server_metadata_item(self, server_id, key, meta):
-        post_body = json.dumps({'meta': meta})
-        resp, body = self.put('servers/%s/metadata/%s' % (server_id, key),
-                              post_body)
-        body = json.loads(body)
-        self.validate_response(schema.set_get_server_metadata_item,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_server_metadata_item(self, server_id, key):
-        resp, body = self.delete("servers/%s/metadata/%s" %
-                                 (server_id, key))
-        self.validate_response(schema.delete_server_metadata_item,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def stop_server(self, server_id, **kwargs):
-        return self.action(server_id, 'os-stop', **kwargs)
-
-    def start_server(self, server_id, **kwargs):
-        return self.action(server_id, 'os-start', **kwargs)
-
-    def attach_volume(self, server_id, **kwargs):
-        """Attaches a volume to a server instance."""
-        post_body = json.dumps({'volumeAttachment': kwargs})
-        resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
-                               post_body)
-        body = json.loads(body)
-        self.validate_response(schema.attach_volume, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def detach_volume(self, server_id, volume_id):
-        """Detaches a volume from a server instance."""
-        resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
-                                 (server_id, volume_id))
-        self.validate_response(schema.detach_volume, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_volume_attachment(self, server_id, attach_id):
-        """Return details about the given volume attachment."""
-        resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
-            server_id, attach_id))
-        body = json.loads(body)
-        self.validate_response(schema.get_volume_attachment, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_volume_attachments(self, server_id):
-        """Returns the list of volume attachments for a given instance."""
-        resp, body = self.get('servers/%s/os-volume_attachments' % (
-            server_id))
-        body = json.loads(body)
-        self.validate_response(schema.list_volume_attachments, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def add_security_group(self, server_id, name):
-        """Adds a security group to the server."""
-        return self.action(server_id, 'addSecurityGroup', name=name)
-
-    def remove_security_group(self, server_id, name):
-        """Removes a security group from the server."""
-        return self.action(server_id, 'removeSecurityGroup', name=name)
-
-    def live_migrate_server(self, server_id, **kwargs):
-        """This should be called with administrator privileges."""
-
-        req_body = json.dumps({'os-migrateLive': kwargs})
-
-        resp, body = self.post("servers/%s/action" % server_id, req_body)
-        self.validate_response(schema.server_actions_common_schema,
-                               resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def migrate_server(self, server_id, **kwargs):
-        """Migrates a server to a new host."""
-        return self.action(server_id, 'migrate', **kwargs)
-
-    def lock_server(self, server_id, **kwargs):
-        """Locks the given server."""
-        return self.action(server_id, 'lock', **kwargs)
-
-    def unlock_server(self, server_id, **kwargs):
-        """Unlocks the given server."""
-        return self.action(server_id, 'unlock', **kwargs)
-
-    def suspend_server(self, server_id, **kwargs):
-        """Suspends the provided server."""
-        return self.action(server_id, 'suspend', **kwargs)
-
-    def resume_server(self, server_id, **kwargs):
-        """Un-suspends the provided server."""
-        return self.action(server_id, 'resume', **kwargs)
-
-    def pause_server(self, server_id, **kwargs):
-        """Pauses the provided server."""
-        return self.action(server_id, 'pause', **kwargs)
-
-    def unpause_server(self, server_id, **kwargs):
-        """Un-pauses the provided server."""
-        return self.action(server_id, 'unpause', **kwargs)
-
-    def reset_state(self, server_id, state='error'):
-        """Resets the state of a server to active/error."""
-        return self.action(server_id, 'os-resetState', state=state)
-
-    def shelve_server(self, server_id, **kwargs):
-        """Shelves the provided server."""
-        return self.action(server_id, 'shelve', **kwargs)
-
-    def unshelve_server(self, server_id, **kwargs):
-        """Un-shelves the provided server."""
-        return self.action(server_id, 'unshelve', **kwargs)
-
-    def shelve_offload_server(self, server_id, **kwargs):
-        """Shelve-offload the provided server."""
-        return self.action(server_id, 'shelveOffload', **kwargs)
-
-    def get_console_output(self, server_id, length):
-        kwargs = {'length': length} if length else {}
-        return self.action(server_id, 'os-getConsoleOutput',
-                           schema.get_console_output,
-                           **kwargs)
-
-    def list_virtual_interfaces(self, server_id):
-        """
-        List the virtual interfaces used in an instance.
-        """
-        resp, body = self.get('/'.join(['servers', server_id,
-                              'os-virtual-interfaces']))
-        body = json.loads(body)
-        self.validate_response(schema.list_virtual_interfaces, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def rescue_server(self, server_id, **kwargs):
-        """Rescue the provided server."""
-        return self.action(server_id, 'rescue',
-                           schema.rescue_server,
-                           **kwargs)
-
-    def unrescue_server(self, server_id):
-        """Unrescue the provided server."""
-        return self.action(server_id, 'unrescue')
-
-    def get_server_diagnostics(self, server_id):
-        """Get the usage data for a server."""
-        resp, body = self.get("servers/%s/diagnostics" % server_id)
-        return service_client.ResponseBody(resp, json.loads(body))
-
-    def list_instance_actions(self, server_id):
-        """List the actions performed on the provided server."""
-        resp, body = self.get("servers/%s/os-instance-actions" %
-                              server_id)
-        body = json.loads(body)
-        self.validate_response(schema.list_instance_actions, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_instance_action(self, server_id, request_id):
-        """Returns the action details of the provided server."""
-        resp, body = self.get("servers/%s/os-instance-actions/%s" %
-                              (server_id, request_id))
-        body = json.loads(body)
-        self.validate_response(schema.get_instance_action, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def force_delete_server(self, server_id, **kwargs):
-        """Force delete a server."""
-        return self.action(server_id, 'forceDelete', **kwargs)
-
-    def restore_soft_deleted_server(self, server_id, **kwargs):
-        """Restore a soft-deleted server."""
-        return self.action(server_id, 'restore', **kwargs)
-
-    def reset_network(self, server_id, **kwargs):
-        """Resets the Network of a server"""
-        return self.action(server_id, 'resetNetwork', **kwargs)
-
-    def inject_network_info(self, server_id, **kwargs):
-        """Inject the Network Info into server"""
-        return self.action(server_id, 'injectNetworkInfo', **kwargs)
-
-    def get_vnc_console(self, server_id, console_type):
-        """Get URL of VNC console."""
-        return self.action(server_id, "os-getVNCConsole",
-                           schema.get_vnc_console,
-                           type=console_type)
-
-    def add_fixed_ip(self, server_id, **kwargs):
-        """Add a fixed IP to the given server instance."""
-        return self.action(server_id, 'addFixedIp', **kwargs)
-
-    def remove_fixed_ip(self, server_id, **kwargs):
-        """Remove the given fixed IP from the given server instance."""
-        return self.action(server_id, 'removeFixedIp', **kwargs)
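
Most methods in the ServersClient removed above funnel through a single action() helper: every server action is a POST to servers/<id>/action whose JSON body has exactly one key, the action name, mapped to the action's parameters. The generic illustration below shows that request shape only; the requests-based transport and the endpoint/token parameters are stand-ins, not the tempest service client plumbing.

import json

import requests  # stand-in HTTP library, used here for illustration only

def server_action(endpoint, token, server_id, action_name, **kwargs):
    # e.g. action_name='reboot', kwargs={'type': 'SOFT'} posts
    # {"reboot": {"type": "SOFT"}} to servers/<id>/action.
    url = '%s/servers/%s/action' % (endpoint, server_id)
    headers = {'X-Auth-Token': token, 'Content-Type': 'application/json'}
    return requests.post(url, data=json.dumps({action_name: kwargs}),
                         headers=headers)
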
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
deleted file mode 100644
index 6e2f320..0000000
--- a/tempest/services/compute/json/services_client.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2013 NEC Corporation
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.api_schema.response.compute.v2_1 import services as schema
-from tempest.common import service_client
-
-
-class ServicesClient(service_client.ServiceClient):
-
-    def list_services(self, **params):
-        url = 'os-services'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_services, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def enable_service(self, **kwargs):
-        """
-        Enable service on a host
-        host_name: Name of host
-        binary: Service binary
-        """
-        post_body = json.dumps(kwargs)
-        resp, body = self.put('os-services/enable', post_body)
-        body = json.loads(body)
-        self.validate_response(schema.enable_disable_service, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def disable_service(self, **kwargs):
-        """
-        Disable service on a host
-        host_name: Name of host
-        binary: Service binary
-        """
-        post_body = json.dumps(kwargs)
-        resp, body = self.put('os-services/disable', post_body)
-        body = json.loads(body)
-        self.validate_response(schema.enable_disable_service, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/snapshots_client.py b/tempest/services/compute/json/snapshots_client.py
deleted file mode 100644
index e3f92db..0000000
--- a/tempest/services/compute/json/snapshots_client.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2015 Fujitsu(fnst) Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import snapshots as schema
-from tempest.common import service_client
-
-
-class SnapshotsClient(service_client.ServiceClient):
-
-    def create_snapshot(self, volume_id, **kwargs):
-        post_body = {
-            'volume_id': volume_id
-        }
-        post_body.update(kwargs)
-        post_body = json.dumps({'snapshot': post_body})
-        resp, body = self.post('os-snapshots', post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_snapshot, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_snapshot(self, snapshot_id):
-        url = "os-snapshots/%s" % snapshot_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_snapshot, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_snapshots(self, detail=False, params=None):
-        url = 'os-snapshots'
-
-        if detail:
-            url += '/detail'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_snapshots, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_snapshot(self, snapshot_id):
-        resp, body = self.delete("os-snapshots/%s" % snapshot_id)
-        self.validate_response(schema.delete_snapshot, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_snapshot(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'snapshot'
diff --git a/tempest/services/compute/json/tenant_networks_client.py b/tempest/services/compute/json/tenant_networks_client.py
deleted file mode 100644
index 33166c0..0000000
--- a/tempest/services/compute/json/tenant_networks_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2015 NEC Corporation. All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import tenant_networks as schema
-from tempest.common import service_client
-
-
-class TenantNetworksClient(service_client.ServiceClient):
-
-    def list_tenant_networks(self):
-        resp, body = self.get("os-tenant-networks")
-        body = json.loads(body)
-        self.validate_response(schema.list_tenant_networks, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_tenant_network(self, network_id):
-        resp, body = self.get("os-tenant-networks/%s" % network_id)
-        body = json.loads(body)
-        self.validate_response(schema.get_tenant_network, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/tenant_usages_client.py b/tempest/services/compute/json/tenant_usages_client.py
deleted file mode 100644
index 73b4706..0000000
--- a/tempest/services/compute/json/tenant_usages_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2013 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.api_schema.response.compute.v2_1 import tenant_usages as schema
-from tempest.common import service_client
-
-
-class TenantUsagesClient(service_client.ServiceClient):
-
-    def list_tenant_usages(self, **params):
-        url = 'os-simple-tenant-usage'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_tenant_usage, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_tenant_usage(self, tenant_id, **params):
-        url = 'os-simple-tenant-usage/%s' % tenant_id
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.get_tenant_usage, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/versions_client.py b/tempest/services/compute/json/versions_client.py
deleted file mode 100644
index 48c0e8d..0000000
--- a/tempest/services/compute/json/versions_client.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves import urllib
-
-from tempest.api_schema.response.compute.v2_1 import versions as schema
-from tempest.common import service_client
-
-
-class VersionsClient(service_client.ServiceClient):
-
-    def _get_base_version_url(self):
-        # NOTE: The URL which is gotten from keystone's catalog contains
-        # API version and project-id like "v2/{project-id}", but we need
-        # to access the URL which doesn't contain them for getting API
-        # versions. For that, here should use raw_request() instead of
-        # get().
-        endpoint = self.base_url
-        url = urllib.parse.urlparse(endpoint)
-        return '%s://%s/' % (url.scheme, url.netloc)
-
-    def list_versions(self):
-        version_url = self._get_base_version_url()
-        resp, body = self.raw_request(version_url, 'GET')
-        body = json.loads(body)
-        self.validate_response(schema.list_versions, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_version_by_url(self, version_url):
-        """Get the version document by url.
-
-        This gets the version document for a url, useful in testing
-        the contents of things like /v2/ or /v2.1/ in Nova. That
-        controller needs authenticated access, so we have to get
-        ourselves a token before making the request.
-
-        """
-        # we need a token for this request
-        resp, body = self.raw_request(version_url, 'GET',
-                                      {'X-Auth-Token': self.token})
-        body = json.loads(body)
-        self.validate_response(schema.get_one_version, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/volumes_client.py b/tempest/services/compute/json/volumes_client.py
deleted file mode 100644
index e799c29..0000000
--- a/tempest/services/compute/json/volumes_client.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import volumes as schema
-from tempest.common import service_client
-
-
-class VolumesClient(service_client.ServiceClient):
-
-    def list_volumes(self, detail=False, **params):
-        """List all the volumes created."""
-        url = 'os-volumes'
-
-        if detail:
-            url += '/detail'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_volumes, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_volume(self, volume_id):
-        """Returns the details of a single volume."""
-        url = "os-volumes/%s" % volume_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_volume, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_volume(self, **kwargs):
-        """
-        Creates a new Volume.
-        size(Required): Size of volume in GB.
-        Following optional keyword arguments are accepted:
-        display_name: Optional Volume Name.
-        metadata: A dictionary of values to be used as metadata.
-        """
-        post_body = json.dumps({'volume': kwargs})
-        resp, body = self.post('os-volumes', post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_volume, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_volume(self, volume_id):
-        """Deletes the Specified Volume."""
-        resp, body = self.delete("os-volumes/%s" % volume_id)
-        self.validate_response(schema.delete_volume, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_volume(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume'
diff --git a/tempest/services/data_processing/v1_1/data_processing_client.py b/tempest/services/data_processing/v1_1/data_processing_client.py
index cba4c42..5aa2622 100644
--- a/tempest/services/data_processing/v1_1/data_processing_client.py
+++ b/tempest/services/data_processing/v1_1/data_processing_client.py
@@ -20,8 +20,7 @@
 class DataProcessingClient(service_client.ServiceClient):
 
     def _request_and_check_resp(self, request_func, uri, resp_status):
-        """Make a request using specified request_func and check response
-        status code.
+        """Make a request and check response status code.
 
         It returns a ResponseBody.
         """
@@ -30,8 +29,7 @@
         return service_client.ResponseBody(resp, body)
 
     def _request_and_check_resp_data(self, request_func, uri, resp_status):
-        """Make a request using specified request_func and check response
-        status code.
+        """Make a request and check response status code.
 
         It returns pair: resp and response data.
         """
@@ -41,8 +39,7 @@
 
     def _request_check_and_parse_resp(self, request_func, uri,
                                       resp_status, *args, **kwargs):
-        """Make a request using specified request_func, check response status
-        code and parse response body.
+        """Make a request, check response status code and parse response body.
 
         It returns a ResponseBody.
         """
diff --git a/tempest/services/database/json/flavors_client.py b/tempest/services/database/json/flavors_client.py
index 88feb17..dbb5172 100644
--- a/tempest/services/database/json/flavors_client.py
+++ b/tempest/services/database/json/flavors_client.py
@@ -14,7 +14,7 @@
 #    under the License.
 
 from oslo_serialization import jsonutils as json
-import urllib
+from six.moves import urllib
 
 from tempest.common import service_client
 
@@ -24,15 +24,15 @@
     def list_db_flavors(self, params=None):
         url = 'flavors'
         if params:
-            url += '?%s' % urllib.urlencode(params)
+            url += '?%s' % urllib.parse.urlencode(params)
 
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_db_flavor_details(self, db_flavor_id):
-        resp, body = self.get("flavors/%s" % str(db_flavor_id))
+    def show_db_flavor(self, db_flavor_id):
+        resp, body = self.get("flavors/%s" % db_flavor_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
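For context on the six.moves import switch above, a minimal sketch (illustrative only, not part of the change): urllib.parse.urlencode resolves to urllib.urlencode on Python 2 and urllib.parse.urlencode on Python 3, so the same query-string code runs on both interpreters. The parameter name below is a placeholder.

from six.moves import urllib

# Hypothetical filter parameter; any dict of query arguments behaves the same.
params = {'minRam': 512}
url = 'flavors'
if params:
    url += '?%s' % urllib.parse.urlencode(params)
# url is now 'flavors?minRam=512' on both Python 2 and Python 3.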
diff --git a/tempest/services/identity/v2/json/endpoints_client.py b/tempest/services/identity/v2/json/endpoints_client.py
new file mode 100644
index 0000000..ff9907d
--- /dev/null
+++ b/tempest/services/identity/v2/json/endpoints_client.py
@@ -0,0 +1,50 @@
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class EndpointsClient(service_client.ServiceClient):
+    api_version = "v2.0"
+
+    def create_endpoint(self, service_id, region_id, **kwargs):
+        """Create an endpoint for service."""
+        post_body = {
+            'service_id': service_id,
+            'region': region_id,
+            'publicurl': kwargs.get('publicurl'),
+            'adminurl': kwargs.get('adminurl'),
+            'internalurl': kwargs.get('internalurl')
+        }
+        post_body = json.dumps({'endpoint': post_body})
+        resp, body = self.post('/endpoints', post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def list_endpoints(self):
+        """List Endpoints - Returns Endpoints."""
+        resp, body = self.get('/endpoints')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_endpoint(self, endpoint_id):
+        """Delete an endpoint."""
+        url = '/endpoints/%s' % endpoint_id
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
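A hypothetical round trip with the new v2 EndpointsClient, assuming `endpoints_client` is an already-authenticated instance and that the responses use the usual Identity v2 admin wrapping ('endpoint' / 'endpoints'); all IDs and URLs below are placeholders.

def endpoint_round_trip(endpoints_client, service_id, region_id):
    # Create an endpoint, confirm it appears in the listing, then remove it.
    endpoint = endpoints_client.create_endpoint(
        service_id, region_id,
        publicurl='http://public.example.com:8774/v2.1',
        adminurl='http://admin.example.com:8774/v2.1',
        internalurl='http://internal.example.com:8774/v2.1')['endpoint']
    try:
        listed = endpoints_client.list_endpoints()['endpoints']
        assert any(e['id'] == endpoint['id'] for e in listed)
    finally:
        endpoints_client.delete_endpoint(endpoint['id'])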
diff --git a/tempest/services/identity/v2/json/identity_client.py b/tempest/services/identity/v2/json/identity_client.py
index f37bc08..f045bb7 100644
--- a/tempest/services/identity/v2/json/identity_client.py
+++ b/tempest/services/identity/v2/json/identity_client.py
@@ -11,7 +11,6 @@
 #    under the License.
 
 from oslo_serialization import jsonutils as json
-from tempest_lib import exceptions as lib_exc
 
 from tempest.common import service_client
 
@@ -19,7 +18,7 @@
 class IdentityClient(service_client.ServiceClient):
     api_version = "v2.0"
 
-    def get_api_description(self):
+    def show_api_description(self):
         """Retrieves info about the v2.0 Identity API"""
         url = ''
         resp, body = self.get(url)
@@ -27,195 +26,7 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def has_admin_extensions(self):
-        """
-        Returns True if the KSADM Admin Extensions are supported
-        False otherwise
-        """
-        if hasattr(self, '_has_admin_extensions'):
-            return self._has_admin_extensions
-        # Try something that requires admin
-        try:
-            self.list_roles()
-            self._has_admin_extensions = True
-        except Exception:
-            self._has_admin_extensions = False
-        return self._has_admin_extensions
-
-    def create_role(self, name):
-        """Create a role."""
-        post_body = {
-            'name': name,
-        }
-        post_body = json.dumps({'role': post_body})
-        resp, body = self.post('OS-KSADM/roles', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_role(self, role_id):
-        """Get a role by its id."""
-        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_tenant(self, name, **kwargs):
-        """
-        Create a tenant
-        name (required): New tenant name
-        description: Description of new tenant (default is none)
-        enabled <true|false>: Initial tenant status (default is true)
-        """
-        post_body = {
-            'name': name,
-            'description': kwargs.get('description', ''),
-            'enabled': kwargs.get('enabled', True),
-        }
-        post_body = json.dumps({'tenant': post_body})
-        resp, body = self.post('tenants', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_role(self, role_id):
-        """Delete a role."""
-        resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
-        self.expected_success(204, resp.status)
-        return resp, body
-
-    def list_user_roles(self, tenant_id, user_id):
-        """Returns a list of roles assigned to a user for a tenant."""
-        url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def assign_user_role(self, tenant_id, user_id, role_id):
-        """Add roles to a user on a tenant."""
-        resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
-                              (tenant_id, user_id, role_id), "")
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def remove_user_role(self, tenant_id, user_id, role_id):
-        """Removes a role assignment for a user on a tenant."""
-        resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
-                                 (tenant_id, user_id, role_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_tenant(self, tenant_id):
-        """Delete a tenant."""
-        resp, body = self.delete('tenants/%s' % str(tenant_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def get_tenant(self, tenant_id):
-        """Get tenant details."""
-        resp, body = self.get('tenants/%s' % str(tenant_id))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_roles(self):
-        """Returns roles."""
-        resp, body = self.get('OS-KSADM/roles')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_tenants(self):
-        """Returns tenants."""
-        resp, body = self.get('tenants')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_tenant_by_name(self, tenant_name):
-        tenants = self.list_tenants()['tenants']
-        for tenant in tenants:
-            if tenant['name'] == tenant_name:
-                return tenant
-        raise lib_exc.NotFound('No such tenant')
-
-    def update_tenant(self, tenant_id, **kwargs):
-        """Updates a tenant."""
-        body = self.get_tenant(tenant_id)['tenant']
-        name = kwargs.get('name', body['name'])
-        desc = kwargs.get('description', body['description'])
-        en = kwargs.get('enabled', body['enabled'])
-        post_body = {
-            'id': tenant_id,
-            'name': name,
-            'description': desc,
-            'enabled': en,
-        }
-        post_body = json.dumps({'tenant': post_body})
-        resp, body = self.post('tenants/%s' % tenant_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_user(self, name, password, tenant_id, email, **kwargs):
-        """Create a user."""
-        post_body = {
-            'name': name,
-            'password': password,
-            'email': email
-        }
-        if tenant_id is not None:
-            post_body['tenantId'] = tenant_id
-        if kwargs.get('enabled') is not None:
-            post_body['enabled'] = kwargs.get('enabled')
-        post_body = json.dumps({'user': post_body})
-        resp, body = self.post('users', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_user(self, user_id, **kwargs):
-        """Updates a user."""
-        put_body = json.dumps({'user': kwargs})
-        resp, body = self.put('users/%s' % user_id, put_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_user(self, user_id):
-        """GET a user."""
-        resp, body = self.get("users/%s" % user_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_user(self, user_id):
-        """Delete a user."""
-        resp, body = self.delete("users/%s" % user_id)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def get_users(self):
-        """Get the list of users."""
-        resp, body = self.get("users")
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def enable_disable_user(self, user_id, enabled):
-        """Enables or disables a user."""
-        put_body = {
-            'enabled': enabled
-        }
-        put_body = json.dumps({'user': put_body})
-        resp, body = self.put('users/%s/enabled' % user_id, put_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_token(self, token_id):
+    def show_token(self, token_id):
         """Get token details."""
         resp, body = self.get("tokens/%s" % token_id)
         self.expected_success(200, resp.status)
@@ -228,138 +39,9 @@
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def list_users_for_tenant(self, tenant_id):
-        """List users for a Tenant."""
-        resp, body = self.get('/tenants/%s/users' % tenant_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_user_by_username(self, tenant_id, username):
-        users = self.list_users_for_tenant(tenant_id)['users']
-        for user in users:
-            if user['name'] == username:
-                return user
-        raise lib_exc.NotFound('No such user')
-
-    def create_service(self, name, type, **kwargs):
-        """Create a service."""
-        post_body = {
-            'name': name,
-            'type': type,
-            'description': kwargs.get('description')
-        }
-        post_body = json.dumps({'OS-KSADM:service': post_body})
-        resp, body = self.post('/OS-KSADM/services', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_service(self, service_id):
-        """Get Service."""
-        url = '/OS-KSADM/services/%s' % service_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_services(self):
-        """List Service - Returns Services."""
-        resp, body = self.get('/OS-KSADM/services')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_service(self, service_id):
-        """Delete Service."""
-        url = '/OS-KSADM/services/%s' % service_id
-        resp, body = self.delete(url)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def create_endpoint(self, service_id, region_id, **kwargs):
-        """Create an endpoint for service."""
-        post_body = {
-            'service_id': service_id,
-            'region': region_id,
-            'publicurl': kwargs.get('publicurl'),
-            'adminurl': kwargs.get('adminurl'),
-            'internalurl': kwargs.get('internalurl')
-        }
-        post_body = json.dumps({'endpoint': post_body})
-        resp, body = self.post('/endpoints', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_endpoints(self):
-        """List Endpoints - Returns Endpoints."""
-        resp, body = self.get('/endpoints')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_endpoint(self, endpoint_id):
-        """Delete an endpoint."""
-        url = '/endpoints/%s' % endpoint_id
-        resp, body = self.delete(url)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def update_user_password(self, user_id, new_pass):
-        """Update User Password."""
-        put_body = {
-            'password': new_pass,
-            'id': user_id
-        }
-        put_body = json.dumps({'user': put_body})
-        resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_user_own_password(self, user_id, new_pass, old_pass):
-        """User updates own password"""
-        patch_body = {
-            "password": new_pass,
-            "original_password": old_pass
-        }
-        patch_body = json.dumps({'user': patch_body})
-        resp, body = self.patch('OS-KSCRUD/users/%s' % user_id, patch_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
     def list_extensions(self):
         """List all the extensions."""
         resp, body = self.get('/extensions')
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
-
-    def create_user_ec2_credentials(self, user_id, tenant_id):
-        post_body = json.dumps({'tenant_id': tenant_id})
-        resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
-                               post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_user_ec2_credentials(self, user_id, access):
-        resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
-                                 (user_id, access))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_user_ec2_credentials(self, user_id):
-        resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_user_ec2_credentials(self, user_id, access):
-        resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
-                              (user_id, access))
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
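After the split, the v2 IdentityClient keeps only the API-description, token and extension calls; a hypothetical caller of the renamed methods (local variable names are illustrative only):

def describe_v2_identity(identity_client, token_id):
    # show_api_description() and show_token() replace the old get_* names.
    description = identity_client.show_api_description()
    extensions = identity_client.list_extensions()
    token = identity_client.show_token(token_id)
    return description, extensions, token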
diff --git a/tempest/services/identity/v2/json/roles_client.py b/tempest/services/identity/v2/json/roles_client.py
new file mode 100644
index 0000000..ef6dfe9
--- /dev/null
+++ b/tempest/services/identity/v2/json/roles_client.py
@@ -0,0 +1,74 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class RolesClient(service_client.ServiceClient):
+    api_version = "v2.0"
+
+    def create_role(self, **kwargs):
+        """Create a role.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v2-ext.html#createRole
+        """
+        post_body = json.dumps({'role': kwargs})
+        resp, body = self.post('OS-KSADM/roles', post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_role(self, role_id):
+        """Get a role by its id."""
+        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_role(self, role_id):
+        """Delete a role."""
+        resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
+        self.expected_success(204, resp.status)
+        return resp, body
+
+    def list_user_roles(self, tenant_id, user_id):
+        """Returns a list of roles assigned to a user for a tenant."""
+        url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def assign_user_role(self, tenant_id, user_id, role_id):
+        """Add roles to a user on a tenant."""
+        resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
+                              (tenant_id, user_id, role_id), "")
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_user_role(self, tenant_id, user_id, role_id):
+        """Removes a role assignment for a user on a tenant."""
+        resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
+                                 (tenant_id, user_id, role_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def list_roles(self):
+        """Returns roles."""
+        resp, body = self.get('OS-KSADM/roles')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
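A hypothetical grant-and-revoke flow with the new RolesClient, assuming `roles_client` is authenticated, `tenant_id`/`user_id` already exist, and the responses use the usual v2 wrapping ('role' / 'roles'); the role name is a placeholder.

def grant_and_revoke_role(roles_client, tenant_id, user_id):
    role = roles_client.create_role(name='example-role')['role']
    try:
        roles_client.assign_user_role(tenant_id, user_id, role['id'])
        roles = roles_client.list_user_roles(tenant_id, user_id)['roles']
        # The freshly assigned role should be visible for the tenant/user pair.
        assert any(r['id'] == role['id'] for r in roles)
        roles_client.delete_user_role(tenant_id, user_id, role['id'])
    finally:
        roles_client.delete_role(role['id'])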
diff --git a/tempest/services/identity/v2/json/services_client.py b/tempest/services/identity/v2/json/services_client.py
new file mode 100644
index 0000000..436d00d
--- /dev/null
+++ b/tempest/services/identity/v2/json/services_client.py
@@ -0,0 +1,56 @@
+# Copyright 2015 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class ServicesClient(service_client.ServiceClient):
+    api_version = "v2.0"
+
+    def create_service(self, name, type, **kwargs):
+        """Create a service."""
+        post_body = {
+            'name': name,
+            'type': type,
+            'description': kwargs.get('description')
+        }
+        post_body = json.dumps({'OS-KSADM:service': post_body})
+        resp, body = self.post('/OS-KSADM/services', post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_service(self, service_id):
+        """Get Service."""
+        url = '/OS-KSADM/services/%s' % service_id
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def list_services(self):
+        """List Service - Returns Services."""
+        resp, body = self.get('/OS-KSADM/services')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_service(self, service_id):
+        """Delete Service."""
+        url = '/OS-KSADM/services/%s' % service_id
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
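A hypothetical lifecycle against the new v2 ServicesClient, assuming an authenticated `services_client` and that the responses use the OS-KSADM wrapping ('OS-KSADM:service' / 'OS-KSADM:services'); name and description are placeholders.

def service_round_trip(services_client):
    service = services_client.create_service(
        'example-svc', 'compute',
        description='illustrative service entry')['OS-KSADM:service']
    try:
        shown = services_client.show_service(service['id'])
        listed = services_client.list_services()['OS-KSADM:services']
        assert shown and any(s['id'] == service['id'] for s in listed)
    finally:
        services_client.delete_service(service['id'])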
diff --git a/tempest/services/identity/v2/json/tenants_client.py b/tempest/services/identity/v2/json/tenants_client.py
new file mode 100644
index 0000000..937ae6f
--- /dev/null
+++ b/tempest/services/identity/v2/json/tenants_client.py
@@ -0,0 +1,84 @@
+# Copyright 2015 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class TenantsClient(service_client.ServiceClient):
+    api_version = "v2.0"
+
+    def create_tenant(self, name, **kwargs):
+        """Create a tenant
+
+        name (required): New tenant name
+        description: Description of new tenant (default is none)
+        enabled <true|false>: Initial tenant status (default is true)
+        """
+        post_body = {
+            'name': name,
+            'description': kwargs.get('description', ''),
+            'enabled': kwargs.get('enabled', True),
+        }
+        post_body = json.dumps({'tenant': post_body})
+        resp, body = self.post('tenants', post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_tenant(self, tenant_id):
+        """Delete a tenant."""
+        resp, body = self.delete('tenants/%s' % str(tenant_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def show_tenant(self, tenant_id):
+        """Get tenant details."""
+        resp, body = self.get('tenants/%s' % str(tenant_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def list_tenants(self):
+        """Returns tenants."""
+        resp, body = self.get('tenants')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def update_tenant(self, tenant_id, **kwargs):
+        """Updates a tenant."""
+        body = self.show_tenant(tenant_id)['tenant']
+        name = kwargs.get('name', body['name'])
+        desc = kwargs.get('description', body['description'])
+        en = kwargs.get('enabled', body['enabled'])
+        post_body = {
+            'id': tenant_id,
+            'name': name,
+            'description': desc,
+            'enabled': en,
+        }
+        post_body = json.dumps({'tenant': post_body})
+        resp, body = self.post('tenants/%s' % tenant_id, post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def list_tenant_users(self, tenant_id):
+        """List users for a Tenant."""
+        resp, body = self.get('/tenants/%s/users' % tenant_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
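A hypothetical tenant lifecycle with the new TenantsClient, assuming an authenticated `tenants_client` and the usual v2 response wrapping ('tenant' / 'users'); the name and descriptions are placeholders.

def tenant_round_trip(tenants_client):
    tenant = tenants_client.create_tenant(
        'example-tenant', description='illustrative tenant')['tenant']
    try:
        tenants_client.update_tenant(tenant['id'], description='updated')
        shown = tenants_client.show_tenant(tenant['id'])['tenant']
        assert shown['description'] == 'updated'
        users = tenants_client.list_tenant_users(tenant['id'])['users']
    finally:
        tenants_client.delete_tenant(tenant['id'])
    return users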
diff --git a/tempest/services/identity/v2/json/users_client.py b/tempest/services/identity/v2/json/users_client.py
new file mode 100644
index 0000000..5327638
--- /dev/null
+++ b/tempest/services/identity/v2/json/users_client.py
@@ -0,0 +1,137 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class UsersClient(service_client.ServiceClient):
+    api_version = "v2.0"
+
+    def create_user(self, name, password, tenant_id, email, **kwargs):
+        """Create a user."""
+        post_body = {
+            'name': name,
+            'password': password,
+            'email': email
+        }
+        if tenant_id is not None:
+            post_body['tenantId'] = tenant_id
+        if kwargs.get('enabled') is not None:
+            post_body['enabled'] = kwargs.get('enabled')
+        post_body = json.dumps({'user': post_body})
+        resp, body = self.post('users', post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def update_user(self, user_id, **kwargs):
+        """Updates a user."""
+        put_body = json.dumps({'user': kwargs})
+        resp, body = self.put('users/%s' % user_id, put_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_user(self, user_id):
+        """GET a user."""
+        resp, body = self.get("users/%s" % user_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_user(self, user_id):
+        """Delete a user."""
+        resp, body = self.delete("users/%s" % user_id)
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def list_users(self):
+        """Get the list of users."""
+        resp, body = self.get("users")
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def enable_disable_user(self, user_id, **kwargs):
+        """Enables or disables a user.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v2-ext.html#enableUser
+        """
+        # NOTE: The URL (users/<id>/enabled) differs from the api-site one
+        # (users/<id>/OS-KSADM/enabled), but both map to the same API because
+        # keystone/contrib/admin_crud/core.py wires both routes to the same
+        # action='set_user_enabled' handler.
+        put_body = json.dumps({'user': kwargs})
+        resp, body = self.put('users/%s/enabled' % user_id, put_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def update_user_password(self, user_id, **kwargs):
+        """Update User Password."""
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524147
+        put_body = json.dumps({'user': kwargs})
+        resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def update_user_own_password(self, user_id, **kwargs):
+        """User updates own password"""
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524153
+        # NOTE: This API is used for updating user password by itself.
+        # Ref: http://lists.openstack.org/pipermail/openstack-dev/2015-December
+        #      /081803.html
+        patch_body = json.dumps({'user': kwargs})
+        resp, body = self.patch('OS-KSCRUD/users/%s' % user_id, patch_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def create_user_ec2_credentials(self, user_id, **kwargs):
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        post_body = json.dumps(kwargs)
+        resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
+                               post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_user_ec2_credentials(self, user_id, access):
+        resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
+                                 (user_id, access))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def list_user_ec2_credentials(self, user_id):
+        resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_user_ec2_credentials(self, user_id, access):
+        resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
+                              (user_id, access))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
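With the kwargs-based signatures above, callers pass the request-body fields directly; a hypothetical sketch that mirrors the bodies the old fixed-argument methods sent (`users_client` is assumed authenticated, the user id and password are placeholders).

def toggle_and_reset_user(users_client, user_id):
    # enable_disable_user() now takes the body as keyword arguments.
    users_client.enable_disable_user(user_id, enabled=False)
    users_client.enable_disable_user(user_id, enabled=True)
    # The old update_user_password() sent both 'password' and 'id' in the body.
    users_client.update_user_password(user_id, password='new-Secret-123',
                                      id=user_id)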
diff --git a/tempest/services/identity/v3/json/credentials_client.py b/tempest/services/identity/v3/json/credentials_client.py
index decf3a8..753e960 100644
--- a/tempest/services/identity/v3/json/credentials_client.py
+++ b/tempest/services/identity/v3/json/credentials_client.py
@@ -13,6 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+"""
+http://developer.openstack.org/api-ref-identity-v3.html#credentials-v3
+"""
+
 from oslo_serialization import jsonutils as json
 
 from tempest.common import service_client
@@ -21,17 +25,13 @@
 class CredentialsClient(service_client.ServiceClient):
     api_version = "v3"
 
-    def create_credential(self, access_key, secret_key, user_id, project_id):
-        """Creates a credential."""
-        blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
-            access_key, secret_key)
-        post_body = {
-            "blob": blob,
-            "project_id": project_id,
-            "type": "ec2",
-            "user_id": user_id
-        }
-        post_body = json.dumps({'credential': post_body})
+    def create_credential(self, **kwargs):
+        """Creates a credential.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createCredential
+        """
+        post_body = json.dumps({'credential': kwargs})
         resp, body = self.post('credentials', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
@@ -39,29 +39,19 @@
         return service_client.ResponseBody(resp, body)
 
     def update_credential(self, credential_id, **kwargs):
-        """Updates a credential."""
-        body = self.get_credential(credential_id)['credential']
-        cred_type = kwargs.get('type', body['type'])
-        access_key = kwargs.get('access_key', body['blob']['access'])
-        secret_key = kwargs.get('secret_key', body['blob']['secret'])
-        project_id = kwargs.get('project_id', body['project_id'])
-        user_id = kwargs.get('user_id', body['user_id'])
-        blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
-            access_key, secret_key)
-        post_body = {
-            "blob": blob,
-            "project_id": project_id,
-            "type": cred_type,
-            "user_id": user_id
-        }
-        post_body = json.dumps({'credential': post_body})
+        """Updates a credential.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updateCredential
+        """
+        post_body = json.dumps({'credential': kwargs})
         resp, body = self.patch('credentials/%s' % credential_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
         return service_client.ResponseBody(resp, body)
 
-    def get_credential(self, credential_id):
+    def show_credential(self, credential_id):
         """To GET Details of a credential."""
         resp, body = self.get('credentials/%s' % credential_id)
         self.expected_success(200, resp.status)
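With create_credential() reduced to **kwargs, assembling the EC2-style blob moves to the caller; a hypothetical helper mirroring the body the old fixed-argument version built (all IDs and keys are placeholders).

from oslo_serialization import jsonutils as json


def create_ec2_credential(credentials_client, user_id, project_id,
                          access_key, secret_key):
    # The blob is serialized by the caller now, exactly as the old client did.
    blob = json.dumps({'access': access_key, 'secret': secret_key})
    return credentials_client.create_credential(
        blob=blob, type='ec2', user_id=user_id, project_id=project_id)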
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
index 6bdf8b3..8ab7464 100644
--- a/tempest/services/identity/v3/json/endpoints_client.py
+++ b/tempest/services/identity/v3/json/endpoints_client.py
@@ -13,6 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+"""
+http://developer.openstack.org/api-ref-identity-v3.html#endpoints-v3
+"""
+
 from oslo_serialization import jsonutils as json
 
 from tempest.common import service_client
@@ -28,53 +32,25 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def create_endpoint(self, service_id, interface, url, **kwargs):
+    def create_endpoint(self, **kwargs):
         """Create endpoint.
 
-        Normally this function wouldn't allow setting values that are not
-        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
-
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createEndpoint
         """
-        region = kwargs.get('region', None)
-        if 'force_enabled' in kwargs:
-            enabled = kwargs.get('force_enabled', None)
-        else:
-            enabled = kwargs.get('enabled', None)
-        post_body = {
-            'service_id': service_id,
-            'interface': interface,
-            'url': url,
-            'region': region,
-            'enabled': enabled
-        }
-        post_body = json.dumps({'endpoint': post_body})
+        post_body = json.dumps({'endpoint': kwargs})
         resp, body = self.post('endpoints', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def update_endpoint(self, endpoint_id, service_id=None, interface=None,
-                        url=None, region=None, enabled=None, **kwargs):
+    def update_endpoint(self, endpoint_id, **kwargs):
         """Updates an endpoint with given parameters.
 
-        Normally this function wouldn't allow setting values that are not
-        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
-
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updateEndpoint
         """
-        post_body = {}
-        if service_id is not None:
-            post_body['service_id'] = service_id
-        if interface is not None:
-            post_body['interface'] = interface
-        if url is not None:
-            post_body['url'] = url
-        if region is not None:
-            post_body['region'] = region
-        if 'force_enabled' in kwargs:
-            post_body['enabled'] = kwargs['force_enabled']
-        elif enabled is not None:
-            post_body['enabled'] = enabled
-        post_body = json.dumps({'endpoint': post_body})
+        post_body = json.dumps({'endpoint': kwargs})
         resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
@@ -85,3 +61,10 @@
         resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
         self.expected_success(204, resp_header.status)
         return service_client.ResponseBody(resp_header, resp_body)
+
+    def show_endpoint(self, endpoint_id):
+        """Get endpoint."""
+        resp_header, resp_body = self.get('endpoints/%s' % endpoint_id)
+        self.expected_success(200, resp_header.status)
+        resp_body = json.loads(resp_body)
+        return service_client.ResponseBody(resp_header, resp_body)
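A hypothetical call sequence with the simplified v3 create_endpoint()/update_endpoint(): optional fields such as region and enabled are now plain keyword arguments, and the former force_enabled special case is gone, so non-default values are simply passed through. `endpoints_client` and `service_id` are assumed to exist; the URL is a placeholder.

def rotate_endpoint(endpoints_client, service_id):
    endpoint = endpoints_client.create_endpoint(
        service_id=service_id, interface='public',
        url='http://example.com:8774/v2.1',
        region='RegionOne', enabled=True)['endpoint']
    # Any subset of fields can be patched the same way.
    endpoints_client.update_endpoint(endpoint['id'], enabled=False)
    endpoints_client.delete_endpoint(endpoint['id'])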
diff --git a/tempest/services/identity/v3/json/groups_client.py b/tempest/services/identity/v3/json/groups_client.py
new file mode 100644
index 0000000..6ed85cf
--- /dev/null
+++ b/tempest/services/identity/v3/json/groups_client.py
@@ -0,0 +1,96 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+http://developer.openstack.org/api-ref-identity-v3.html#groups-v3
+"""
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class GroupsClient(service_client.ServiceClient):
+    api_version = "v3"
+
+    def create_group(self, **kwargs):
+        """Creates a group.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createGroup
+        """
+        post_body = json.dumps({'group': kwargs})
+        resp, body = self.post('groups', post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_group(self, group_id):
+        """Get group details."""
+        resp, body = self.get('groups/%s' % group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def list_groups(self):
+        """Lists the groups."""
+        resp, body = self.get('groups')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def update_group(self, group_id, **kwargs):
+        """Updates a group.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updateGroup
+        """
+        post_body = json.dumps({'group': kwargs})
+        resp, body = self.patch('groups/%s' % group_id, post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_group(self, group_id):
+        """Delete a group."""
+        resp, body = self.delete('groups/%s' % str(group_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def add_group_user(self, group_id, user_id):
+        """Add user into group."""
+        resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),
+                              None)
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def list_group_users(self, group_id):
+        """List users in group."""
+        resp, body = self.get('groups/%s/users' % group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return service_client.ResponseBody(resp, body)
+
+    def delete_group_user(self, group_id, user_id):
+        """Delete user in group."""
+        resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+    def check_group_user_existence(self, group_id, user_id):
+        """Check user in group."""
+        resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp)
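A hypothetical membership round trip with the new GroupsClient, assuming an authenticated `groups_client`, an existing `user_id`, and the usual v3 response wrapping ('group' / 'users'); the group name is a placeholder.

def group_membership_round_trip(groups_client, user_id):
    group = groups_client.create_group(name='example-group')['group']
    try:
        groups_client.add_group_user(group['id'], user_id)
        # HEAD-based existence check; a non-204 response raises.
        groups_client.check_group_user_existence(group['id'], user_id)
        users = groups_client.list_group_users(group['id'])['users']
        assert any(u['id'] == user_id for u in users)
        groups_client.delete_group_user(group['id'], user_id)
    finally:
        groups_client.delete_group(group['id'])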
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 3f27624..15f0577 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -22,7 +22,7 @@
 class IdentityV3Client(service_client.ServiceClient):
     api_version = "v3"
 
-    def get_api_description(self):
+    def show_api_description(self):
         """Retrieves info about the v3 Identity API"""
         url = ''
         resp, body = self.get(url)
@@ -54,7 +54,7 @@
 
     def update_user(self, user_id, name, **kwargs):
         """Updates a user."""
-        body = self.get_user(user_id)['user']
+        body = self.show_user(user_id)['user']
         email = kwargs.get('email', body['email'])
         en = kwargs.get('enabled', body['enabled'])
         project_id = kwargs.get('project_id', body['project_id'])
@@ -81,13 +81,13 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def update_user_password(self, user_id, password, original_password):
-        """Updates a user password."""
-        update_user = {
-            'password': password,
-            'original_password': original_password
-        }
-        update_user = json.dumps({'user': update_user})
+    def update_user_password(self, user_id, **kwargs):
+        """Update a user password
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#changeUserPassword
+        """
+        update_user = json.dumps({'user': kwargs})
         resp, _ = self.post('users/%s/password' % user_id, update_user)
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp)
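
A sketch of the new calling convention for update_user_password: the body fields are now passed as keyword arguments and wrapped under 'user' by the client. identity_client and the values are assumed for illustration only.

    # Before: update_user_password(user_id, password, original_password)
    # After: pass the body fields as kwargs.
    identity_client.update_user_password(user_id,
                                         password='N3wSecret',
                                         original_password='0ldSecret')
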
@@ -99,7 +99,7 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_users(self, params=None):
+    def list_users(self, params=None):
         """Get the list of users."""
         url = 'users'
         if params:
@@ -109,7 +109,7 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_user(self, user_id):
+    def show_user(self, user_id):
         """GET a user."""
         resp, body = self.get("users/%s" % user_id)
         self.expected_success(200, resp.status)
@@ -149,7 +149,7 @@
         return service_client.ResponseBody(resp, body)
 
     def update_project(self, project_id, **kwargs):
-        body = self.get_project(project_id)['project']
+        body = self.show_project(project_id)['project']
         name = kwargs.get('name', body['name'])
         desc = kwargs.get('description', body['description'])
         en = kwargs.get('enabled', body['enabled'])
@@ -167,7 +167,7 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_project(self, project_id):
+    def show_project(self, project_id):
         """GET a Project."""
         resp, body = self.get("projects/%s" % project_id)
         self.expected_success(200, resp.status)
@@ -180,18 +180,19 @@
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_role(self, name):
-        """Create a Role."""
-        post_body = {
-            'name': name
-        }
-        post_body = json.dumps({'role': post_body})
+    def create_role(self, **kwargs):
+        """Create a Role.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createRole
+        """
+        post_body = json.dumps({'role': kwargs})
         resp, body = self.post('roles', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_role(self, role_id):
+    def show_role(self, role_id):
         """GET a Role."""
         resp, body = self.get('roles/%s' % str(role_id))
         self.expected_success(200, resp.status)
@@ -205,12 +206,13 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def update_role(self, name, role_id):
-        """Create a Role."""
-        post_body = {
-            'name': name
-        }
-        post_body = json.dumps({'role': post_body})
+    def update_role(self, role_id, **kwargs):
+        """Update a Role.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updateRole
+        """
+        post_body = json.dumps({'role': kwargs})
         resp, body = self.patch('roles/%s' % str(role_id), post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
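
create_role and update_role now follow the same pattern: whatever keyword arguments the caller supplies are serialized under the 'role' key. A minimal sketch, assuming identity_client is an instantiated IdentityV3Client and the values are illustrative:

    role = identity_client.create_role(name='example-role')['role']
    identity_client.update_role(role['id'], name='example-role-renamed')
    identity_client.show_role(role['id'])
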
@@ -262,7 +264,7 @@
 
     def update_domain(self, domain_id, **kwargs):
         """Updates a domain."""
-        body = self.get_domain(domain_id)['domain']
+        body = self.show_domain(domain_id)['domain']
         description = kwargs.get('description', body['description'])
         en = kwargs.get('enabled', body['enabled'])
         name = kwargs.get('name', body['name'])
@@ -277,14 +279,14 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_domain(self, domain_id):
+    def show_domain(self, domain_id):
         """Get Domain details."""
         resp, body = self.get('domains/%s' % domain_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_token(self, resp_token):
+    def show_token(self, resp_token):
         """Get token details."""
         headers = {'X-Subject-Token': resp_token}
         resp, body = self.get("auth/tokens", headers=headers)
@@ -299,72 +301,6 @@
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_group(self, name, **kwargs):
-        """Creates a group."""
-        description = kwargs.get('description', None)
-        domain_id = kwargs.get('domain_id', 'default')
-        project_id = kwargs.get('project_id', None)
-        post_body = {
-            'description': description,
-            'domain_id': domain_id,
-            'project_id': project_id,
-            'name': name
-        }
-        post_body = json.dumps({'group': post_body})
-        resp, body = self.post('groups', post_body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def get_group(self, group_id):
-        """Get group details."""
-        resp, body = self.get('groups/%s' % group_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_groups(self):
-        """Lists the groups."""
-        resp, body = self.get('groups')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_group(self, group_id, **kwargs):
-        """Updates a group."""
-        body = self.get_group(group_id)['group']
-        name = kwargs.get('name', body['name'])
-        description = kwargs.get('description', body['description'])
-        post_body = {
-            'name': name,
-            'description': description
-        }
-        post_body = json.dumps({'group': post_body})
-        resp, body = self.patch('groups/%s' % group_id, post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_group(self, group_id):
-        """Delete a group."""
-        resp, body = self.delete('groups/%s' % str(group_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def add_group_user(self, group_id, user_id):
-        """Add user into group."""
-        resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),
-                              None)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-    def list_group_users(self, group_id):
-        """List users in group."""
-        resp, body = self.get('groups/%s/users' % group_id)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
-
     def list_user_groups(self, user_id):
         """Lists groups which a user belongs to."""
         resp, body = self.get('users/%s/groups' % user_id)
@@ -372,12 +308,6 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def delete_group_user(self, group_id, user_id):
-        """Delete user in group."""
-        resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
-
     def assign_user_role_on_project(self, project_id, user_id, role_id):
         """Add roles to a user on a project."""
         resp, body = self.put('projects/%s/users/%s/roles/%s' %
@@ -408,20 +338,36 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
+    def delete_role_from_user_on_project(self, project_id, user_id, role_id):
         """Delete role of a user on a project."""
         resp, body = self.delete('projects/%s/users/%s/roles/%s' %
                                  (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
+    def delete_role_from_user_on_domain(self, domain_id, user_id, role_id):
         """Delete role of a user on a domain."""
         resp, body = self.delete('domains/%s/users/%s/roles/%s' %
                                  (domain_id, user_id, role_id))
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
+    def check_user_role_existence_on_project(self, project_id,
+                                             user_id, role_id):
+        """Check role of a user on a project."""
+        resp, body = self.head('projects/%s/users/%s/roles/%s' %
+                               (project_id, user_id, role_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp)
+
+    def check_user_role_existence_on_domain(self, domain_id,
+                                            user_id, role_id):
+        """Check role of a user on a domain."""
+        resp, body = self.head('domains/%s/users/%s/roles/%s' %
+                               (domain_id, user_id, role_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp)
+
     def assign_group_role_on_project(self, project_id, group_id, role_id):
         """Add roles to a user on a project."""
         resp, body = self.put('projects/%s/groups/%s/roles/%s' %
@@ -452,33 +398,43 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
+    def delete_role_from_group_on_project(self, project_id, group_id, role_id):
         """Delete role of a user on a project."""
         resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
                                  (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
+    def delete_role_from_group_on_domain(self, domain_id, group_id, role_id):
         """Delete role of a user on a domain."""
         resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
                                  (domain_id, group_id, role_id))
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_trust(self, trustor_user_id, trustee_user_id, project_id,
-                     role_names, impersonation, expires_at):
-        """Creates a trust."""
-        roles = [{'name': n} for n in role_names]
-        post_body = {
-            'trustor_user_id': trustor_user_id,
-            'trustee_user_id': trustee_user_id,
-            'project_id': project_id,
-            'impersonation': impersonation,
-            'roles': roles,
-            'expires_at': expires_at
-        }
-        post_body = json.dumps({'trust': post_body})
+    def check_role_from_group_on_project_existence(self, project_id,
+                                                   group_id, role_id):
+        """Check role of a user on a project."""
+        resp, body = self.head('projects/%s/groups/%s/roles/%s' %
+                               (project_id, group_id, role_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp)
+
+    def check_role_from_group_on_domain_existence(self, domain_id,
+                                                  group_id, role_id):
+        """Check role of a user on a domain."""
+        resp, body = self.head('domains/%s/groups/%s/roles/%s' %
+                               (domain_id, group_id, role_id))
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp)
+
+    def create_trust(self, **kwargs):
+        """Creates a trust.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3-ext.html#createTrust
+        """
+        post_body = json.dumps({'trust': kwargs})
         resp, body = self.post('OS-TRUST/trusts', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
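
create_trust likewise just wraps the supplied kwargs under 'trust', so the caller now builds the roles list and the other fields. A sketch with illustrative values; identity_client, the IDs and role_id are assumed to exist:

    trust = identity_client.create_trust(
        trustor_user_id=trustor_id,
        trustee_user_id=trustee_id,
        project_id=project_id,
        roles=[{'name': 'example-role'}],
        impersonation=True,
        expires_at=None)['trust']
    # The new HEAD-based checks return 204 when the assignment exists and
    # raise NotFound otherwise.
    identity_client.check_user_role_existence_on_project(project_id,
                                                         user_id, role_id)
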
@@ -490,7 +446,7 @@
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def get_trusts(self, trustor_user_id=None, trustee_user_id=None):
+    def list_trusts(self, trustor_user_id=None, trustee_user_id=None):
         """GET trusts."""
         if trustor_user_id:
             resp, body = self.get("OS-TRUST/trusts?trustor_user_id=%s"
@@ -504,21 +460,21 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_trust(self, trust_id):
+    def show_trust(self, trust_id):
         """GET trust."""
         resp, body = self.get("OS-TRUST/trusts/%s" % trust_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_trust_roles(self, trust_id):
+    def list_trust_roles(self, trust_id):
         """GET roles delegated by a trust."""
         resp, body = self.get("OS-TRUST/trusts/%s/roles" % trust_id)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_trust_role(self, trust_id, role_id):
+    def show_trust_role(self, trust_id, role_id):
         """GET role delegated by a trust."""
         resp, body = self.get("OS-TRUST/trusts/%s/roles/%s"
                               % (trust_id, role_id))
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policies_client.py
similarity index 75%
rename from tempest/services/identity/v3/json/policy_client.py
rename to tempest/services/identity/v3/json/policies_client.py
index 3231bb0..639ed6d 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policies_client.py
@@ -13,21 +13,25 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+"""
+http://developer.openstack.org/api-ref-identity-v3.html#policies-v3
+"""
+
 from oslo_serialization import jsonutils as json
 
 from tempest.common import service_client
 
 
-class PolicyClient(service_client.ServiceClient):
+class PoliciesClient(service_client.ServiceClient):
     api_version = "v3"
 
-    def create_policy(self, blob, type):
-        """Creates a Policy."""
-        post_body = {
-            "blob": blob,
-            "type": type
-        }
-        post_body = json.dumps({'policy': post_body})
+    def create_policy(self, **kwargs):
+        """Creates a Policy.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createPolicy
+        """
+        post_body = json.dumps({'policy': kwargs})
         resp, body = self.post('policies', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
@@ -40,7 +44,7 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_policy(self, policy_id):
+    def show_policy(self, policy_id):
         """Lists out the given policy."""
         url = 'policies/%s' % policy_id
         resp, body = self.get(url)
@@ -49,12 +53,12 @@
         return service_client.ResponseBody(resp, body)
 
     def update_policy(self, policy_id, **kwargs):
-        """Updates a policy."""
-        type = kwargs.get('type')
-        post_body = {
-            'type': type
-        }
-        post_body = json.dumps({'policy': post_body})
+        """Updates a policy.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updatePolicy
+        """
+        post_body = json.dumps({'policy': kwargs})
         url = 'policies/%s' % policy_id
         resp, body = self.patch(url, post_body)
         self.expected_success(200, resp.status)
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/regions_client.py
similarity index 66%
rename from tempest/services/identity/v3/json/region_client.py
rename to tempest/services/identity/v3/json/regions_client.py
index 24c6f33..bc4b7a1 100644
--- a/tempest/services/identity/v3/json/region_client.py
+++ b/tempest/services/identity/v3/json/regions_client.py
@@ -13,46 +13,53 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+"""
+http://developer.openstack.org/api-ref-identity-v3.html#regions-v3
+"""
+
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 
 from tempest.common import service_client
 
 
-class RegionClient(service_client.ServiceClient):
+class RegionsClient(service_client.ServiceClient):
     api_version = "v3"
 
-    def create_region(self, description, **kwargs):
-        """Create region."""
-        req_body = {
-            'description': description,
-        }
-        if kwargs.get('parent_region_id'):
-            req_body['parent_region_id'] = kwargs.get('parent_region_id')
-        req_body = json.dumps({'region': req_body})
-        if kwargs.get('unique_region_id'):
-            resp, body = self.put(
-                'regions/%s' % kwargs.get('unique_region_id'), req_body)
+    def create_region(self, region_id=None, **kwargs):
+        """Create region.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createRegion
+
+                          see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createRegionWithID
+        """
+        if region_id is not None:
+            method = self.put
+            url = 'regions/%s' % region_id
         else:
-            resp, body = self.post('regions', req_body)
+            method = self.post
+            url = 'regions'
+        req_body = json.dumps({'region': kwargs})
+        resp, body = method(url, req_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
     def update_region(self, region_id, **kwargs):
-        """Updates a region."""
-        post_body = {}
-        if 'description' in kwargs:
-            post_body['description'] = kwargs.get('description')
-        if 'parent_region_id' in kwargs:
-            post_body['parent_region_id'] = kwargs.get('parent_region_id')
-        post_body = json.dumps({'region': post_body})
+        """Updates a region.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updateRegion
+        """
+        post_body = json.dumps({'region': kwargs})
         resp, body = self.patch('regions/%s' % region_id, post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_region(self, region_id):
+    def show_region(self, region_id):
         """Get region."""
         url = 'regions/%s' % region_id
         resp, body = self.get(url)
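
create_region now switches between POST /regions and PUT /regions/<region_id> based solely on whether region_id is given, and no longer builds the body itself. A sketch, assuming regions_client is a RegionsClient instance and the values are illustrative:

    # POST /regions: let Keystone generate the region ID.
    region = regions_client.create_region(description='auto-id')['region']
    # PUT /regions/<region_id>: create the region under a chosen ID.
    regions_client.create_region(region_id='example-region',
                                 description='fixed-id')
    regions_client.update_region(region['id'], description='updated')
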
diff --git a/tempest/services/identity/v3/json/service_client.py b/tempest/services/identity/v3/json/services_client.py
similarity index 69%
rename from tempest/services/identity/v3/json/service_client.py
rename to tempest/services/identity/v3/json/services_client.py
index 2acc3a8..dd65f1d 100644
--- a/tempest/services/identity/v3/json/service_client.py
+++ b/tempest/services/identity/v3/json/services_client.py
@@ -13,32 +13,31 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+"""
+http://developer.openstack.org/api-ref-identity-v3.html#service-catalog-v3
+"""
+
 from oslo_serialization import jsonutils as json
 
 from tempest.common import service_client
 
 
-class ServiceClient(service_client.ServiceClient):
+class ServicesClient(service_client.ServiceClient):
     api_version = "v3"
 
     def update_service(self, service_id, **kwargs):
-        """Updates a service."""
-        body = self.get_service(service_id)['service']
-        name = kwargs.get('name', body['name'])
-        type = kwargs.get('type', body['type'])
-        desc = kwargs.get('description', body['description'])
-        patch_body = {
-            'description': desc,
-            'type': type,
-            'name': name
-        }
-        patch_body = json.dumps({'service': patch_body})
+        """Updates a service.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#updateService
+        """
+        patch_body = json.dumps({'service': kwargs})
         resp, body = self.patch('services/%s' % service_id, patch_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def get_service(self, service_id):
+    def show_service(self, service_id):
         """Get Service."""
         url = 'services/%s' % service_id
         resp, body = self.get(url)
@@ -46,15 +45,13 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def create_service(self, serv_type, name=None, description=None,
-                       enabled=True):
-        body_dict = {
-            'name': name,
-            'type': serv_type,
-            'enabled': enabled,
-            'description': description,
-        }
-        body = json.dumps({'service': body_dict})
+    def create_service(self, **kwargs):
+        """Creates a service.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-identity-v3.html#createService
+        """
+        body = json.dumps({'service': kwargs})
         resp, body = self.post("services", body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
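
ServicesClient follows the same kwargs-under-'service' convention for create and update. Illustrative sketch; services_client is assumed to be an instance of the class above, and the field names mirror the old fixed signature (type, name, description).

    service = services_client.create_service(type='compute', name='nova',
                                             description='example')['service']
    services_client.update_service(service['id'], description='updated')
    services_client.show_service(service['id'])
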
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/images_client.py
similarity index 85%
rename from tempest/services/image/v1/json/image_client.py
rename to tempest/services/image/v1/json/images_client.py
index d97da36..af2e68c 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/images_client.py
@@ -32,13 +32,13 @@
 LOG = logging.getLogger(__name__)
 
 
-class ImageClient(service_client.ServiceClient):
+class ImagesClient(service_client.ServiceClient):
 
     def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
                  build_interval=None, build_timeout=None,
                  disable_ssl_certificate_validation=None,
                  ca_certs=None, trace_requests=None):
-        super(ImageClient, self).__init__(
+        super(ImagesClient, self).__init__(
             auth_provider,
             catalog_type,
             region,
@@ -147,50 +147,29 @@
             self._http = self._get_http()
         return self._http
 
-    def create_image(self, name, container_format, disk_format, **kwargs):
-        params = {
-            "name": name,
-            "container_format": container_format,
-            "disk_format": disk_format,
-        }
-
+    def create_image(self, **kwargs):
         headers = {}
+        data = kwargs.pop('data', None)
+        headers.update(self._image_meta_to_headers(kwargs))
 
-        for option in ['is_public', 'location', 'properties',
-                       'copy_from', 'min_ram']:
-            if option in kwargs:
-                params[option] = kwargs.get(option)
-
-        headers.update(self._image_meta_to_headers(params))
-
-        if 'data' in kwargs:
-            return self._create_with_data(headers, kwargs.get('data'))
+        if data is not None:
+            return self._create_with_data(headers, data)
 
         resp, body = self.post('v1/images', None, headers)
         self.expected_success(201, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def update_image(self, image_id, name=None, container_format=None,
-                     data=None, properties=None):
-        params = {}
+    def update_image(self, image_id, **kwargs):
         headers = {}
-        if name is not None:
-            params['name'] = name
-
-        if container_format is not None:
-            params['container_format'] = container_format
-
-        if properties is not None:
-            params['properties'] = properties
-
-        headers.update(self._image_meta_to_headers(params))
+        data = kwargs.pop('data', None)
+        headers.update(self._image_meta_to_headers(kwargs))
 
         if data is not None:
             return self._update_with_data(image_id, headers, data)
 
         url = 'v1/images/%s' % image_id
-        resp, body = self.put(url, data, headers)
+        resp, body = self.put(url, None, headers)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
@@ -201,21 +180,27 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def list_images(self, detail=False, properties=dict(),
-                    changes_since=None, **kwargs):
+    def list_images(self, detail=False, **kwargs):
+        """Return a list of all images filtered by input parameters.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v1.html#listImage-v1
+
+        All parameters except the following are passed to the API without
+        any changes.
+        :param changes_since: The name is changed to changes-since
+        """
         url = 'v1/images'
 
         if detail:
             url += '/detail'
 
-        params = {}
-        for key, value in properties.items():
-            params['property-%s' % key] = value
+        properties = kwargs.pop('properties', {})
+        for key, value in six.iteritems(properties):
+            kwargs['property-%s' % key] = value
 
-        kwargs.update(params)
-
-        if changes_since is not None:
-            kwargs['changes-since'] = changes_since
+        if kwargs.get('changes_since'):
+            kwargs['changes-since'] = kwargs.pop('changes_since')
 
         if len(kwargs) > 0:
             url += '?%s' % urllib.urlencode(kwargs)
@@ -265,11 +250,14 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def add_member(self, member_id, image_id, can_share=False):
+    def add_member(self, member_id, image_id, **kwargs):
+        """Add a member to an image.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v1.html#addMember-v1
+        """
         url = 'v1/images/%s/members/%s' % (image_id, member_id)
-        body = None
-        if can_share:
-            body = json.dumps({'member': {'can_share': True}})
+        body = json.dumps({'member': kwargs})
         resp, __ = self.put(url, body)
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp)
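
For the v1 images client, every keyword argument except data is converted to an x-image-meta-* header, and data (when given) switches the call to the chunked-upload path; list_images expands properties into property-* filters and renames changes_since. A sketch of the new interface, assuming images_client is an ImagesClient instance and member_tenant_id exists (values illustrative):

    image = images_client.create_image(name='example', container_format='bare',
                                       disk_format='raw',
                                       is_public=False)['image']
    image_data = b'example image content'     # stand-in for real image bytes
    images_client.update_image(image['id'], data=image_data)
    images_client.list_images(detail=True, properties={'os_type': 'linux'},
                              changes_since='2015-12-01T00:00:00Z')
    images_client.add_member(member_tenant_id, image['id'], can_share=True)
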
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/images_client.py
similarity index 77%
rename from tempest/services/image/v2/json/image_client.py
rename to tempest/services/image/v2/json/images_client.py
index eea179d..72b203a 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/images_client.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import jsonschema
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 from tempest_lib import exceptions as lib_exc
@@ -22,13 +21,13 @@
 from tempest.common import service_client
 
 
-class ImageClientV2(service_client.ServiceClient):
+class ImagesClientV2(service_client.ServiceClient):
 
     def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
                  build_interval=None, build_timeout=None,
                  disable_ssl_certificate_validation=None, ca_certs=None,
                  trace_requests=None):
-        super(ImageClientV2, self).__init__(
+        super(ImagesClientV2, self).__init__(
             auth_provider,
             catalog_type,
             region,
@@ -49,14 +48,6 @@
                                       insecure=self.dscv,
                                       ca_certs=self.ca_certs)
 
-    def _validate_schema(self, body, type='image'):
-        if type in ['image', 'images']:
-            schema = self.show_schema(type)
-        else:
-            raise ValueError("%s is not a valid schema type" % type)
-
-        jsonschema.validate(body, schema)
-
     @property
     def http(self):
         if self._http is None:
@@ -64,9 +55,12 @@
         return self._http
 
     def update_image(self, image_id, patch):
-        data = json.dumps(patch)
-        self._validate_schema(data)
+        """Update an image.
 
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#updateImage-v2
+        """
+        data = json.dumps(patch)
         headers = {"Content-Type": "application/openstack-images-v2.0"
                                    "-json-patch"}
         resp, body = self.patch('v2/images/%s' % image_id, data, headers)
@@ -74,23 +68,13 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def create_image(self, name, container_format, disk_format, **kwargs):
-        params = {
-            "name": name,
-            "container_format": container_format,
-            "disk_format": disk_format,
-        }
+    def create_image(self, **kwargs):
+        """Create an image.
 
-        for option in kwargs:
-            value = kwargs.get(option)
-            if isinstance(value, dict) or isinstance(value, tuple):
-                params.update(value)
-            else:
-                params[option] = value
-
-        data = json.dumps(params)
-        self._validate_schema(data)
-
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#createImage-v2
+        """
+        data = json.dumps(kwargs)
         resp, body = self.post('v2/images', data)
         self.expected_success(201, resp.status)
         body = json.loads(body)
@@ -123,7 +107,6 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        self._validate_schema(body, type='images')
         return service_client.ResponseBody(resp, body)
 
     def show_image(self, image_id):
@@ -153,7 +136,7 @@
         self.expected_success(204, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def load_image_file(self, image_id):
+    def show_image_file(self, image_id):
         url = 'v2/images/%s/file' % image_id
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
@@ -178,17 +161,27 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def add_image_member(self, image_id, member_id):
+    def create_image_member(self, image_id, **kwargs):
+        """Create an image member.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#createImageMember-v2
+        """
         url = 'v2/images/%s/members' % image_id
-        data = json.dumps({'member': member_id})
+        data = json.dumps(kwargs)
         resp, body = self.post(url, data)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def update_image_member(self, image_id, member_id, body):
+    def update_image_member(self, image_id, member_id, **kwargs):
+        """Update an image member.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#updateImageMember-v2
+        """
         url = 'v2/images/%s/members/%s' % (image_id, member_id)
-        data = json.dumps(body)
+        data = json.dumps(kwargs)
         resp, body = self.put(url, data)
         self.expected_success(200, resp.status)
         body = json.loads(body)
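
With the member methods above, the caller now supplies the request body fields directly. A short sketch, assuming images_v2_client, image_id and member_tenant_id exist (illustrative only):

    images_v2_client.create_image_member(image_id, member=member_tenant_id)
    images_v2_client.update_image_member(image_id, member_tenant_id,
                                         status='accepted')
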
@@ -200,7 +193,7 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, json.loads(body))
 
-    def remove_image_member(self, image_id, member_id):
+    def delete_image_member(self, image_id, member_id):
         url = 'v2/images/%s/members/%s' % (image_id, member_id)
         resp, _ = self.delete(url)
         self.expected_success(204, resp.status)
@@ -220,54 +213,43 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def create_namespaces(self, namespace, **kwargs):
-        params = {
-            "namespace": namespace,
-        }
+    def create_namespace(self, **kwargs):
+        """Create a namespace.
 
-        for option in kwargs:
-            value = kwargs.get(option)
-            if isinstance(value, dict) or isinstance(value, tuple):
-                params.update(value)
-            else:
-                params[option] = value
-
-        data = json.dumps(params)
-        self._validate_schema(data)
-
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#createNamespace-v2
+        """
+        data = json.dumps(kwargs)
         resp, body = self.post('/v2/metadefs/namespaces', data)
         self.expected_success(201, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def show_namespaces(self, namespace):
+    def show_namespace(self, namespace):
         url = '/v2/metadefs/namespaces/%s' % namespace
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def update_namespaces(self, namespace, visibility, **kwargs):
-        params = {
-            "namespace": namespace,
-            "visibility": visibility
-        }
-        for option in kwargs:
-            value = kwargs.get(option)
-            if isinstance(value, dict) or isinstance(value, tuple):
-                params.update(value)
-            else:
-                params[option] = value
+    def update_namespace(self, namespace, **kwargs):
+        """Update a namespace.
 
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#updateNamespace-v2
+        """
+        # NOTE: The Glance API requires the namespace to be passed in both
+        # the URI and the request body.
+        params = {'namespace': namespace}
+        params.update(kwargs)
         data = json.dumps(params)
-        self._validate_schema(data)
         url = '/v2/metadefs/namespaces/%s' % namespace
         resp, body = self.put(url, body=data)
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def delete_namespaces(self, namespace):
+    def delete_namespace(self, namespace):
         url = '/v2/metadefs/namespaces/%s' % namespace
         resp, _ = self.delete(url)
         self.expected_success(204, resp.status)
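
The v2 client now drops local jsonschema validation and passes caller kwargs straight through as the JSON body, both for images and for the metadef namespace calls (which use singular method names). A sketch, assuming images_v2_client is an ImagesClientV2 instance and the namespace name is illustrative:

    image = images_v2_client.create_image(name='example',
                                          container_format='bare',
                                          disk_format='raw',
                                          visibility='private')
    images_v2_client.create_namespace(namespace='OS::Example::Namespace')
    images_v2_client.update_namespace('OS::Example::Namespace',
                                      visibility='public')
    images_v2_client.delete_namespace('OS::Example::Namespace')
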
diff --git a/tempest/services/messaging/json/messaging_client.py b/tempest/services/messaging/json/messaging_client.py
index 2f233a9..5a43841 100644
--- a/tempest/services/messaging/json/messaging_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -170,7 +170,7 @@
         self.expected_success(204, resp.status)
         return resp, body
 
-    def release_claim(self, claim_uri):
+    def delete_claim(self, claim_uri):
         resp, body = self.delete(claim_uri)
         self.expected_success(204, resp.status)
         return resp, body
diff --git a/tempest/services/network/json/agents_client.py b/tempest/services/network/json/agents_client.py
new file mode 100644
index 0000000..8bec847
--- /dev/null
+++ b/tempest/services/network/json/agents_client.py
@@ -0,0 +1,68 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.network.json import base
+
+
+class AgentsClient(base.BaseNetworkClient):
+
+    def update_agent(self, agent_id, **kwargs):
+        """Update agent."""
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526673
+        uri = '/agents/%s' % agent_id
+        return self.update_resource(uri, kwargs)
+
+    def show_agent(self, agent_id, **fields):
+        uri = '/agents/%s' % agent_id
+        return self.show_resource(uri, **fields)
+
+    def list_agents(self, **filters):
+        uri = '/agents'
+        return self.list_resources(uri, **filters)
+
+    def list_routers_on_l3_agent(self, agent_id):
+        uri = '/agents/%s/l3-routers' % agent_id
+        return self.list_resources(uri)
+
+    def create_router_on_l3_agent(self, agent_id, **kwargs):
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526670
+        uri = '/agents/%s/l3-routers' % agent_id
+        return self.create_resource(uri, kwargs)
+
+    def delete_router_from_l3_agent(self, agent_id, router_id):
+        uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
+        return self.delete_resource(uri)
+
+    def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
+        uri = '/agents/%s/dhcp-networks' % agent_id
+        return self.list_resources(uri)
+
+    def delete_network_from_dhcp_agent(self, agent_id, network_id):
+        uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
+                                               network_id)
+        return self.delete_resource(uri)
+
+    def add_dhcp_agent_to_network(self, agent_id, **kwargs):
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526212
+        uri = '/agents/%s/dhcp-networks' % agent_id
+        return self.create_resource(uri, kwargs)
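
The agent operations split out of NetworkClient keep the BaseNetworkClient helpers, so request bodies are passed as plain kwargs. A short sketch, assuming agents_client is an AgentsClient instance and router_id/network_id exist (illustrative only):

    agents = agents_client.list_agents(agent_type='L3 agent')['agents']
    agent_id = agents[0]['id']
    agents_client.update_agent(agent_id, agent={'description': 'updated'})
    agents_client.create_router_on_l3_agent(agent_id, router_id=router_id)
    agents_client.add_dhcp_agent_to_network(agent_id, network_id=network_id)
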
diff --git a/tempest/services/network/json/base.py b/tempest/services/network/json/base.py
index fe150df..6ebc245 100644
--- a/tempest/services/network/json/base.py
+++ b/tempest/services/network/json/base.py
@@ -18,9 +18,10 @@
 
 class BaseNetworkClient(service_client.ServiceClient):
 
-    """
-    Base class for Tempest REST clients for Neutron.  Child classes use v2 of
-    the Neutron API, since the V1 API has been removed from the code base.
+    """Base class for Tempest REST clients for Neutron.
+
+    Child classes use v2 of the Neutron API, since the V1 API has been
+    removed from the code base.
     """
 
     version = '2.0'
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/network/json/extensions_client.py
similarity index 61%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/network/json/extensions_client.py
index f0cc03f..64d3a4f 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/network/json/extensions_client.py
@@ -1,6 +1,3 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
@@ -13,12 +10,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.network.json import base
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class ExtensionsClient(base.BaseNetworkClient):
+
+    def show_extension(self, ext_alias, **fields):
+        uri = '/extensions/%s' % ext_alias
+        return self.show_resource(uri, **fields)
+
+    def list_extensions(self, **filters):
+        uri = '/extensions'
+        return self.list_resources(uri, **filters)
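
The extensions calls moved into their own client unchanged. Minimal sketch, assuming extensions_client is an ExtensionsClient instance:

    extensions = extensions_client.list_extensions()['extensions']
    if any(ext['alias'] == 'quotas' for ext in extensions):
        extensions_client.show_extension('quotas')
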
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 7821f37..c6b22df 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -21,9 +21,10 @@
 
 class NetworkClient(base.BaseNetworkClient):
 
-    """
-    Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
-    V1 API has been removed from the code base.
+    """Tempest REST client for Neutron.
+
+    Uses v2 of the Neutron API, since the V1 API has been removed from the
+    code base.
 
     Implements create, delete, update, list and show for the basic Neutron
     abstractions (networks, sub-networks, routers, ports and floating IP):
@@ -34,161 +35,49 @@
     quotas
     """
 
-    def create_port(self, **kwargs):
-        uri = '/ports'
-        post_data = {'port': kwargs}
-        return self.create_resource(uri, post_data)
+    def create_bulk_network(self, **kwargs):
+        """create bulk network
 
-    def update_port(self, port_id, **kwargs):
-        uri = '/ports/%s' % port_id
-        post_data = {'port': kwargs}
-        return self.update_resource(uri, post_data)
-
-    def show_port(self, port_id, **fields):
-        uri = '/ports/%s' % port_id
-        return self.show_resource(uri, **fields)
-
-    def delete_port(self, port_id):
-        uri = '/ports/%s' % port_id
-        return self.delete_resource(uri)
-
-    def list_ports(self, **filters):
-        uri = '/ports'
-        return self.list_resources(uri, **filters)
-
-    def create_floatingip(self, **kwargs):
-        uri = '/floatingips'
-        post_data = {'floatingip': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def update_floatingip(self, floatingip_id, **kwargs):
-        uri = '/floatingips/%s' % floatingip_id
-        post_data = {'floatingip': kwargs}
-        return self.update_resource(uri, post_data)
-
-    def show_floatingip(self, floatingip_id, **fields):
-        uri = '/floatingips/%s' % floatingip_id
-        return self.show_resource(uri, **fields)
-
-    def delete_floatingip(self, floatingip_id):
-        uri = '/floatingips/%s' % floatingip_id
-        return self.delete_resource(uri)
-
-    def list_floatingips(self, **filters):
-        uri = '/floatingips'
-        return self.list_resources(uri, **filters)
-
-    def create_metering_label(self, **kwargs):
-        uri = '/metering/metering-labels'
-        post_data = {'metering_label': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def show_metering_label(self, metering_label_id, **fields):
-        uri = '/metering/metering-labels/%s' % metering_label_id
-        return self.show_resource(uri, **fields)
-
-    def delete_metering_label(self, metering_label_id):
-        uri = '/metering/metering-labels/%s' % metering_label_id
-        return self.delete_resource(uri)
-
-    def list_metering_labels(self, **filters):
-        uri = '/metering/metering-labels'
-        return self.list_resources(uri, **filters)
-
-    def create_metering_label_rule(self, **kwargs):
-        uri = '/metering/metering-label-rules'
-        post_data = {'metering_label_rule': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def show_metering_label_rule(self, metering_label_rule_id, **fields):
-        uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
-        return self.show_resource(uri, **fields)
-
-    def delete_metering_label_rule(self, metering_label_rule_id):
-        uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
-        return self.delete_resource(uri)
-
-    def list_metering_label_rules(self, **filters):
-        uri = '/metering/metering-label-rules'
-        return self.list_resources(uri, **filters)
-
-    def create_security_group(self, **kwargs):
-        uri = '/security-groups'
-        post_data = {'security_group': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def update_security_group(self, security_group_id, **kwargs):
-        uri = '/security-groups/%s' % security_group_id
-        post_data = {'security_group': kwargs}
-        return self.update_resource(uri, post_data)
-
-    def show_security_group(self, security_group_id, **fields):
-        uri = '/security-groups/%s' % security_group_id
-        return self.show_resource(uri, **fields)
-
-    def delete_security_group(self, security_group_id):
-        uri = '/security-groups/%s' % security_group_id
-        return self.delete_resource(uri)
-
-    def list_security_groups(self, **filters):
-        uri = '/security-groups'
-        return self.list_resources(uri, **filters)
-
-    def create_security_group_rule(self, **kwargs):
-        uri = '/security-group-rules'
-        post_data = {'security_group_rule': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def show_security_group_rule(self, security_group_rule_id, **fields):
-        uri = '/security-group-rules/%s' % security_group_rule_id
-        return self.show_resource(uri, **fields)
-
-    def delete_security_group_rule(self, security_group_rule_id):
-        uri = '/security-group-rules/%s' % security_group_rule_id
-        return self.delete_resource(uri)
-
-    def list_security_group_rules(self, **filters):
-        uri = '/security-group-rules'
-        return self.list_resources(uri, **filters)
-
-    def show_extension(self, ext_alias, **fields):
-        uri = '/extensions/%s' % ext_alias
-        return self.show_resource(uri, **fields)
-
-    def list_extensions(self, **filters):
-        uri = '/extensions'
-        return self.list_resources(uri, **filters)
-
-    def create_bulk_network(self, names):
-        network_list = [{'name': name} for name in names]
-        post_data = {'networks': network_list}
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2.html#bulkCreateNetwork
+        """
         uri = '/networks'
-        return self.create_resource(uri, post_data)
+        return self.create_resource(uri, kwargs)
 
-    def create_bulk_subnet(self, subnet_list):
-        post_data = {'subnets': subnet_list}
+    def create_bulk_subnet(self, **kwargs):
+        """create bulk subnet
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2.html#bulkCreateSubnet
+        """
         uri = '/subnets'
-        return self.create_resource(uri, post_data)
+        return self.create_resource(uri, kwargs)
 
-    def create_bulk_port(self, port_list):
-        post_data = {'ports': port_list}
+    def create_bulk_port(self, **kwargs):
+        """create bulk port
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2.html#bulkCreatePorts
+        """
         uri = '/ports'
-        return self.create_resource(uri, post_data)
+        return self.create_resource(uri, kwargs)
 
-    def wait_for_resource_deletion(self, resource_type, id):
+    def wait_for_resource_deletion(self, resource_type, id, client=None):
         """Waits for a resource to be deleted."""
         start_time = int(time.time())
         while True:
-            if self.is_resource_deleted(resource_type, id):
+            if self.is_resource_deleted(resource_type, id, client=client):
                 return
             if int(time.time()) - start_time >= self.build_timeout:
                 raise exceptions.TimeoutException
             time.sleep(self.build_interval)
 
-    def is_resource_deleted(self, resource_type, id):
+    def is_resource_deleted(self, resource_type, id, client=None):
+        if client is None:
+            client = self
         method = 'show_' + resource_type
         try:
-            getattr(self, method)(id)
+            getattr(client, method)(id)
         except AttributeError:
             raise Exception("Unknown resource type %s " % resource_type)
         except lib_exc.NotFound:
@@ -197,8 +86,8 @@
 
     def wait_for_resource_status(self, fetch, status, interval=None,
                                  timeout=None):
-        """
-        @summary: Waits for a network resource to reach a status
+        """Waits for a network resource to reach a status
+
         @param fetch: the callable to be used to query the resource status
         @type fecth: callable that takes no parameters and returns the resource
         @param status: the status that the resource has to reach
@@ -232,23 +121,6 @@
             message = '(%s) %s' % (caller, message)
         raise exceptions.TimeoutException(message)
 
-    def update_quotas(self, tenant_id, **kwargs):
-        put_body = {'quota': kwargs}
-        uri = '/quotas/%s' % tenant_id
-        return self.update_resource(uri, put_body)
-
-    def reset_quotas(self, tenant_id):
-        uri = '/quotas/%s' % tenant_id
-        return self.delete_resource(uri)
-
-    def show_quotas(self, tenant_id, **fields):
-        uri = '/quotas/%s' % tenant_id
-        return self.show_resource(uri, **fields)
-
-    def list_quotas(self, **filters):
-        uri = '/quotas'
-        return self.list_resources(uri, **filters)
-
     def create_router(self, name, admin_state_up=True, **kwargs):
         post_body = {'router': kwargs}
         post_body['router']['name'] = name
@@ -308,84 +180,44 @@
         """
         return self._update_router(router_id, set_enable_snat=True, **kwargs)
 
-    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
+    def add_router_interface(self, router_id, **kwargs):
+        """Add router interface.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2-ext.html#addRouterInterface
+        """
         uri = '/routers/%s/add_router_interface' % router_id
-        update_body = {"subnet_id": subnet_id}
-        return self.update_resource(uri, update_body)
+        return self.update_resource(uri, kwargs)
 
-    def add_router_interface_with_port_id(self, router_id, port_id):
-        uri = '/routers/%s/add_router_interface' % router_id
-        update_body = {"port_id": port_id}
-        return self.update_resource(uri, update_body)
+    def remove_router_interface(self, router_id, **kwargs):
+        """Remove router interface.
 
-    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2-ext.html#removeRouterInterface
+        """
         uri = '/routers/%s/remove_router_interface' % router_id
-        update_body = {"subnet_id": subnet_id}
-        return self.update_resource(uri, update_body)
-
-    def remove_router_interface_with_port_id(self, router_id, port_id):
-        uri = '/routers/%s/remove_router_interface' % router_id
-        update_body = {"port_id": port_id}
-        return self.update_resource(uri, update_body)
+        return self.update_resource(uri, kwargs)
 
     def list_router_interfaces(self, uuid):
         uri = '/ports?device_id=%s' % uuid
         return self.list_resources(uri)
 
-    def update_agent(self, agent_id, agent_info):
-        """
-        :param agent_info: Agent update information.
-        E.g {"admin_state_up": True}
-        """
-        uri = '/agents/%s' % agent_id
-        agent = {"agent": agent_info}
-        return self.update_resource(uri, agent)
-
-    def show_agent(self, agent_id, **fields):
-        uri = '/agents/%s' % agent_id
-        return self.show_resource(uri, **fields)
-
-    def list_agents(self, **filters):
-        uri = '/agents'
-        return self.list_resources(uri, **filters)
-
-    def list_routers_on_l3_agent(self, agent_id):
-        uri = '/agents/%s/l3-routers' % agent_id
-        return self.list_resources(uri)
-
     def list_l3_agents_hosting_router(self, router_id):
         uri = '/routers/%s/l3-agents' % router_id
         return self.list_resources(uri)
 
-    def add_router_to_l3_agent(self, agent_id, router_id):
-        uri = '/agents/%s/l3-routers' % agent_id
-        post_body = {"router_id": router_id}
-        return self.create_resource(uri, post_body)
-
-    def remove_router_from_l3_agent(self, agent_id, router_id):
-        uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
-        return self.delete_resource(uri)
-
     def list_dhcp_agent_hosting_network(self, network_id):
         uri = '/networks/%s/dhcp-agents' % network_id
         return self.list_resources(uri)
 
-    def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
-        uri = '/agents/%s/dhcp-networks' % agent_id
-        return self.list_resources(uri)
+    def update_extra_routes(self, router_id, **kwargs):
+        """Update Extra routes.
 
-    def remove_network_from_dhcp_agent(self, agent_id, network_id):
-        uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
-                                               network_id)
-        return self.delete_resource(uri)
-
-    def update_extra_routes(self, router_id, routes):
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2-ext.html#updateExtraRoutes
+        """
         uri = '/routers/%s' % router_id
-        put_body = {
-            'router': {
-                'routes': routes
-            }
-        }
+        put_body = {'router': kwargs}
         return self.update_resource(uri, put_body)
 
     def delete_extra_routes(self, router_id):
@@ -396,30 +228,3 @@
             }
         }
         return self.update_resource(uri, put_body)
-
-    def add_dhcp_agent_to_network(self, agent_id, network_id):
-        post_body = {'network_id': network_id}
-        uri = '/agents/%s/dhcp-networks' % agent_id
-        return self.create_resource(uri, post_body)
-
-    def list_subnetpools(self, **filters):
-        uri = '/subnetpools'
-        return self.list_resources(uri, **filters)
-
-    def create_subnetpools(self, **kwargs):
-        uri = '/subnetpools'
-        post_data = {'subnetpool': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def show_subnetpools(self, subnetpool_id, **fields):
-        uri = '/subnetpools/%s' % subnetpool_id
-        return self.show_resource(uri, **fields)
-
-    def update_subnetpools(self, subnetpool_id, **kwargs):
-        uri = '/subnetpools/%s' % subnetpool_id
-        post_data = {'subnetpool': kwargs}
-        return self.update_resource(uri, post_data)
-
-    def delete_subnetpools(self, subnetpool_id):
-        uri = '/subnetpools/%s' % subnetpool_id
-        return self.delete_resource(uri)
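
The consolidated router-interface and extra-routes methods above drop the
per-resource helpers in favour of plain kwargs. A minimal sketch of the new
call pattern, assuming an already-constructed network client and placeholder
IDs:

    def plug_and_unplug_subnet(network_client, router_id, subnet_id):
        # The interface to attach/detach is named by a keyword (subnet_id or
        # port_id) and sent as the request body by the client.
        network_client.add_router_interface(router_id, subnet_id=subnet_id)
        network_client.remove_router_interface(router_id, subnet_id=subnet_id)

        # update_extra_routes() now wraps its kwargs as {'router': kwargs},
        # so the routes list is passed directly as a keyword argument.
        network_client.update_extra_routes(
            router_id,
            routes=[{'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'}])
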
diff --git a/tempest/services/network/json/networks_client.py b/tempest/services/network/json/networks_client.py
deleted file mode 100644
index 2907d44..0000000
--- a/tempest/services/network/json/networks_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.network.json import base
-
-
-class NetworksClient(base.BaseNetworkClient):
-
-    def create_network(self, **kwargs):
-        uri = '/networks'
-        post_data = {'network': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def update_network(self, network_id, **kwargs):
-        uri = '/networks/%s' % network_id
-        post_data = {'network': kwargs}
-        return self.update_resource(uri, post_data)
-
-    def show_network(self, network_id, **fields):
-        uri = '/networks/%s' % network_id
-        return self.show_resource(uri, **fields)
-
-    def delete_network(self, network_id):
-        uri = '/networks/%s' % network_id
-        return self.delete_resource(uri)
-
-    def list_networks(self, **filters):
-        uri = '/networks'
-        return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/quotas_client.py b/tempest/services/network/json/quotas_client.py
new file mode 100644
index 0000000..9b65a54
--- /dev/null
+++ b/tempest/services/network/json/quotas_client.py
@@ -0,0 +1,35 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.network.json import base
+
+
+class QuotasClient(base.BaseNetworkClient):
+
+    def update_quotas(self, tenant_id, **kwargs):
+        put_body = {'quota': kwargs}
+        uri = '/quotas/%s' % tenant_id
+        return self.update_resource(uri, put_body)
+
+    def reset_quotas(self, tenant_id):
+        uri = '/quotas/%s' % tenant_id
+        return self.delete_resource(uri)
+
+    def show_quotas(self, tenant_id, **fields):
+        uri = '/quotas/%s' % tenant_id
+        return self.show_resource(uri, **fields)
+
+    def list_quotas(self, **filters):
+        uri = '/quotas'
+        return self.list_resources(uri, **filters)
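
A minimal usage sketch for the new QuotasClient; the client instance is
assumed to come from the Tempest client manager (construction omitted), and
the response is assumed to follow the usual Neutron nesting under 'quota':

    def raise_port_quota(quotas_client, tenant_id):
        # The kwargs become the request body: {'quota': {'port': 50}}.
        quotas_client.update_quotas(tenant_id, port=50)
        # show/list pass optional fields and filters through as query args.
        return quotas_client.show_quotas(tenant_id)
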
diff --git a/tempest/services/network/json/security_group_rules_client.py b/tempest/services/network/json/security_group_rules_client.py
new file mode 100644
index 0000000..b2ba5b2
--- /dev/null
+++ b/tempest/services/network/json/security_group_rules_client.py
@@ -0,0 +1,33 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.network.json import base
+
+
+class SecurityGroupRulesClient(base.BaseNetworkClient):
+
+    def create_security_group_rule(self, **kwargs):
+        uri = '/security-group-rules'
+        post_data = {'security_group_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def show_security_group_rule(self, security_group_rule_id, **fields):
+        uri = '/security-group-rules/%s' % security_group_rule_id
+        return self.show_resource(uri, **fields)
+
+    def delete_security_group_rule(self, security_group_rule_id):
+        uri = '/security-group-rules/%s' % security_group_rule_id
+        return self.delete_resource(uri)
+
+    def list_security_group_rules(self, **filters):
+        uri = '/security-group-rules'
+        return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/security_groups_client.py b/tempest/services/network/json/security_groups_client.py
new file mode 100644
index 0000000..a60d2a6
--- /dev/null
+++ b/tempest/services/network/json/security_groups_client.py
@@ -0,0 +1,38 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.network.json import base
+
+
+class SecurityGroupsClient(base.BaseNetworkClient):
+
+    def create_security_group(self, **kwargs):
+        uri = '/security-groups'
+        post_data = {'security_group': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_security_group(self, security_group_id, **kwargs):
+        uri = '/security-groups/%s' % security_group_id
+        post_data = {'security_group': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_security_group(self, security_group_id, **fields):
+        uri = '/security-groups/%s' % security_group_id
+        return self.show_resource(uri, **fields)
+
+    def delete_security_group(self, security_group_id):
+        uri = '/security-groups/%s' % security_group_id
+        return self.delete_resource(uri)
+
+    def list_security_groups(self, **filters):
+        uri = '/security-groups'
+        return self.list_resources(uri, **filters)
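
The two new security-group clients above follow the same kwargs-to-body
pattern. A sketch, assuming instantiated clients and the usual Neutron
response nesting ('security_group' / 'security_group_rule'):

    def create_ssh_security_group(security_groups_client,
                                  security_group_rules_client):
        # Posted as {'security_group': {'name': ..., 'description': ...}}.
        sg = security_groups_client.create_security_group(
            name='sketch-sg', description='allow ssh')['security_group']
        # Posted as {'security_group_rule': {...}}.
        security_group_rules_client.create_security_group_rule(
            security_group_id=sg['id'], direction='ingress', protocol='tcp',
            port_range_min=22, port_range_max=22)
        return sg
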
diff --git a/tempest/services/network/json/subnetpools_client.py b/tempest/services/network/json/subnetpools_client.py
new file mode 100644
index 0000000..f921bb0
--- /dev/null
+++ b/tempest/services/network/json/subnetpools_client.py
@@ -0,0 +1,40 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.network.json import base
+
+
+class SubnetpoolsClient(base.BaseNetworkClient):
+
+    def list_subnetpools(self, **filters):
+        uri = '/subnetpools'
+        return self.list_resources(uri, **filters)
+
+    def create_subnetpool(self, **kwargs):
+        uri = '/subnetpools'
+        post_data = {'subnetpool': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def show_subnetpool(self, subnetpool_id, **fields):
+        uri = '/subnetpools/%s' % subnetpool_id
+        return self.show_resource(uri, **fields)
+
+    def update_subnetpool(self, subnetpool_id, **kwargs):
+        uri = '/subnetpools/%s' % subnetpool_id
+        post_data = {'subnetpool': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def delete_subnetpool(self, subnetpool_id):
+        uri = '/subnetpools/%s' % subnetpool_id
+        return self.delete_resource(uri)
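
The subnetpool helpers that previously lived on the generic network client
(with pluralised names such as create_subnetpools) are now a dedicated client
with singular method names. A sketch with placeholder values:

    def create_small_pool(subnetpools_client):
        # The body is built as {'subnetpool': kwargs}.
        body = subnetpools_client.create_subnetpool(
            name='sketch-pool', prefixes=['10.11.0.0/16'], min_prefixlen=24)
        return body['subnetpool']['id']
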
diff --git a/tempest/services/network/json/subnets_client.py b/tempest/services/network/json/subnets_client.py
deleted file mode 100644
index 957b606..0000000
--- a/tempest/services/network/json/subnets_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.network.json import base
-
-
-class SubnetsClient(base.BaseNetworkClient):
-
-    def create_subnet(self, **kwargs):
-        uri = '/subnets'
-        post_data = {'subnet': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def update_subnet(self, subnet_id, **kwargs):
-        uri = '/subnets/%s' % subnet_id
-        post_data = {'subnet': kwargs}
-        return self.update_resource(uri, post_data)
-
-    def show_subnet(self, subnet_id, **fields):
-        uri = '/subnets/%s' % subnet_id
-        return self.show_resource(uri, **fields)
-
-    def delete_subnet(self, subnet_id):
-        uri = '/subnets/%s' % subnet_id
-        return self.delete_resource(uri)
-
-    def list_subnets(self, **filters):
-        uri = '/subnets'
-        return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index 16d9823..0a7da92 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -19,10 +19,7 @@
 
 
 class AttributeDict(dict):
-
-    """
-    Provide attribute access (dict.key) to dictionary values.
-    """
+    """Provide attribute access (dict.key) to dictionary values."""
 
     def __getattr__(self, name):
         """Allow attribute access for all keys in the dict."""
@@ -33,10 +30,9 @@
 
 @six.add_metaclass(abc.ABCMeta)
 class DeletableResource(AttributeDict):
+    """Support deletion of neutron resources (networks, subnets)
 
-    """
-    Support deletion of neutron resources (networks, subnets) via a
-    delete() method, as is supported by keystone and nova resources.
+    via a delete() method, as is supported by keystone and nova resources.
     """
 
     def __init__(self, *args, **kwargs):
@@ -44,6 +40,7 @@
         self.network_client = kwargs.pop('network_client', None)
         self.networks_client = kwargs.pop('networks_client', None)
         self.subnets_client = kwargs.pop('subnets_client', None)
+        self.ports_client = kwargs.pop('ports_client', None)
         super(DeletableResource, self).__init__(*args, **kwargs)
 
     def __str__(self):
@@ -92,14 +89,13 @@
 
     def add_to_router(self, router_id):
         self._router_ids.add(router_id)
-        self.network_client.add_router_interface_with_subnet_id(
-            router_id, subnet_id=self.id)
+        self.network_client.add_router_interface(router_id,
+                                                 subnet_id=self.id)
 
     def delete(self):
         for router_id in self._router_ids.copy():
-            self.network_client.remove_router_interface_with_subnet_id(
-                router_id,
-                subnet_id=self.id)
+            self.network_client.remove_router_interface(router_id,
+                                                        subnet_id=self.id)
             self._router_ids.remove(router_id)
         self.subnets_client.delete_subnet(self.id)
 
@@ -152,7 +148,7 @@
 class DeletablePort(DeletableResource):
 
     def delete(self):
-        self.client.delete_port(self.id)
+        self.ports_client.delete_port(self.id)
 
 
 class DeletableSecurityGroup(DeletableResource):
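
Because DeletablePort.delete() now calls ports_client.delete_port() instead of
the generic client, the wrapper has to be constructed with a ports_client. A
sketch, assuming port_body is the response of a ports_client.create_port()
call:

    from tempest.services.network import resources

    def wrap_and_delete_port(ports_client, port_body):
        port = resources.DeletablePort(ports_client=ports_client,
                                       **port_body['port'])
        port.delete()
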
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index d89aa5d..2c7fe29 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -59,8 +59,8 @@
         return resp, body
 
     def list_account_metadata(self):
-        """
-        HEAD on the storage URL
+        """HEAD on the storage URL
+
         Returns all account metadata headers
         """
         resp, body = self.head('')
@@ -86,9 +86,7 @@
 
     def delete_account_metadata(self, metadata,
                                 metadata_prefix='X-Remove-Account-Meta-'):
-        """
-        Deletes an account metadata entry.
-        """
+        """Deletes an account metadata entry."""
 
         headers = {}
         for item in metadata:
@@ -103,9 +101,7 @@
             delete_metadata=None,
             create_metadata_prefix='X-Account-Meta-',
             delete_metadata_prefix='X-Remove-Account-Meta-'):
-        """
-        Creates and deletes an account metadata entry.
-        """
+        """Creates and deletes an account metadata entry."""
         headers = {}
         for key in create_metadata:
             headers[create_metadata_prefix + key] = create_metadata[key]
@@ -117,8 +113,8 @@
         return resp, body
 
     def list_account_containers(self, params=None):
-        """
-        GET on the (base) storage URL
+        """GET on the (base) storage URL
+
         Given valid X-Auth-Token, returns a list of all containers for the
         account.
 
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index e8ee20b..73c25db 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -29,9 +29,9 @@
             remove_metadata=None,
             metadata_prefix='X-Container-Meta-',
             remove_metadata_prefix='X-Remove-Container-Meta-'):
-        """
-           Creates a container, with optional metadata passed in as a
-           dictionary
+        """Creates a container
+
+        with optional metadata passed in as a dictionary
         """
         url = str(container_name)
         headers = {}
@@ -90,19 +90,17 @@
         return resp, body
 
     def list_container_metadata(self, container_name):
-        """
-        Retrieves container metadata headers
-        """
+        """Retrieves container metadata headers"""
         url = str(container_name)
         resp, body = self.head(url)
         self.expected_success(204, resp.status)
         return resp, body
 
     def list_all_container_objects(self, container, params=None):
-        """
-            Returns complete list of all objects in the container, even if
-            item count is beyond 10,000 item listing limit.
-            Does not require any parameters aside from container name.
+        """Returns complete list of all objects in the container
+
+        even if the item count is beyond the 10,000 item listing limit.
+        Does not require any parameters aside from container name.
         """
         # TODO(dwalleck): Rewrite using json format to avoid newlines at end of
         # obj names. Set limit to API limit - 1 (max returned items = 9999)
@@ -121,8 +119,7 @@
         return objlist
 
     def list_container_contents(self, container, params=None):
-        """
-           List the objects in a container, given the container name
+        """List the objects in a container, given the container name
 
            Returns the container object listing as a plain text list, or as
            xml or json if that option is specified via the 'format' argument.
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 2265587..5890e33 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -149,9 +149,7 @@
         return resp, body
 
     def put_object_with_chunk(self, container, name, contents, chunk_size):
-        """
-        Put an object with Transfer-Encoding header
-        """
+        """Put an object with Transfer-Encoding header"""
         if self.base_url is None:
             self._set_auth()
 
@@ -204,8 +202,8 @@
 
 def put_object_connection(base_url, container, name, contents=None,
                           chunk_size=65536, headers=None, query_string=None):
-    """
-    Helper function to make connection to put object with httplib
+    """Helper function to make connection to put object with httplib
+
     :param base_url: base_url of an object client
     :param container: container name that the object is in
     :param name: object name to put
diff --git a/tempest/services/telemetry/json/alarming_client.py b/tempest/services/telemetry/json/alarming_client.py
new file mode 100644
index 0000000..ce14211
--- /dev/null
+++ b/tempest/services/telemetry/json/alarming_client.py
@@ -0,0 +1,98 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.common import service_client
+
+
+class AlarmingClient(service_client.ServiceClient):
+
+    version = '2'
+    uri_prefix = "v2"
+
+    def deserialize(self, body):
+        return json.loads(body.replace("\n", ""))
+
+    def serialize(self, body):
+        return json.dumps(body)
+
+    def list_alarms(self, query=None):
+        uri = '%s/alarms' % self.uri_prefix
+        uri_dict = {}
+        if query:
+            uri_dict = {'q.field': query[0],
+                        'q.op': query[1],
+                        'q.value': query[2]}
+        if uri_dict:
+            uri += "?%s" % urllib.urlencode(uri_dict)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBodyList(resp, body)
+
+    def show_alarm(self, alarm_id):
+        uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_alarm_history(self, alarm_id):
+        uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBodyList(resp, body)
+
+    def delete_alarm(self, alarm_id):
+        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+        resp, body = self.delete(uri)
+        self.expected_success(204, resp.status)
+        if body:
+            body = self.deserialize(body)
+        return service_client.ResponseBody(resp, body)
+
+    def create_alarm(self, **kwargs):
+        uri = "%s/alarms" % self.uri_prefix
+        body = self.serialize(kwargs)
+        resp, body = self.post(uri, body)
+        self.expected_success(201, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBody(resp, body)
+
+    def update_alarm(self, alarm_id, **kwargs):
+        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+        body = self.serialize(kwargs)
+        resp, body = self.put(uri, body)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBody(resp, body)
+
+    def show_alarm_state(self, alarm_id):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBodyData(resp, body)
+
+    def alarm_set_state(self, alarm_id, state):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        body = self.serialize(state)
+        resp, body = self.put(uri, body)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return service_client.ResponseBodyData(resp, body)
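
The alarm operations removed from telemetry_client.py below now live in this
dedicated AlarmingClient. A sketch of the call pattern; the threshold_rule
shape and the 'alarm_id' key follow the Telemetry alarm API and are not part
of this change:

    def exercise_alarm(alarming_client):
        # create_alarm() serializes its kwargs directly as the alarm body.
        alarm = alarming_client.create_alarm(
            name='sketch-alarm', type='threshold',
            threshold_rule={'meter_name': 'cpu_util',
                            'comparison_operator': 'gt',
                            'threshold': 80.0})
        # State changes use the dedicated /state endpoint.
        alarming_client.alarm_set_state(alarm['alarm_id'], 'ok')
        return alarming_client.show_alarm_history(alarm['alarm_id'])
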
diff --git a/tempest/services/telemetry/json/telemetry_client.py b/tempest/services/telemetry/json/telemetry_client.py
index fc8951e..abdeba2 100644
--- a/tempest/services/telemetry/json/telemetry_client.py
+++ b/tempest/services/telemetry/json/telemetry_client.py
@@ -30,17 +30,6 @@
     def serialize(self, body):
         return json.dumps(body)
 
-    def add_sample(self, sample_list, meter_name, meter_unit, volume,
-                   sample_type, resource_id, **kwargs):
-        sample = {"counter_name": meter_name, "counter_unit": meter_unit,
-                  "counter_volume": volume, "counter_type": sample_type,
-                  "resource_id": resource_id}
-        for key in kwargs:
-            sample[key] = kwargs[key]
-
-        sample_list.append(self.serialize(sample))
-        return sample_list
-
     def create_sample(self, meter_name, sample_list):
         uri = "%s/meters/%s" % (self.uri_prefix, meter_name)
         body = self.serialize(sample_list)
@@ -72,10 +61,6 @@
         uri = '%s/meters' % self.uri_prefix
         return self._helper_list(uri, query)
 
-    def list_alarms(self, query=None):
-        uri = '%s/alarms' % self.uri_prefix
-        return self._helper_list(uri, query)
-
     def list_statistics(self, meter, period=None, query=None):
         uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter)
         return self._helper_list(uri, query, period)
@@ -94,56 +79,3 @@
         self.expected_success(200, resp.status)
         body = self.deserialize(body)
         return service_client.ResponseBody(resp, body)
-
-    def show_alarm(self, alarm_id):
-        uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = self.deserialize(body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_alarm(self, alarm_id):
-        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        if body:
-            body = self.deserialize(body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_alarm(self, **kwargs):
-        uri = "%s/alarms" % self.uri_prefix
-        body = self.serialize(kwargs)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = self.deserialize(body)
-        return service_client.ResponseBody(resp, body)
-
-    def update_alarm(self, alarm_id, **kwargs):
-        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
-        body = self.serialize(kwargs)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = self.deserialize(body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_alarm_state(self, alarm_id):
-        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = self.deserialize(body)
-        return service_client.ResponseBodyData(resp, body)
-
-    def alarm_set_state(self, alarm_id, state):
-        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
-        body = self.serialize(state)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = self.deserialize(body)
-        return service_client.ResponseBodyData(resp, body)
-
-    def show_alarm_history(self, alarm_id):
-        uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = self.deserialize(body)
-        return service_client.ResponseBodyList(resp, body)
diff --git a/tempest/services/volume/json/__init__.py b/tempest/services/volume/base/__init__.py
similarity index 100%
copy from tempest/services/volume/json/__init__.py
copy to tempest/services/volume/base/__init__.py
diff --git a/tempest/services/volume/json/admin/__init__.py b/tempest/services/volume/base/admin/__init__.py
similarity index 100%
copy from tempest/services/volume/json/admin/__init__.py
copy to tempest/services/volume/base/admin/__init__.py
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/base/admin/base_hosts_client.py
similarity index 78%
copy from tempest/services/volume/json/admin/volume_services_client.py
copy to tempest/services/volume/base/admin/base_hosts_client.py
index 798a642..074f87f 100644
--- a/tempest/services/volume/json/admin/volume_services_client.py
+++ b/tempest/services/volume/base/admin/base_hosts_client.py
@@ -1,4 +1,4 @@
-# Copyright 2014 NEC Corporation
+# Copyright 2013 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,10 +19,13 @@
 from tempest.common import service_client
 
 
-class BaseVolumesServicesClient(service_client.ServiceClient):
+class BaseHostsClient(service_client.ServiceClient):
+    """Client class to send CRUD Volume Hosts API requests"""
 
-    def list_services(self, params=None):
-        url = 'os-services'
+    def list_hosts(self, **params):
+        """Lists all hosts."""
+
+        url = 'os-hosts'
         if params:
             url += '?%s' % urllib.urlencode(params)
 
@@ -30,7 +33,3 @@
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class VolumesServicesClient(BaseVolumesServicesClient):
-    """Volume V1 volume services client"""
diff --git a/tempest/services/volume/json/admin/volume_quotas_client.py b/tempest/services/volume/base/admin/base_quotas_client.py
similarity index 71%
rename from tempest/services/volume/json/admin/volume_quotas_client.py
rename to tempest/services/volume/base/admin/base_quotas_client.py
index 207554d..e063a31 100644
--- a/tempest/services/volume/json/admin/volume_quotas_client.py
+++ b/tempest/services/volume/base/admin/base_quotas_client.py
@@ -18,10 +18,8 @@
 from tempest.common import service_client
 
 
-class BaseVolumeQuotasClient(service_client.ServiceClient):
-    """
-    Client class to send CRUD Volume Quotas API requests to a Cinder endpoint
-    """
+class BaseQuotasClient(service_client.ServiceClient):
+    """Client class to send CRUD Volume Quotas API requests"""
 
     TYPE = "json"
 
@@ -52,21 +50,14 @@
         body = self.show_quota_set(tenant_id, params={'usage': True})
         return body
 
-    def update_quota_set(self, tenant_id, gigabytes=None, volumes=None,
-                         snapshots=None):
-        post_body = {}
+    def update_quota_set(self, tenant_id, **kwargs):
+        """Updates quota set
 
-        if gigabytes is not None:
-            post_body['gigabytes'] = gigabytes
-
-        if volumes is not None:
-            post_body['volumes'] = volumes
-
-        if snapshots is not None:
-            post_body['snapshots'] = snapshots
-
-        post_body = jsonutils.dumps({'quota_set': post_body})
-        resp, body = self.put('os-quota-sets/%s' % tenant_id, post_body)
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#updateQuotas-v2
+        """
+        put_body = jsonutils.dumps({'quota_set': kwargs})
+        resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
         self.expected_success(200, resp.status)
         body = jsonutils.loads(body)
         return service_client.ResponseBody(resp, body)
@@ -76,9 +67,3 @@
         resp, body = self.delete('os-quota-sets/%s' % tenant_id)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class VolumeQuotasClient(BaseVolumeQuotasClient):
-    """
-    Client class to send CRUD Volume Type API V1 requests to a Cinder endpoint
-    """
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/base/admin/base_services_client.py
similarity index 83%
rename from tempest/services/volume/json/admin/volume_services_client.py
rename to tempest/services/volume/base/admin/base_services_client.py
index 798a642..3626469 100644
--- a/tempest/services/volume/json/admin/volume_services_client.py
+++ b/tempest/services/volume/base/admin/base_services_client.py
@@ -19,9 +19,9 @@
 from tempest.common import service_client
 
 
-class BaseVolumesServicesClient(service_client.ServiceClient):
+class BaseServicesClient(service_client.ServiceClient):
 
-    def list_services(self, params=None):
+    def list_services(self, **params):
         url = 'os-services'
         if params:
             url += '?%s' % urllib.urlencode(params)
@@ -30,7 +30,3 @@
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class VolumesServicesClient(BaseVolumesServicesClient):
-    """Volume V1 volume services client"""
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/base/admin/base_types_client.py
similarity index 76%
rename from tempest/services/volume/json/admin/volume_types_client.py
rename to tempest/services/volume/base/admin/base_types_client.py
index cd61859..867273e 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/base/admin/base_types_client.py
@@ -20,10 +20,8 @@
 from tempest.common import service_client
 
 
-class BaseVolumeTypesClient(service_client.ServiceClient):
-    """
-    Client class to send CRUD Volume Types API requests to a Cinder endpoint
-    """
+class BaseTypesClient(service_client.ServiceClient):
+    """Client class to send CRUD Volume Types API requests"""
 
     def is_resource_deleted(self, resource):
         # to use this method self.resource must be defined to respective value
@@ -49,10 +47,10 @@
         """Returns the primary type of resource this client works with."""
         return 'volume-type/encryption-type'
 
-    def list_volume_types(self, params=None):
+    def list_volume_types(self, **params):
         """List all the volume_types created."""
         url = 'types'
-        if params is not None:
+        if params:
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
@@ -68,19 +66,13 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_volume_type(self, name, **kwargs):
-        """
-        Creates a new Volume_type.
-        name(Required): Name of volume_type.
-        Following optional keyword arguments are accepted:
-        extra_specs: A dictionary of values to be used as extra_specs.
-        """
-        post_body = {
-            'name': name,
-            'extra_specs': kwargs.get('extra_specs'),
-        }
+    def create_volume_type(self, **kwargs):
+        """Create volume type.
 
-        post_body = json.dumps({'volume_type': post_body})
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#createVolumeType
+        """
+        post_body = json.dumps({'volume_type': kwargs})
         resp, body = self.post('types', post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
@@ -92,10 +84,17 @@
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def list_volume_types_extra_specs(self, vol_type_id, params=None):
-        """List all the volume_types extra specs created."""
+    def list_volume_types_extra_specs(self, vol_type_id, **params):
+        """List all the volume_types extra specs created.
+
+        TODO: Current api-site doesn't contain this API description.
+        After fixing the api-site, we need to fix here also for putting
+        the link to api-site.
+
+
+        """
         url = 'types/%s/extra_specs' % str(vol_type_id)
-        if params is not None:
+        if params:
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
@@ -103,23 +102,23 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def show_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
+    def show_volume_type_extra_specs(self, vol_type_id, extra_specs_name):
         """Returns the details of a single volume_type extra spec."""
         url = "types/%s/extra_specs/%s" % (str(vol_type_id),
-                                           str(extra_spec_name))
+                                           str(extra_specs_name))
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_volume_type_extra_specs(self, vol_type_id, extra_spec):
-        """
-        Creates a new Volume_type extra spec.
+    def create_volume_type_extra_specs(self, vol_type_id, extra_specs):
+        """Creates a new Volume_type extra spec.
+
         vol_type_id: Id of volume_type.
         extra_specs: A dictionary of values to be used as extra_specs.
         """
         url = "types/%s/extra_specs" % str(vol_type_id)
-        post_body = json.dumps({'extra_specs': extra_spec})
+        post_body = json.dumps({'extra_specs': extra_specs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
@@ -133,9 +132,9 @@
         return service_client.ResponseBody(resp, body)
 
     def update_volume_type_extra_specs(self, vol_type_id, extra_spec_name,
-                                       extra_spec):
-        """
-        Update a volume_type extra spec.
+                                       extra_specs):
+        """Update a volume_type extra spec.
+
         vol_type_id: Id of volume_type.
         extra_spec_name: Name of the extra spec to be updated.
         extra_spec: A dictionary of with key as extra_spec_name and the
@@ -143,15 +142,15 @@
         """
         url = "types/%s/extra_specs/%s" % (str(vol_type_id),
                                            str(extra_spec_name))
-        put_body = json.dumps(extra_spec)
+        put_body = json.dumps(extra_specs)
         resp, body = self.put(url, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
     def show_encryption_type(self, vol_type_id):
-        """
-        Get the volume encryption type for the specified volume type.
+        """Get the volume encryption type for the specified volume type.
+
         vol_type_id: Id of volume_type.
         """
         url = "/types/%s/encryption" % str(vol_type_id)
@@ -161,19 +160,14 @@
         return service_client.ResponseBody(resp, body)
 
     def create_encryption_type(self, vol_type_id, **kwargs):
-        """
-        Create a new encryption type for the specified volume type.
+        """Create encryption type.
 
-        vol_type_id: Id of volume_type.
-        provider: Class providing encryption support.
-        cipher: Encryption algorithm/mode to use.
-        key_size: Size of the encryption key, in bits.
-        control_location: Notional service where encryption is performed.
+        TODO: Current api-site doesn't contain this API description.
+        After fixing the api-site, we need to fix here also for putting
+        the link to api-site.
         """
         url = "/types/%s/encryption" % str(vol_type_id)
-        post_body = {}
-        post_body.update(kwargs)
-        post_body = json.dumps({'encryption': post_body})
+        post_body = json.dumps({'encryption': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
@@ -185,7 +179,3 @@
             "/types/%s/encryption/provider" % str(vol_type_id))
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class VolumeTypesClient(BaseVolumeTypesClient):
-    """Volume V1 Volume Types client"""
diff --git a/tempest/services/volume/json/availability_zone_client.py b/tempest/services/volume/base/base_availability_zone_client.py
similarity index 82%
rename from tempest/services/volume/json/availability_zone_client.py
rename to tempest/services/volume/base/base_availability_zone_client.py
index 4d24ede..b63fdc2 100644
--- a/tempest/services/volume/json/availability_zone_client.py
+++ b/tempest/services/volume/base/base_availability_zone_client.py
@@ -18,16 +18,10 @@
 from tempest.common import service_client
 
 
-class BaseVolumeAvailabilityZoneClient(service_client.ServiceClient):
+class BaseAvailabilityZoneClient(service_client.ServiceClient):
 
     def list_availability_zones(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class VolumeAvailabilityZoneClient(BaseVolumeAvailabilityZoneClient):
-    """
-    Volume V1 availability zone client.
-    """
diff --git a/tempest/services/volume/json/backups_client.py b/tempest/services/volume/base/base_backups_client.py
similarity index 81%
rename from tempest/services/volume/json/backups_client.py
rename to tempest/services/volume/base/base_backups_client.py
index 6827c93..fc9a40a 100644
--- a/tempest/services/volume/json/backups_client.py
+++ b/tempest/services/volume/base/base_backups_client.py
@@ -16,7 +16,6 @@
 import time
 
 from oslo_serialization import jsonutils as json
-
 from tempest_lib import exceptions as lib_exc
 
 from tempest.common import service_client
@@ -24,30 +23,19 @@
 
 
 class BaseBackupsClient(service_client.ServiceClient):
-    """
-    Client class to send CRUD Volume backup API requests to a Cinder endpoint
-    """
+    """Client class to send CRUD Volume backup API requests"""
 
-    def create_backup(self, volume_id, container=None, name=None,
-                      description=None):
+    def create_backup(self, **kwargs):
         """Creates a backup of volume."""
-        post_body = {'volume_id': volume_id}
-        if container:
-            post_body['container'] = container
-        if name:
-            post_body['name'] = name
-        if description:
-            post_body['description'] = description
-        post_body = json.dumps({'backup': post_body})
+        post_body = json.dumps({'backup': kwargs})
         resp, body = self.post('backups', post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def restore_backup(self, backup_id, volume_id=None):
+    def restore_backup(self, backup_id, **kwargs):
         """Restore volume from backup."""
-        post_body = {'volume_id': volume_id}
-        post_body = json.dumps({'restore': post_body})
+        post_body = json.dumps({'restore': kwargs})
         resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
@@ -85,11 +73,9 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def import_backup(self, backup_service, backup_url):
+    def import_backup(self, **kwargs):
         """Import backup metadata record."""
-        post_body = {'backup_service': backup_service,
-                     'backup_url': backup_url}
-        post_body = json.dumps({'backup-record': post_body})
+        post_body = json.dumps({'backup-record': kwargs})
         resp, body = self.post("backups/import_record", post_body)
         body = json.loads(body)
         self.expected_success(201, resp.status)
@@ -126,7 +112,3 @@
             if int(time.time()) - start_time >= self.build_timeout:
                 raise exceptions.TimeoutException
             time.sleep(self.build_interval)
-
-
-class BackupsClient(BaseBackupsClient):
-    """Volume V1 Backups client"""
diff --git a/tempest/services/volume/json/extensions_client.py b/tempest/services/volume/base/base_extensions_client.py
similarity index 90%
rename from tempest/services/volume/json/extensions_client.py
rename to tempest/services/volume/base/base_extensions_client.py
index 5744d4a..afc3f6b 100644
--- a/tempest/services/volume/json/extensions_client.py
+++ b/tempest/services/volume/base/base_extensions_client.py
@@ -26,9 +26,3 @@
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class ExtensionsClient(BaseExtensionsClient):
-    """
-    Volume V1 extensions client.
-    """
diff --git a/tempest/services/volume/json/qos_client.py b/tempest/services/volume/base/base_qos_client.py
similarity index 91%
rename from tempest/services/volume/json/qos_client.py
rename to tempest/services/volume/base/base_qos_client.py
index c79168c..697e902 100644
--- a/tempest/services/volume/json/qos_client.py
+++ b/tempest/services/volume/base/base_qos_client.py
@@ -67,15 +67,13 @@
                 raise exceptions.TimeoutException
             time.sleep(self.build_interval)
 
-    def create_qos(self, name, consumer, **kwargs):
+    def create_qos(self, **kwargs):
         """Create a QoS Specification.
 
-        name : name of the QoS specifications
-        consumer : conumer of Qos ( front-end / back-end / both )
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#createQoSSpec
         """
-        post_body = {'name': name, 'consumer': consumer}
-        post_body.update(kwargs)
-        post_body = json.dumps({'qos_specs': post_body})
+        post_body = json.dumps({'qos_specs': kwargs})
         resp, body = self.post('qos-specs', post_body)
         self.expected_success(200, resp.status)
         body = json.loads(body)
@@ -107,7 +105,8 @@
     def set_qos_key(self, qos_id, **kwargs):
         """Set the specified keys/values of QoS specification.
 
-        kwargs : it is the dictionary of the key=value pairs to set
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#setQoSKey
         """
         put_body = json.dumps({"qos_specs": kwargs})
         resp, body = self.put('qos-specs/%s' % qos_id, put_body)
@@ -118,7 +117,9 @@
     def unset_qos_key(self, qos_id, keys):
         """Unset the specified keys of QoS specification.
 
-        keys : it is the array of the keys to unset
+        :param keys: keys to delete from the QoS specification.
+
+        TODO(jordanP): Add a link once LP #1524877 is fixed.
         """
         put_body = json.dumps({'keys': keys})
         resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
@@ -155,7 +156,3 @@
         resp, body = self.get(url)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class QosSpecsClient(BaseQosSpecsClient):
-    """Volume V1 QoS client."""
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/base/base_snapshots_client.py
similarity index 83%
rename from tempest/services/volume/json/snapshots_client.py
rename to tempest/services/volume/base/base_snapshots_client.py
index 3fcf18c..1388e9c 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/base/base_snapshots_client.py
@@ -29,7 +29,7 @@
 
     create_resp = 200
 
-    def list_snapshots(self, detail=False, params=None):
+    def list_snapshots(self, detail=False, **params):
         """List all the snapshot."""
         url = 'snapshots'
         if detail:
@@ -50,17 +50,13 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_snapshot(self, volume_id, **kwargs):
+    def create_snapshot(self, **kwargs):
+        """Creates a new snapshot.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#createSnapshot
         """
-        Creates a new snapshot.
-        volume_id(Required): id of the volume.
-        force: Create a snapshot even if the volume attached (Default=False)
-        display_name: Optional snapshot Name.
-        display_description: User friendly snapshot description.
-        """
-        post_body = {'volume_id': volume_id}
-        post_body.update(kwargs)
-        post_body = json.dumps({'snapshot': post_body})
+        post_body = json.dumps({'snapshot': kwargs})
         resp, body = self.post('snapshots', post_body)
         body = json.loads(body)
         self.expected_success(self.create_resp, resp.status)
@@ -136,13 +132,14 @@
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def update_snapshot_status(self, snapshot_id, status, progress):
+    def update_snapshot_status(self, snapshot_id, **kwargs):
         """Update the specified snapshot's status."""
-        post_body = {
-            'status': status,
-            'progress': progress
-        }
-        post_body = json.dumps({'os-update_snapshot_status': post_body})
+        # TODO(gmann): api-site doesn't contain doc ref
+        # for this API. After fixing the api-site, we need to
+        # add the link here.
+        # Bug https://bugs.launchpad.net/openstack-api-site/+bug/1532645
+
+        post_body = json.dumps({'os-update_snapshot_status': kwargs})
         url = 'snapshots/%s/action' % str(snapshot_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
@@ -165,18 +162,26 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def update_snapshot_metadata(self, snapshot_id, metadata):
+    def update_snapshot_metadata(self, snapshot_id, **kwargs):
         """Update metadata for the snapshot."""
-        put_body = json.dumps({'metadata': metadata})
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529063
+        put_body = json.dumps(kwargs)
         url = "snapshots/%s/metadata" % str(snapshot_id)
         resp, body = self.put(url, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
+    def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
         """Update metadata item for the snapshot."""
-        put_body = json.dumps({'meta': meta_item})
+        # TODO(piyush): Current api-site doesn't contain this API description.
+        # After fixing the api-site, we need to fix here also for putting the
+        # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
+        put_body = json.dumps(kwargs)
         url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
         resp, body = self.put(url, put_body)
         body = json.loads(body)
@@ -196,7 +201,3 @@
         resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
-
-
-class SnapshotsClient(BaseSnapshotsClient):
-    """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/base/base_volumes_client.py
similarity index 77%
rename from tempest/services/volume/json/volumes_client.py
rename to tempest/services/volume/base/base_volumes_client.py
index 9304f63..d4435bc 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/base/base_volumes_client.py
@@ -23,9 +23,7 @@
 
 
 class BaseVolumesClient(service_client.ServiceClient):
-    """
-    Base client class to send CRUD Volume API requests to a Cinder endpoint
-    """
+    """Base client class to send CRUD Volume API requests"""
 
     create_resp = 200
 
@@ -73,23 +71,15 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_volume(self, size=None, **kwargs):
+    def create_volume(self, **kwargs):
+        """Creates a new Volume.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#createVolume
         """
-        Creates a new Volume.
-        size: Size of volume in GB.
-        Following optional keyword arguments are accepted:
-        display_name: Optional Volume Name(only for V1).
-        name: Optional Volume Name(only for V2).
-        metadata: A dictionary of values to be used as metadata.
-        volume_type: Optional Name of volume_type for the volume
-        snapshot_id: When specified the volume is created from this snapshot
-        imageRef: When specified the volume is created from this image
-        """
-        if size is None:
-            size = self.default_volume_size
-        post_body = {'size': size}
-        post_body.update(kwargs)
-        post_body = json.dumps({'volume': post_body})
+        if 'size' not in kwargs:
+            kwargs['size'] = self.default_volume_size
+        post_body = json.dumps({'volume': kwargs})
         resp, body = self.post('volumes', post_body)
         body = json.loads(body)
         self.expected_success(self.create_resp, resp.status)
@@ -109,35 +99,26 @@
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def upload_volume(self, volume_id, image_name, disk_format):
+    def upload_volume(self, volume_id, **kwargs):
         """Uploads a volume in Glance."""
-        post_body = {
-            'image_name': image_name,
-            'disk_format': disk_format
-        }
-        post_body = json.dumps({'os-volume_upload_image': post_body})
+        post_body = json.dumps({'os-volume_upload_image': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def attach_volume(self, volume_id, instance_uuid, mountpoint):
+    def attach_volume(self, volume_id, **kwargs):
         """Attaches a volume to a given instance on a given mountpoint."""
-        post_body = {
-            'instance_uuid': instance_uuid,
-            'mountpoint': mountpoint,
-        }
-        post_body = json.dumps({'os-attach': post_body})
+        post_body = json.dumps({'os-attach': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def set_bootable_volume(self, volume_id, bootable):
+    def set_bootable_volume(self, volume_id, **kwargs):
         """set a bootable flag for a volume - true or false."""
-        post_body = {"bootable": bootable}
-        post_body = json.dumps({'os-set_bootable': post_body})
+        post_body = json.dumps({'os-set_bootable': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(200, resp.status)
@@ -145,8 +126,7 @@
 
     def detach_volume(self, volume_id):
         """Detaches a volume from an instance."""
-        post_body = {}
-        post_body = json.dumps({'os-detach': post_body})
+        post_body = json.dumps({'os-detach': {}})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
@@ -154,8 +134,7 @@
 
     def reserve_volume(self, volume_id):
         """Reserves a volume."""
-        post_body = {}
-        post_body = json.dumps({'os-reserve': post_body})
+        post_body = json.dumps({'os-reserve': {}})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
@@ -163,8 +142,7 @@
 
     def unreserve_volume(self, volume_id):
         """Restore a reserved volume ."""
-        post_body = {}
-        post_body = json.dumps({'os-unreserve': post_body})
+        post_body = json.dumps({'os-unreserve': {}})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
@@ -186,20 +164,17 @@
         """Returns the primary type of resource this client works with."""
         return 'volume'
 
-    def extend_volume(self, volume_id, extend_size):
+    def extend_volume(self, volume_id, **kwargs):
         """Extend a volume."""
-        post_body = {
-            'new_size': extend_size
-        }
-        post_body = json.dumps({'os-extend': post_body})
+        post_body = json.dumps({'os-extend': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def reset_volume_status(self, volume_id, status):
+    def reset_volume_status(self, volume_id, **kwargs):
         """Reset the Specified Volume's Status."""
-        post_body = json.dumps({'os-reset_status': {"status": status}})
+        post_body = json.dumps({'os-reset_status': kwargs})
         resp, body = self.post('volumes/%s/action' % volume_id, post_body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
@@ -220,14 +195,9 @@
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_volume_transfer(self, vol_id, display_name=None):
+    def create_volume_transfer(self, **kwargs):
         """Create a volume transfer."""
-        post_body = {
-            'volume_id': vol_id
-        }
-        if display_name:
-            post_body['name'] = display_name
-        post_body = json.dumps({'transfer': post_body})
+        post_body = json.dumps({'transfer': kwargs})
         resp, body = self.post('os-volume-transfer', post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
@@ -241,7 +211,7 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def list_volume_transfers(self, params=None):
+    def list_volume_transfers(self, **params):
         """List all the volume transfers created."""
         url = 'os-volume-transfer'
         if params:
@@ -257,24 +227,18 @@
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def accept_volume_transfer(self, transfer_id, transfer_auth_key):
+    def accept_volume_transfer(self, transfer_id, **kwargs):
         """Accept a volume transfer."""
-        post_body = {
-            'auth_key': transfer_auth_key,
-        }
         url = 'os-volume-transfer/%s/accept' % transfer_id
-        post_body = json.dumps({'accept': post_body})
+        post_body = json.dumps({'accept': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def update_volume_readonly(self, volume_id, readonly):
+    def update_volume_readonly(self, volume_id, **kwargs):
         """Update the Specified Volume readonly."""
-        post_body = {
-            'readonly': readonly
-        }
-        post_body = json.dumps({'os-update_readonly_flag': post_body})
+        post_body = json.dumps({'os-update_readonly_flag': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         self.expected_success(202, resp.status)
@@ -329,16 +293,8 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def retype_volume(self, volume_id, volume_type, **kwargs):
+    def retype_volume(self, volume_id, **kwargs):
         """Updates volume with new volume type."""
-        post_body = {'new_type': volume_type}
-        post_body.update(kwargs)
-        post_body = json.dumps({'os-retype': post_body})
+        post_body = json.dumps({'os-retype': kwargs})
         resp, body = self.post('volumes/%s/action' % volume_id, post_body)
         self.expected_success(202, resp.status)
-
-
-class VolumesClient(BaseVolumesClient):
-    """
-    Client class to send CRUD Volume V1 API requests to a Cinder endpoint
-    """
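
With the positional parameters folded into **kwargs, callers now build the action body themselves. A minimal usage sketch, assuming an already-instantiated volumes client (`volumes_client`), a second client for the transfer recipient (`alt_volumes_client`), and an existing `volume_id` -- all placeholder names:

    # Placeholder clients/ids; the keyword names mirror the body keys the old
    # positional signatures used to build internally.
    volumes_client.extend_volume(volume_id, new_size=2)
    volumes_client.reset_volume_status(volume_id, status='available')
    volumes_client.retype_volume(volume_id, new_type='fast')
    volumes_client.update_volume_readonly(volume_id, readonly=True)

    transfer = volumes_client.create_volume_transfer(
        volume_id=volume_id, name='example-transfer')['transfer']
    alt_volumes_client.accept_volume_transfer(
        transfer['id'], auth_key=transfer['auth_key'])
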
diff --git a/tempest/services/volume/json/admin/volume_hosts_client.py b/tempest/services/volume/json/admin/volume_hosts_client.py
deleted file mode 100644
index ab9cd5a..0000000
--- a/tempest/services/volume/json/admin/volume_hosts_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.common import service_client
-
-
-class BaseVolumeHostsClient(service_client.ServiceClient):
-    """
-    Client class to send CRUD Volume Hosts API requests to a Cinder endpoint
-    """
-
-    def list_hosts(self, params=None):
-        """Lists all hosts."""
-
-        url = 'os-hosts'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return service_client.ResponseBody(resp, body)
-
-
-class VolumeHostsClient(BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume Host API V1 requests to a Cinder endpoint
-    """
diff --git a/tempest/services/volume/json/__init__.py b/tempest/services/volume/v1/__init__.py
similarity index 100%
copy from tempest/services/volume/json/__init__.py
copy to tempest/services/volume/v1/__init__.py
diff --git a/tempest/services/volume/json/__init__.py b/tempest/services/volume/v1/json/__init__.py
similarity index 100%
rename from tempest/services/volume/json/__init__.py
rename to tempest/services/volume/v1/json/__init__.py
diff --git a/tempest/services/volume/json/admin/__init__.py b/tempest/services/volume/v1/json/admin/__init__.py
similarity index 100%
rename from tempest/services/volume/json/admin/__init__.py
rename to tempest/services/volume/v1/json/admin/__init__.py
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/admin/hosts_client.py
similarity index 67%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/admin/hosts_client.py
index f0cc03f..3b52968 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/admin/hosts_client.py
@@ -1,4 +1,4 @@
-# Copyright 2014 OpenStack Foundation
+# Copyright 2013 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,12 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base.admin import base_hosts_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class HostsClient(base_hosts_client.BaseHostsClient):
+    """Client class to send CRUD Volume Host API V1 requests"""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/admin/quotas_client.py
similarity index 64%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/admin/quotas_client.py
index f0cc03f..27fc301 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/admin/quotas_client.py
@@ -1,5 +1,4 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
+# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -13,12 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base.admin import base_quotas_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class QuotasClient(base_quotas_client.BaseQuotasClient):
+    """Client class to send CRUD Volume Type API V1 requests"""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/admin/services_client.py
similarity index 67%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/admin/services_client.py
index f0cc03f..2bffd55 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/admin/services_client.py
@@ -1,4 +1,4 @@
-# Copyright 2014 OpenStack Foundation
+# Copyright 2014 NEC Corporation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,12 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base.admin import base_services_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class ServicesClient(base_services_client.BaseServicesClient):
+    """Volume V1 volume services client"""
diff --git a/tempest/services/volume/v2/json/admin/volume_types_client.py b/tempest/services/volume/v1/json/admin/types_client.py
similarity index 71%
copy from tempest/services/volume/v2/json/admin/volume_types_client.py
copy to tempest/services/volume/v1/json/admin/types_client.py
index 1b9ff51..0e84296 100644
--- a/tempest/services/volume/v2/json/admin/volume_types_client.py
+++ b/tempest/services/volume/v1/json/admin/types_client.py
@@ -13,12 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_types_client
+from tempest.services.volume.base.admin import base_types_client
 
 
-class VolumeTypesV2Client(volume_types_client.BaseVolumeTypesClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class TypesClient(base_types_client.BaseTypesClient):
+    """Volume V1 Volume Types client"""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/availability_zone_client.py
similarity index 67%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/availability_zone_client.py
index f0cc03f..3a27027 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/availability_zone_client.py
@@ -1,4 +1,4 @@
-# Copyright 2014 OpenStack Foundation
+# Copyright 2014 NEC Corporation.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,12 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base import base_availability_zone_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class AvailabilityZoneClient(
+        base_availability_zone_client.BaseAvailabilityZoneClient):
+    """Volume V1 availability zone client."""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/backups_client.py
similarity index 71%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/backups_client.py
index f0cc03f..ac6db6a 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/backups_client.py
@@ -13,12 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base import base_backups_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class BackupsClient(base_backups_client.BaseBackupsClient):
+    """Volume V1 Backups client"""
diff --git a/tempest/services/volume/v2/json/admin/volume_types_client.py b/tempest/services/volume/v1/json/extensions_client.py
similarity index 71%
copy from tempest/services/volume/v2/json/admin/volume_types_client.py
copy to tempest/services/volume/v1/json/extensions_client.py
index 1b9ff51..f99d0f5 100644
--- a/tempest/services/volume/v2/json/admin/volume_types_client.py
+++ b/tempest/services/volume/v1/json/extensions_client.py
@@ -13,12 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_types_client
+from tempest.services.volume.base import base_extensions_client
 
 
-class VolumeTypesV2Client(volume_types_client.BaseVolumeTypesClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class ExtensionsClient(base_extensions_client.BaseExtensionsClient):
+    """Volume V1 extensions client."""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/qos_client.py
similarity index 67%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/qos_client.py
index f0cc03f..b2b2195 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/qos_client.py
@@ -1,4 +1,3 @@
-# Copyright 2014 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,12 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base import base_qos_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class QosSpecsClient(base_qos_client.BaseQosSpecsClient):
+    """Volume V1 QoS client."""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v1/json/snapshots_client.py
similarity index 64%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v1/json/snapshots_client.py
index f0cc03f..b039c2b 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v1/json/snapshots_client.py
@@ -1,6 +1,3 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
@@ -13,12 +10,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base import base_snapshots_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class SnapshotsClient(base_snapshots_client.BaseSnapshotsClient):
+    """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/services/volume/v2/json/admin/volume_types_client.py b/tempest/services/volume/v1/json/volumes_client.py
similarity index 71%
copy from tempest/services/volume/v2/json/admin/volume_types_client.py
copy to tempest/services/volume/v1/json/volumes_client.py
index 1b9ff51..7782043 100644
--- a/tempest/services/volume/v2/json/admin/volume_types_client.py
+++ b/tempest/services/volume/v1/json/volumes_client.py
@@ -13,12 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_types_client
+from tempest.services.volume.base import base_volumes_client
 
 
-class VolumeTypesV2Client(volume_types_client.BaseVolumeTypesClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
+class VolumesClient(base_volumes_client.BaseVolumesClient):
+    """Client class to send CRUD Volume V1 API requests"""
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v2/json/admin/hosts_client.py
similarity index 74%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v2/json/admin/hosts_client.py
index f0cc03f..e092c6a 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v2/json/admin/hosts_client.py
@@ -13,12 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base.admin import base_hosts_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
+class HostsClient(base_hosts_client.BaseHostsClient):
+    """Client class to send CRUD Volume V2 API requests"""
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v2/json/admin/quotas_client.py
similarity index 74%
copy from tempest/services/volume/v2/json/admin/volume_hosts_client.py
copy to tempest/services/volume/v2/json/admin/quotas_client.py
index f0cc03f..11e0e22 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v2/json/admin/quotas_client.py
@@ -13,12 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base.admin import base_quotas_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
+class QuotasClient(base_quotas_client.BaseQuotasClient):
+    """Client class to send CRUD Volume V2 API requests"""
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/admin/volume_hosts_client.py b/tempest/services/volume/v2/json/admin/services_client.py
similarity index 74%
rename from tempest/services/volume/v2/json/admin/volume_hosts_client.py
rename to tempest/services/volume/v2/json/admin/services_client.py
index f0cc03f..db19ba9 100644
--- a/tempest/services/volume/v2/json/admin/volume_hosts_client.py
+++ b/tempest/services/volume/v2/json/admin/services_client.py
@@ -13,12 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_hosts_client
+from tempest.services.volume.base.admin import base_services_client
 
 
-class VolumeHostsV2Client(volume_hosts_client.BaseVolumeHostsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
+class ServicesClient(base_services_client.BaseServicesClient):
+    """Client class to send CRUD Volume V2 API requests"""
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/admin/volume_types_client.py b/tempest/services/volume/v2/json/admin/types_client.py
similarity index 74%
rename from tempest/services/volume/v2/json/admin/volume_types_client.py
rename to tempest/services/volume/v2/json/admin/types_client.py
index 1b9ff51..ecf5131 100644
--- a/tempest/services/volume/v2/json/admin/volume_types_client.py
+++ b/tempest/services/volume/v2/json/admin/types_client.py
@@ -13,12 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
-from tempest.services.volume.json.admin import volume_types_client
+from tempest.services.volume.base.admin import base_types_client
 
 
-class VolumeTypesV2Client(volume_types_client.BaseVolumeTypesClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
+class TypesClient(base_types_client.BaseTypesClient):
+    """Client class to send CRUD Volume V2 API requests"""
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/admin/volume_quotas_client.py b/tempest/services/volume/v2/json/admin/volume_quotas_client.py
deleted file mode 100644
index 635b6e1..0000000
--- a/tempest/services/volume/v2/json/admin/volume_quotas_client.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.volume.json.admin import volume_quotas_client
-
-
-class VolumeQuotasV2Client(volume_quotas_client.BaseVolumeQuotasClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
diff --git a/tempest/services/volume/v2/json/admin/volume_services_client.py b/tempest/services/volume/v2/json/admin/volume_services_client.py
deleted file mode 100644
index d0efc38..0000000
--- a/tempest/services/volume/v2/json/admin/volume_services_client.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.volume.json.admin import volume_services_client as vs_cli
-
-
-class VolumesServicesV2Client(vs_cli.BaseVolumesServicesClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
-    api_version = "v2"
diff --git a/tempest/services/volume/v2/json/availability_zone_client.py b/tempest/services/volume/v2/json/availability_zone_client.py
index 2e1ab20..905ebdc 100644
--- a/tempest/services/volume/v2/json/availability_zone_client.py
+++ b/tempest/services/volume/v2/json/availability_zone_client.py
@@ -13,9 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.services.volume.json import availability_zone_client
+from tempest.services.volume.base import base_availability_zone_client
 
 
-class VolumeV2AvailabilityZoneClient(
-        availability_zone_client.BaseVolumeAvailabilityZoneClient):
+class AvailabilityZoneClient(
+        base_availability_zone_client.BaseAvailabilityZoneClient):
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/backups_client.py b/tempest/services/volume/v2/json/backups_client.py
index 1ce11ce..78bab82 100644
--- a/tempest/services/volume/v2/json/backups_client.py
+++ b/tempest/services/volume/v2/json/backups_client.py
@@ -13,11 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.services.volume.json import backups_client
+from tempest.services.volume.base import base_backups_client
 
 
-class BackupsClientV2(backups_client.BaseBackupsClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
+class BackupsClient(base_backups_client.BaseBackupsClient):
+    """Client class to send CRUD Volume V2 API requests"""
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/extensions_client.py b/tempest/services/volume/v2/json/extensions_client.py
index 3e32c0c..245906f 100644
--- a/tempest/services/volume/v2/json/extensions_client.py
+++ b/tempest/services/volume/v2/json/extensions_client.py
@@ -13,8 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.services.volume.json import extensions_client
+from tempest.services.volume.base import base_extensions_client
 
 
-class ExtensionsV2Client(extensions_client.BaseExtensionsClient):
+class ExtensionsClient(base_extensions_client.BaseExtensionsClient):
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/qos_client.py b/tempest/services/volume/v2/json/qos_client.py
index 42bd1c9..3c0f74f 100644
--- a/tempest/services/volume/v2/json/qos_client.py
+++ b/tempest/services/volume/v2/json/qos_client.py
@@ -12,8 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.services.volume.json import qos_client
+from tempest.services.volume.base import base_qos_client
 
 
-class QosSpecsV2Client(qos_client.BaseQosSpecsClient):
+class QosSpecsClient(base_qos_client.BaseQosSpecsClient):
     api_version = "v2"
diff --git a/tempest/services/volume/v2/json/snapshots_client.py b/tempest/services/volume/v2/json/snapshots_client.py
index a94f9cd..a2d415f 100644
--- a/tempest/services/volume/v2/json/snapshots_client.py
+++ b/tempest/services/volume/v2/json/snapshots_client.py
@@ -10,10 +10,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.services.volume.json import snapshots_client
+from tempest.services.volume.base import base_snapshots_client
 
 
-class SnapshotsV2Client(snapshots_client.BaseSnapshotsClient):
+class SnapshotsClient(base_snapshots_client.BaseSnapshotsClient):
     """Client class to send CRUD Volume V2 API requests."""
     api_version = "v2"
     create_resp = 202
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
index a6d081c..b7d9dfb 100644
--- a/tempest/services/volume/v2/json/volumes_client.py
+++ b/tempest/services/volume/v2/json/volumes_client.py
@@ -13,12 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.services.volume.json import volumes_client
+from tempest.services.volume.base import base_volumes_client
 
 
-class VolumesV2Client(volumes_client.BaseVolumesClient):
-    """
-    Client class to send CRUD Volume V2 API requests to a Cinder endpoint
-    """
+class VolumesClient(base_volumes_client.BaseVolumesClient):
+    """Client class to send CRUD Volume V2 API requests"""
     api_version = "v2"
     create_resp = 202
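
After the moves above, the API version is encoded in the package path instead of the class name. A sketch of the resulting import style (module paths and class names as they appear in the renamed files; the aliases are placeholders):

    from tempest.services.volume.v1.json import volumes_client as vol_v1
    from tempest.services.volume.v2.json import volumes_client as vol_v2
    from tempest.services.volume.v2.json.admin import types_client as types_v2

    # Same class name on both sides; only the module path differs.
    VolumesV1 = vol_v1.VolumesClient
    VolumesV2 = vol_v2.VolumesClient   # sets api_version = "v2", create_resp = 202
    TypesV2 = types_v2.TypesClient     # sets api_version = "v2"
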
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 4f1f56c..33842fd 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -33,17 +33,17 @@
 
 The stress test framework can automatically discover tests inside the tempest
 test suite. All tests flagged with the `@stresstest` decorator will be executed.
-In order to use this discovery you have to be in the tempest root directory
-and execute the following:
+In order to use this discovery you have to install the tempest CLI, be in the
+tempest root directory and execute the following:
 
-	run-tempest-stress -a -d 30
+	tempest run-stress -a -d 30
 
 Running the sample test
 -----------------------
 
 To test installation, do the following:
 
-	run-tempest-stress -t tempest/stress/etc/server-create-destroy-test.json -d 30
+	tempest run-stress -t tempest/stress/etc/server-create-destroy-test.json -d 30
 
 This sample test tries to create a few VMs and kill a few VMs.
 
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index d912b25..4f8c6bd 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -91,7 +91,7 @@
         self.logger.info("deleted %s" % self.server_id)
 
     def _create_sec_group(self):
-        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli = self.manager.compute_security_groups_client
         s_name = data_utils.rand_name('sec_grp')
         s_description = data_utils.rand_name('desc')
         self.sec_grp = sec_grp_cli.create_security_group(
@@ -103,16 +103,16 @@
                     from_port=-1, to_port=-1)
 
     def _destroy_sec_grp(self):
-        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli = self.manager.compute_security_groups_client
         sec_grp_cli.delete_security_group(self.sec_grp['id'])
 
     def _create_floating_ip(self):
-        floating_cli = self.manager.floating_ips_client
+        floating_cli = self.manager.compute_floating_ips_client
         self.floating = (floating_cli.create_floating_ip(self.floating_pool)
                          ['floating_ip'])
 
     def _destroy_floating_ip(self):
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
         cli.delete_floating_ip(self.floating['id'])
         cli.wait_for_resource_deletion(self.floating['id'])
         self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
@@ -146,7 +146,7 @@
             self._create_vm()
 
     def wait_disassociate(self):
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
 
         def func():
             floating = (cli.show_floating_ip(self.floating['id'])
@@ -158,7 +158,7 @@
             raise RuntimeError("IP disassociate timeout!")
 
     def run_core(self):
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
         cli.associate_floating_ip_to_server(self.floating['ip'],
                                             self.server_id)
         for method in self.verify:
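
The stress actions now reach the nova-network proxies through the compute_-prefixed attributes of the client manager. A minimal sketch of the renamed lookups; `manager`, `pool` and `server_id` are placeholders for values the action already tracks:

    # 'manager' is the stress action's client manager.
    sec_grp_cli = manager.compute_security_groups_client
    floating_cli = manager.compute_floating_ips_client

    floating = floating_cli.create_floating_ip(pool)['floating_ip']
    floating_cli.associate_floating_ip_to_server(floating['ip'], server_id)
    # ...and during teardown:
    floating_cli.disassociate_floating_ip_from_server(floating['ip'], server_id)
    floating_cli.delete_floating_ip(floating['id'])
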
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
index c376693..3b27885 100644
--- a/tempest/stress/actions/unit_test.py
+++ b/tempest/stress/actions/unit_test.py
@@ -35,6 +35,7 @@
 
 class UnitTest(stressaction.StressAction):
     """This is a special action for running existing unittests as stress test.
+
        You need to pass ``test_method`` and ``class_setup_per``
        using ``kwargs`` in the JSON descriptor;
        ``test_method`` should be the fully qualified name of a unittest,
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index 95841a9..8bbbfc4 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -54,7 +54,7 @@
         self.logger.info("deleted server: %s" % self.server_id)
 
     def _create_sec_group(self):
-        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli = self.manager.compute_security_groups_client
         s_name = data_utils.rand_name('sec_grp')
         s_description = data_utils.rand_name('desc')
         self.sec_grp = sec_grp_cli.create_security_group(
@@ -66,16 +66,16 @@
                     from_port=-1, to_port=-1)
 
     def _destroy_sec_grp(self):
-        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli = self.manager.compute_security_groups_client
         sec_grp_cli.delete_security_group(self.sec_grp['id'])
 
     def _create_floating_ip(self):
-        floating_cli = self.manager.floating_ips_client
+        floating_cli = self.manager.compute_floating_ips_client
         self.floating = (floating_cli.create_floating_ip(self.floating_pool)
                          ['floating_ip'])
 
     def _destroy_floating_ip(self):
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
         cli.delete_floating_ip(self.floating['id'])
         cli.wait_for_resource_deletion(self.floating['id'])
         self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
@@ -98,7 +98,7 @@
         self.logger.info("deleted volume: %s" % self.volume['id'])
 
     def _wait_disassociate(self):
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
 
         def func():
             floating = (cli.show_floating_ip(self.floating['id'])
@@ -111,7 +111,7 @@
 
     def new_server_ops(self):
         self._create_vm()
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
         cli.associate_floating_ip_to_server(self.floating['ip'],
                                             self.server_id)
         if self.ssh_test_before_attach and self.enable_ssh_verify:
@@ -121,6 +121,7 @@
 
     def setUp(self, **kwargs):
         """Note able configuration combinations:
+
             Closest options to the test_stamp_pattern:
                 new_server = True
                 new_volume = True
@@ -160,7 +161,7 @@
         self._create_sec_group()
         self._create_keypair()
         private_key = self.key['private_key']
-        username = CONF.compute.image_ssh_user
+        username = CONF.validation.image_ssh_user
         self.remote_client = remote_client.RemoteClient(self.floating['ip'],
                                                         username,
                                                         pkey=private_key)
@@ -218,7 +219,7 @@
             self._destroy_vm()
 
     def tearDown(self):
-        cli = self.manager.floating_ips_client
+        cli = self.manager.compute_floating_ips_client
         cli.disassociate_floating_ip_from_server(self.floating['ip'],
                                                  self.server_id)
         self._wait_disassociate()
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index 1350d95..1c1fb46 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -16,14 +16,14 @@
 
 from oslo_log import log as logging
 
-from tempest import clients
+from tempest.common import credentials_factory as credentials
 from tempest.common import waiters
 
 LOG = logging.getLogger(__name__)
 
 
 def cleanup():
-    admin_manager = clients.AdminManager()
+    admin_manager = credentials.AdminManager()
 
     body = admin_manager.servers_client.list_servers(all_tenants=True)
     LOG.info("Cleanup::remove %s servers" % len(body['servers']))
@@ -48,7 +48,7 @@
         except Exception:
             pass
 
-    secgrp_client = admin_manager.security_groups_client
+    secgrp_client = admin_manager.compute_security_groups_client
     secgrp = (secgrp_client.list_security_groups(all_tenants=True)
               ['security_groups'])
     secgrp_del = [grp for grp in secgrp if grp['name'] != 'default']
@@ -59,32 +59,32 @@
         except Exception:
             pass
 
-    floating_ips = (admin_manager.floating_ips_client.list_floating_ips()
+    admin_floating_ips_client = admin_manager.compute_floating_ips_client
+    floating_ips = (admin_floating_ips_client.list_floating_ips()
                     ['floating_ips'])
     LOG.info("Cleanup::remove %s floating ips" % len(floating_ips))
     for f in floating_ips:
         try:
-            admin_manager.floating_ips_client.delete_floating_ip(f['id'])
+            admin_floating_ips_client.delete_floating_ip(f['id'])
         except Exception:
             pass
 
-    users = admin_manager.identity_client.get_users()['users']
+    users = admin_manager.users_client.list_users()['users']
     LOG.info("Cleanup::remove %s users" % len(users))
     for user in users:
         if user['name'].startswith("stress_user"):
-            admin_manager.identity_client.delete_user(user['id'])
-
-    tenants = admin_manager.identity_client.list_tenants()['tenants']
+            admin_manager.users_client.delete_user(user['id'])
+    tenants = admin_manager.tenants_client.list_tenants()['tenants']
     LOG.info("Cleanup::remove %s tenants" % len(tenants))
     for tenant in tenants:
         if tenant['name'].startswith("stress_tenant"):
-            admin_manager.identity_client.delete_tenant(tenant['id'])
+            admin_manager.tenants_client.delete_tenant(tenant['id'])
 
     # We have to delete snapshots first or
     # volume deletion may block
 
     _, snaps = admin_manager.snapshots_client.list_snapshots(
-        params={"all_tenants": True})['snapshots']
+        all_tenants=True)['snapshots']
     LOG.info("Cleanup::remove %s snapshots" % len(snaps))
     for v in snaps:
         try:
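
The cleanup path now obtains its admin manager from credentials_factory and passes list filters as keyword arguments. A condensed sketch of the updated calls, using the client names shown above:

    from tempest.common import credentials_factory as credentials

    admin_manager = credentials.AdminManager()

    # Identity lookups go through the dedicated users/tenants clients.
    users = admin_manager.users_client.list_users()['users']
    tenants = admin_manager.tenants_client.list_tenants()['tenants']

    # List filters are keyword arguments rather than a params dict.
    snaps = admin_manager.snapshots_client.list_snapshots(
        all_tenants=True)['snapshots']
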
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 7634d2c..02cb901 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -26,6 +26,7 @@
 
 from tempest import clients
 from tempest.common import cred_client
+from tempest.common import credentials_factory as credentials
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
@@ -48,9 +49,9 @@
 
 
 def _get_compute_nodes(controller, ssh_user, ssh_key=None):
-    """
-    Returns a list of active compute nodes. List is generated by running
-    nova-manage on the controller.
+    """Returns a list of active compute nodes.
+
+    List is generated by running nova-manage on the controller.
     """
     nodes = []
     cmd = "nova-manage service list | grep ^nova-compute"
@@ -68,9 +69,7 @@
 
 def _has_error_in_logs(logfiles, nodes, ssh_user, ssh_key=None,
                        stop_on_error=False):
-    """
-    Detect errors in the nova log files on the controller and compute nodes.
-    """
+    """Detect errors in nova log files on the controller and compute nodes."""
     grep = 'egrep "ERROR|TRACE" %s' % logfiles
     ret = False
     for node in nodes:
@@ -84,9 +83,7 @@
 
 
 def sigchld_handler(signalnum, frame):
-    """
-    Signal handler (only active if stop_on_error is True).
-    """
+    """Signal handler (only active if stop_on_error is True)."""
     for process in processes:
         if (not process['process'].is_alive() and
                 process['process'].exitcode != 0):
@@ -96,9 +93,7 @@
 
 
 def terminate_all_processes(check_interval=20):
-    """
-    Goes through the process list and terminates all child processes.
-    """
+    """Goes through the process list and terminates all child processes."""
     LOG.info("Stopping all processes.")
     for process in processes:
         if process['process'].is_alive():
@@ -111,7 +106,7 @@
         if process['process'].is_alive():
             try:
                 pid = process['process'].pid
-                LOG.warn("Process %d hangs. Send SIGKILL." % pid)
+                LOG.warning("Process %d hangs. Send SIGKILL." % pid)
                 os.kill(pid, signal.SIGKILL)
             except Exception:
                 pass
@@ -119,10 +114,8 @@
 
 
 def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
-    """
-    Workload driver. Executes an action function against a nova-cluster.
-    """
-    admin_manager = clients.AdminManager()
+    """Workload driver. Executes an action function against a nova-cluster."""
+    admin_manager = credentials.AdminManager()
 
     ssh_user = CONF.stress.target_ssh_user
     ssh_key = CONF.stress.target_private_key_path
@@ -145,7 +138,7 @@
         if test.get('use_admin', False):
             manager = admin_manager
         else:
-            manager = clients.Manager()
+            manager = credentials.ConfiguredUserManager()
         for p_number in moves.xrange(test.get('threads', default_thread_num)):
             if test.get('use_isolated_tenants', False):
                 username = data_utils.rand_name("stress_user")
@@ -153,14 +146,23 @@
                 password = "pass"
                 if CONF.identity.auth_version == 'v2':
                     identity_client = admin_manager.identity_client
+                    projects_client = admin_manager.tenants_client
+                    roles_client = admin_manager.roles_client
+                    users_client = admin_manager.users_client
                 else:
                     identity_client = admin_manager.identity_v3_client
+                    projects_client = None
+                    roles_client = None
+                    users_client = None
+                domain = (identity_client.auth_provider.credentials.
+                          get('project_domain_name', 'Default'))
                 credentials_client = cred_client.get_creds_client(
-                    identity_client)
+                    identity_client, projects_client, roles_client,
+                    users_client, project_domain_name=domain)
                 project = credentials_client.create_project(
                     name=tenant_name, description=tenant_name)
                 user = credentials_client.create_user(username, password,
-                                                      project['id'], "email")
+                                                      project, "email")
                 # Add roles specified in config file
                 for conf_role in CONF.auth.tempest_roles:
                     credentials_client.assign_user_role(user, project,
@@ -242,14 +244,13 @@
             had_errors = True
         sum_runs += process['statistic']['runs']
         sum_fails += process['statistic']['fails']
-        LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
-                 (process['p_number'],
-                  process['action'],
-                  process['statistic']['runs'],
-                     process['statistic']['fails']))
-    LOG.info("Summary:")
-    LOG.info("Run %d actions (%d failed)" %
-             (sum_runs, sum_fails))
+        print ("Process %d (%s): Run %d actions (%d failed)" % (
+               process['p_number'],
+               process['action'],
+               process['statistic']['runs'],
+               process['statistic']['fails']))
+    print ("Summary:")
+    print ("Run %d actions (%d failed)" % (sum_runs, sum_fails))
 
     if not had_errors and CONF.stress.full_clean_stack:
         LOG.info("cleaning up")
diff --git a/tempest/stress/etc/sample-unit-test.json b/tempest/stress/etc/sample-unit-test.json
index b388bfe..54433d5 100644
--- a/tempest/stress/etc/sample-unit-test.json
+++ b/tempest/stress/etc/sample-unit-test.json
@@ -1,7 +1,7 @@
 [{"action": "tempest.stress.actions.unit_test.UnitTest",
   "threads": 8,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {"test_method": "tempest.cli.simple_read_only.test_glance.SimpleReadOnlyGlanceClientTest.test_glance_fake_action",
              "class_setup_per": "process"}
   }
diff --git a/tempest/stress/etc/server-create-destroy-test.json b/tempest/stress/etc/server-create-destroy-test.json
index 17d5e1a..bbb5352 100644
--- a/tempest/stress/etc/server-create-destroy-test.json
+++ b/tempest/stress/etc/server-create-destroy-test.json
@@ -1,7 +1,7 @@
 [{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
   "threads": 8,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {}
   }
 ]
diff --git a/tempest/stress/etc/ssh_floating.json b/tempest/stress/etc/ssh_floating.json
index e03fd4f..c502e96 100644
--- a/tempest/stress/etc/ssh_floating.json
+++ b/tempest/stress/etc/ssh_floating.json
@@ -1,7 +1,7 @@
 [{"action": "tempest.stress.actions.ssh_floating.FloatingStress",
   "threads": 8,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {"vm_extra_args": {},
              "new_vm": true,
              "new_sec_group": true,
diff --git a/tempest/stress/etc/stress-tox-job.json b/tempest/stress/etc/stress-tox-job.json
index 9cee316..bfa448d 100644
--- a/tempest/stress/etc/stress-tox-job.json
+++ b/tempest/stress/etc/stress-tox-job.json
@@ -1,25 +1,25 @@
 [{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
   "threads": 8,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {}
   },
   {"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
   "threads": 4,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {}
   },
   {"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
   "threads": 2,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {}
   },
   {"action": "tempest.stress.actions.unit_test.UnitTest",
   "threads": 4,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "required_services": ["neutron"],
   "kwargs": {"test_method": "tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_server_connectivity_stop_start",
              "class_setup_per": "process"}
diff --git a/tempest/stress/etc/volume-attach-delete-test.json b/tempest/stress/etc/volume-attach-delete-test.json
index 4553ff8..d468967 100644
--- a/tempest/stress/etc/volume-attach-delete-test.json
+++ b/tempest/stress/etc/volume-attach-delete-test.json
@@ -1,7 +1,7 @@
 [{"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
   "threads": 4,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {}
   }
 ]
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
index 731f5ed..d8c96fd 100644
--- a/tempest/stress/etc/volume-attach-verify.json
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -1,7 +1,7 @@
 [{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
   "threads": 1,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {"vm_extra_args": {},
              "new_volume": true,
              "new_server": false,
diff --git a/tempest/stress/etc/volume-create-delete-test.json b/tempest/stress/etc/volume-create-delete-test.json
index e8a58f7..a60cde6 100644
--- a/tempest/stress/etc/volume-create-delete-test.json
+++ b/tempest/stress/etc/volume-create-delete-test.json
@@ -1,7 +1,7 @@
 [{"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
   "threads": 4,
-  "use_admin": false,
-  "use_isolated_tenants": false,
+  "use_admin": true,
+  "use_isolated_tenants": true,
   "kwargs": {}
   }
 ]
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index a3d0d17..c8bd652 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -40,32 +40,35 @@
 
     @property
     def action(self):
-        """This methods returns the action. Overload this if you
-        create a stress test wrapper.
+        """This methods returns the action.
+
+        Overload this if you create a stress test wrapper.
         """
         return self.__class__.__name__
 
     def setUp(self, **kwargs):
-        """This method is called before the run method
-        to help the test initialize any structures.
-        kwargs contains arguments passed in from the
-        configuration json file.
+        """Initialize test structures/resources
+
+        This method is called before "run" method to help the test
+        initialize any structures. kwargs contains arguments passed
+        in from the configuration json file.
 
         setUp doesn't count against the time duration.
         """
         self.logger.debug("setUp")
 
     def tearDown(self):
-        """This method is called to do any cleanup
-        after the test is complete.
+        """Cleanup test structures/resources
+
+        This method is called to do any cleanup after the test is complete.
         """
         self.logger.debug("tearDown")
 
     def execute(self, shared_statistic):
-        """This is the main execution entry point called
-        by the driver.   We register a signal handler to
-        allow us to tearDown gracefully, and then exit.
-        We also keep track of how many runs we do.
+        """This is the main execution entry point called by the driver.
+
+        We register a signal handler to allow us to tearDown gracefully,
+        and then exit. We also keep track of how many runs we do.
         """
         signal.signal(signal.SIGHUP, self._shutdown_handler)
         signal.signal(signal.SIGTERM, self._shutdown_handler)
diff --git a/tempest/test.py b/tempest/test.py
index b8ba5f4..9c04ea1 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -19,7 +19,6 @@
 import re
 import sys
 import time
-import urllib
 import uuid
 
 import fixtures
@@ -27,12 +26,14 @@
 from oslo_serialization import jsonutils as json
 from oslo_utils import importutils
 import six
+from six.moves import urllib
+from tempest_lib import decorators
 import testscenarios
 import testtools
 
 from tempest import clients
 from tempest.common import cred_client
-from tempest.common import credentials
+from tempest.common import credentials_factory as credentials
 from tempest.common import fixed_network
 import tempest.common.generator.valid_generator as valid
 import tempest.common.validation_resources as vresources
@@ -43,6 +44,8 @@
 
 CONF = config.CONF
 
+idempotent_id = decorators.idempotent_id
+
 
 def attr(**kwargs):
     """A decorator which applies the testtools attr decorator
@@ -62,23 +65,6 @@
     return decorator
 
 
-def idempotent_id(id):
-    """Stub for metadata decorator"""
-    if not isinstance(id, six.string_types):
-        raise TypeError('Test idempotent_id must be string not %s'
-                        '' % type(id).__name__)
-    uuid.UUID(id)
-
-    def decorator(f):
-        f = testtools.testcase.attr('id-%s' % id)(f)
-        if f.__doc__:
-            f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
-        else:
-            f.__doc__ = 'Test idempotent id: %s' % id
-        return f
-    return decorator
-
-
 def get_service_list():
     service_list = {
         'compute': CONF.service_available.nova,
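
With the local stub removed, tempest.test.idempotent_id is simply an alias for the tempest_lib decorator, so existing tests keep the same spelling. A sketch of the (unchanged) usage; the UUID and the class/method names are placeholders:

    from tempest import test

    class ExampleTest(test.BaseTestCase):
        """Placeholder test class; only the decorator usage matters here."""

        @test.idempotent_id('12345678-1234-5678-1234-567812345678')
        def test_something(self):
            pass
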
@@ -211,6 +197,7 @@
 class BaseTestCase(testtools.testcase.WithAttributes,
                    testtools.TestCase):
     """The test base class defines Tempest framework for class level fixtures.
+
     `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
     by subclasses (enforced via hacking rule T105).
 
@@ -239,6 +226,7 @@
     # Resources required to validate a server using ssh
     validation_resources = {}
     network_resources = {}
+    services_microversion = {}
 
     # NOTE(sdague): log_format is defined inline here instead of using the oslo
     # default because going through the config path recouples config to the
@@ -305,7 +293,7 @@
                     LOG.exception("teardown of %s failed: %s" % (name, te))
                 if not etype:
                     etype, value, trace = sys_exec_info
-        # If exceptions were raised during teardown, an not before, re-raise
+        # If exceptions were raised during teardown, and not before, re-raise
         # the first one
         if re_raise and etype is not None:
             try:
@@ -315,8 +303,10 @@
 
     @classmethod
     def skip_checks(cls):
-        """Class level skip checks. Subclasses verify in here all
-        conditions that might prevent the execution of the entire test class.
+        """Class level skip checks.
+
+        Subclasses verify in here all conditions that might prevent the
+        execution of the entire test class.
         Checks implemented here may not make use of API calls, and should rely on
         configuration alone.
         In general skip checks that require an API call are discouraged.
@@ -343,6 +333,7 @@
     @classmethod
     def setup_credentials(cls):
         """Allocate credentials and the client managers from them.
+
         A test class that requires network resources must override
         setup_credentials and define the required resources before super
         is invoked.
@@ -380,18 +371,18 @@
 
     @classmethod
     def resource_setup(cls):
-        """Class level resource setup for test cases.
-        """
+        """Class level resource setup for test cases."""
         if hasattr(cls, "os"):
             cls.validation_resources = vresources.create_validation_resources(
                 cls.os, cls.validation_resources)
         else:
-            LOG.warn("Client manager not found, validation resources not"
-                     " created")
+            LOG.warning("Client manager not found, validation resources not"
+                        " created")
 
     @classmethod
     def resource_cleanup(cls):
         """Class level resource cleanup for test cases.
+
         Resource cleanup must be able to handle the case of partially setup
         resources, in case a failure during `resource_setup` should happen.
         """
@@ -401,8 +392,8 @@
                                                       cls.validation_resources)
                 cls.validation_resources = {}
             else:
-                LOG.warn("Client manager not found, validation resources not"
-                         " deleted")
+                LOG.warning("Client manager not found, validation resources "
+                            "not deleted")
 
     def setUp(self):
         super(BaseTestCase, self).setUp()
@@ -446,15 +437,24 @@
         """
         if CONF.identity.auth_version == 'v2':
             client = self.os_admin.identity_client
+            project_client = self.os_admin.tenants_client
+            roles_client = self.os_admin.roles_client
+            users_client = self.os_admin.users_client
         else:
             client = self.os_admin.identity_v3_client
+            project_client = None
+            roles_client = None
+            users_client = None
 
         try:
             domain = client.auth_provider.credentials.project_domain_name
         except AttributeError:
             domain = 'Default'
 
-        return cred_client.get_creds_client(client, domain)
+        return cred_client.get_creds_client(client, project_client,
+                                            roles_client,
+                                            users_client,
+                                            project_domain_name=domain)
 
     @classmethod
     def get_identity_version(cls):
@@ -491,7 +491,7 @@
         :param credential_type: string - primary, alt or admin
         :param roles: list of roles
 
-        :returns the created client manager
+        :returns: the created client manager
         :raises skipException: if the requested credentials are not available
         """
         if all([roles, credential_type]):
@@ -519,13 +519,12 @@
             else:
                 raise exceptions.InvalidCredentials(
                     "Invalid credentials type %s" % credential_type)
-        return clients.Manager(credentials=creds, service=cls._service)
+        return clients.Manager(credentials=creds, service=cls._service,
+                               api_microversions=cls.services_microversion)
 
     @classmethod
     def clear_credentials(cls):
-        """
-        Clears creds if set
-        """
+        """Clears creds if set"""
         if hasattr(cls, '_creds_provider'):
             cls._creds_provider.clear_creds()
 
@@ -534,6 +533,7 @@
                                  security_group=None,
                                  security_group_rules=None):
         """Specify which ssh server validation resources should be created.
+
         Each of the arguments must be set to either None, True or False, with
         None - use default from config (security groups and security group
                rules get created when set to None)
@@ -600,17 +600,18 @@
         networks_client = cls.get_client_manager().compute_networks_client
         cred_provider = cls._get_credentials_provider()
         # In case of nova network, isolated tenants are not able to list the
-        # network configured in fixed_network_name, even if the can use it
+        # network configured in fixed_network_name, even if they can use it
         # for their servers, so using an admin network client to validate
         # the network name
         if (not CONF.service_available.neutron and
                 credentials.is_admin_available(
                     identity_version=cls.get_identity_version())):
             admin_creds = cred_provider.get_admin_creds()
-            admin_manager = clients.Manager(admin_creds)
+            admin_manager = clients.Manager(
+                admin_creds, api_microversions=cls.services_microversion)
             networks_client = admin_manager.compute_networks_client
-        return fixed_network.get_tenant_network(cred_provider,
-                                                networks_client)
+        return fixed_network.get_tenant_network(
+            cred_provider, networks_client, CONF.compute.fixed_network_name)
 
     def assertEmpty(self, list, msg=None):
         self.assertTrue(len(list) == 0, msg)
@@ -631,10 +632,11 @@
 
     @staticmethod
     def load_tests(*args):
-        """
-        Wrapper for testscenarios to set the mandatory scenarios variable
-        only in case a real test loader is in place. Will be automatically
-        called in case the variable "load_tests" is set.
+        """Wrapper for testscenarios
+
+        To set the mandatory scenarios variable only in case a real test
+        loader is in place. Will be automatically called in case the variable
+        "load_tests" is set.
         """
         if getattr(args[0], 'suiteClass', None) is not None:
             loader, standard_tests, pattern = args
@@ -649,8 +651,7 @@
 
     @staticmethod
     def generate_scenario(description):
-        """
-        Generates the test scenario list for a given description.
+        """Generates the test scenario list for a given description.
 
         :param description: A file or dictionary with the following entries:
             name (required) name for the api
@@ -694,7 +695,8 @@
         return scenario_list
 
     def execute(self, description):
-        """
+        """Execute a http call
+
         Execute a http call on an api that are expected to
         result in client errors. First it uses invalid resources that are part
         of the url, and then invalid data for queries and http request bodies.
@@ -774,7 +776,7 @@
         if not json_dict:
             return url, None
         elif method in ["GET", "HEAD", "PUT", "DELETE"]:
-            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
+            return "%s?%s" % (url, urllib.parse.urlencode(json_dict)), None
         else:
             return url, json.dumps(json_dict)
 
@@ -788,8 +790,9 @@
 
     @classmethod
     def set_resource(cls, name, resource):
-        """
-        This function can be used in setUpClass context to register a resoruce
+        """Register a resource for a test
+
+        This function can be used in setUpClass context to register a resource
         for a test.
 
         :param name: The name of the kind of resource such as "flavor", "role",
@@ -799,10 +802,10 @@
         cls._resources[name] = resource
 
     def get_resource(self, name):
-        """
-        Return a valid uuid for a type of resource. If a real resource is
-        needed as part of a url then this method should return one. Otherwise
-        it can return None.
+        """Return a valid uuid for a type of resource.
+
+        If a real resource is needed as part of a url then this method should
+        return one. Otherwise it can return None.
 
         :param name: The name of the kind of resource such as "flavor", "role",
             etc.
@@ -819,9 +822,7 @@
 
 
 def SimpleNegativeAutoTest(klass):
-    """
-    This decorator registers a test function on basis of the class name.
-    """
+    """This decorator registers a test function on basis of the class name."""
     @attr(type=['negative'])
     def generic_test(self):
         if hasattr(self, '_schema'):
@@ -838,10 +839,9 @@
 
 
 def call_until_true(func, duration, sleep_for):
-    """
-    Call the given function until it returns True (and return True) or
-    until the specified duration (in seconds) elapses (and return
-    False).
+    """Call the given function until it returns True (and return True)
+
+    or until the specified duration (in seconds) elapses (and return False).
 
     :param func: A zero argument callable that returns True on success.
     :param duration: The number of seconds for which to attempt a
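The call_until_true docstring above describes a simple poll-until-timeout loop. A minimal standalone sketch of that behaviour, assuming only what the docstring states (the helper and example names are illustrative, not tempest's actual implementation):

    import time


    def poll_until_true(func, duration, sleep_for):
        # Call func() repeatedly until it returns True or `duration`
        # seconds have elapsed, sleeping `sleep_for` seconds between calls.
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False

    # Example: wait up to 2 seconds for a condition that flips after ~0.5s.
    start = time.time()
    assert poll_until_true(lambda: time.time() - start > 0.5, 2, 0.1)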
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 58a9905..108b50d 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -25,14 +25,14 @@
 
 @six.add_metaclass(abc.ABCMeta)
 class TempestPlugin(object):
-    """A TempestPlugin class provides the basic hooks for an external
-    plugin to provide tempest the necessary information to run the plugin.
+    """Provide basic hooks for an external plugin
+
+    To provide tempest the necessary information to run the plugin.
     """
 
     @abc.abstractmethod
     def load_tests(self):
-        """Method to return the information necessary to load the tests in the
-        plugin.
+        """Return the information necessary to load the tests in the plugin.
 
         :return: a tuple with the first value being the test_dir and the second
                  being the top_level
@@ -42,9 +42,10 @@
 
     @abc.abstractmethod
     def register_opts(self, conf):
-        """Method to add additional configuration options to tempest. This
-        method will be run for the plugin during the register_opts() function
-        in tempest.config
+        """Add additional configuration options to tempest.
+
+        This method will be run for the plugin during the register_opts()
+        function in tempest.config
 
         :param ConfigOpts conf: The conf object that can be used to register
             additional options on.
@@ -53,7 +54,7 @@
 
     @abc.abstractmethod
     def get_opt_lists(self):
-        """Method to get a list of options for sample config generation
+        """Get a list of options for sample config generation
 
         :return option_list: A list of tuples with the group name and options
                              in that group.
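For orientation, a hypothetical external plugin implementing the three abstract hooks documented above; the class name, package layout, and paths are invented, and a real plugin would additionally be registered through a setuptools entry point (not shown in this diff):

    import os

    from tempest.test_discover import plugins


    class MyServiceTempestPlugin(plugins.TempestPlugin):

        def load_tests(self):
            # Return (test_dir, top_level) so tempest can discover the
            # plugin's tests.
            top_level = os.path.dirname(os.path.dirname(
                os.path.abspath(__file__)))
            test_dir = os.path.join(top_level, 'my_service_tests', 'tests')
            return test_dir, top_level

        def register_opts(self, conf):
            # Register plugin-specific options on the ConfigOpts object
            # handed in by tempest.config during register_opts().
            pass

        def get_opt_lists(self):
            # (group name, option list) tuples for sample config generation.
            return []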
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index dac7d91..330f370 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -30,8 +30,7 @@
     base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
     base_path = os.path.split(base_path)[0]
     # Load local tempest tests
-    for test_dir in ['tempest/api', 'tempest/scenario',
-                     'tempest/thirdparty']:
+    for test_dir in ['tempest/api', 'tempest/scenario']:
         full_test_dir = os.path.join(base_path, test_dir)
         if not pattern:
             suite.addTests(loader.discover(full_test_dir,
diff --git a/tempest/tests/base.py b/tempest/tests/base.py
index 27eb2c4..fe9268e 100644
--- a/tempest/tests/base.py
+++ b/tempest/tests/base.py
@@ -26,8 +26,7 @@
         self.stubs = mox_fixture.stubs
 
     def patch(self, target, **kwargs):
-        """
-        Returns a started `mock.patch` object for the supplied target.
+        """Returns a started `mock.patch` object for the supplied target.
 
         The caller may then call the returned patcher to create a mock object.
 
diff --git a/tempest/tests/cmd/test_javelin.py b/tempest/tests/cmd/test_javelin.py
index fc3d984..ab6a7a0 100644
--- a/tempest/tests/cmd/test_javelin.py
+++ b/tempest/tests/cmd/test_javelin.py
@@ -24,7 +24,7 @@
 
     def setUp(self):
         super(JavelinUnitTest, self).setUp()
-        javelin.setup_logging()
+        javelin.LOG = mock.MagicMock()
         self.fake_client = mock.MagicMock()
         self.fake_object = mock.MagicMock()
 
@@ -78,38 +78,40 @@
         mocked_function = self.fake_client.volumes.attach_volume
         mocked_function.assert_called_once_with(
             self.fake_object.volume['id'],
-            self.fake_object.server['id'],
-            self.fake_object['device'])
+            instance_uuid=self.fake_object.server['id'],
+            mountpoint=self.fake_object['device'])
 
 
 class TestCreateResources(JavelinUnitTest):
     def test_create_tenants(self):
 
-        self.fake_client.identity.list_tenants.return_value = {'tenants': []}
+        self.fake_client.tenants.list_tenants.return_value = {'tenants': []}
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=self.fake_client))
 
         javelin.create_tenants([self.fake_object['name']])
 
-        mocked_function = self.fake_client.identity.create_tenant
+        mocked_function = self.fake_client.tenants.create_tenant
         mocked_function.assert_called_once_with(self.fake_object['name'])
 
     def test_create_duplicate_tenant(self):
-        self.fake_client.identity.list_tenants.return_value = {'tenants': [
+        self.fake_client.tenants.list_tenants.return_value = {'tenants': [
             {'name': self.fake_object['name']}]}
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=self.fake_client))
 
         javelin.create_tenants([self.fake_object['name']])
 
-        mocked_function = self.fake_client.identity.create_tenant
+        mocked_function = self.fake_client.tenants.create_tenant
         self.assertFalse(mocked_function.called)
 
     def test_create_users(self):
-        self.fake_client.identity.get_tenant_by_name.return_value = \
-            self.fake_object['tenant']
-        self.fake_client.identity.get_user_by_username.side_effect = \
-            lib_exc.NotFound("user is not found")
+        self.useFixture(mockpatch.Patch(
+                        'tempest.common.identity.get_tenant_by_name',
+                        return_value=self.fake_object['tenant']))
+        self.useFixture(mockpatch.Patch(
+                        'tempest.common.identity.get_user_by_username',
+                        side_effect=lib_exc.NotFound("user is not found")))
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=self.fake_client))
 
@@ -117,7 +119,7 @@
 
         fake_tenant_id = self.fake_object['tenant']['id']
         fake_email = "%s@%s" % (self.fake_object['user'], fake_tenant_id)
-        mocked_function = self.fake_client.identity.create_user
+        mocked_function = self.fake_client.users.create_user
         mocked_function.assert_called_once_with(self.fake_object['name'],
                                                 self.fake_object['password'],
                                                 fake_tenant_id,
@@ -125,14 +127,15 @@
                                                 enabled=True)
 
     def test_create_user_missing_tenant(self):
-        self.fake_client.identity.get_tenant_by_name.side_effect = \
-            lib_exc.NotFound("tenant is not found")
+        self.useFixture(mockpatch.Patch(
+                        'tempest.common.identity.get_tenant_by_name',
+                        side_effect=lib_exc.NotFound("tenant is not found")))
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=self.fake_client))
 
         javelin.create_users([self.fake_object])
 
-        mocked_function = self.fake_client.identity.create_user
+        mocked_function = self.fake_client.users.create_user
         self.assertFalse(mocked_function.called)
 
     def test_create_objects(self):
@@ -289,13 +292,14 @@
 
         fake_tenant = self.fake_object['tenant']
         fake_auth = self.fake_client
-        fake_auth.identity.get_tenant_by_name.return_value = fake_tenant
-
+        self.useFixture(mockpatch.Patch(
+                        'tempest.common.identity.get_tenant_by_name',
+                        return_value=fake_tenant))
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=fake_auth))
         javelin.destroy_tenants([fake_tenant])
 
-        mocked_function = fake_auth.identity.delete_tenant
+        mocked_function = fake_auth.tenants.delete_tenant
         mocked_function.assert_called_once_with(fake_tenant['id'])
 
     def test_destroy_users(self):
@@ -304,15 +308,19 @@
         fake_tenant = self.fake_object['tenant']
 
         fake_auth = self.fake_client
-        fake_auth.identity.get_tenant_by_name.return_value = fake_tenant
-        fake_auth.identity.get_user_by_username.return_value = fake_user
+        fake_auth.tenants.list_tenants.return_value = \
+            {'tenants': [fake_tenant]}
+        fake_auth.users.list_users.return_value = {'users': [fake_user]}
 
+        self.useFixture(mockpatch.Patch(
+                        'tempest.common.identity.get_user_by_username',
+                        return_value=fake_user))
         self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                               return_value=fake_auth))
 
         javelin.destroy_users([fake_user])
 
-        mocked_function = fake_auth.identity.delete_user
+        mocked_function = fake_auth.users.delete_user
         mocked_function.assert_called_once_with(fake_user['id'])
 
     def test_destroy_objects(self):
diff --git a/tempest/tests/cmd/test_list_plugins.py b/tempest/tests/cmd/test_list_plugins.py
new file mode 100644
index 0000000..17ddb18
--- /dev/null
+++ b/tempest/tests/cmd/test_list_plugins.py
@@ -0,0 +1,24 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import subprocess
+
+from tempest.tests import base
+
+
+class TestTempestListPlugins(base.TestCase):
+    def test_run_list_plugins(self):
+        return_code = subprocess.call(
+            ['tempest', 'list-plugins'], stdout=subprocess.PIPE)
+        self.assertEqual(return_code, 0)
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a5dea54..193abc7 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -201,7 +201,8 @@
                                    {'alias': 'fake2'},
                                    {'alias': 'not_fake'}]}
         fake_os = mock.MagicMock()
-        fake_os.network_client.list_extensions = fake_list_extensions
+        fake_os.network_extensions_client.list_extensions = (
+            fake_list_extensions)
         self.useFixture(mockpatch.PatchObject(
             verify_tempest_config, 'get_enabled_extensions',
             return_value=(['fake1', 'fake2', 'fake3'])))
@@ -223,7 +224,8 @@
                                    {'alias': 'fake2'},
                                    {'alias': 'not_fake'}]}
         fake_os = mock.MagicMock()
-        fake_os.network_client.list_extensions = fake_list_extensions
+        fake_os.network_extensions_client.list_extensions = (
+            fake_list_extensions)
         self.useFixture(mockpatch.PatchObject(
             verify_tempest_config, 'get_enabled_extensions',
             return_value=(['all'])))
diff --git a/tempest/tests/common/test_admin_available.py b/tempest/tests/common/test_admin_available.py
index a53ed5f..75401db 100644
--- a/tempest/tests/common/test_admin_available.py
+++ b/tempest/tests/common/test_admin_available.py
@@ -15,7 +15,7 @@
 from oslo_config import cfg
 from oslotest import mockpatch
 
-from tempest.common import credentials
+from tempest.common import credentials_factory as credentials
 from tempest import config
 from tempest.tests import base
 from tempest.tests import fake_config
diff --git a/tempest/tests/common/test_alt_available.py b/tempest/tests/common/test_alt_available.py
index 6a86f73..db3f5ec 100644
--- a/tempest/tests/common/test_alt_available.py
+++ b/tempest/tests/common/test_alt_available.py
@@ -15,7 +15,7 @@
 from oslo_config import cfg
 from oslotest import mockpatch
 
-from tempest.common import credentials
+from tempest.common import credentials_factory as credentials
 from tempest import config
 from tempest.tests import base
 from tempest.tests import fake_config
diff --git a/tempest/tests/common/test_api_version_request.py b/tempest/tests/common/test_api_version_request.py
new file mode 100644
index 0000000..38fbfc1
--- /dev/null
+++ b/tempest/tests/common/test_api_version_request.py
@@ -0,0 +1,146 @@
+# Copyright 2014 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import api_version_request
+from tempest import exceptions
+from tempest.tests import base
+
+
+class APIVersionRequestTests(base.TestCase):
+    def test_valid_version_strings(self):
+        def _test_string(version, exp_major, exp_minor):
+            v = api_version_request.APIVersionRequest(version)
+            self.assertEqual(v.ver_major, exp_major)
+            self.assertEqual(v.ver_minor, exp_minor)
+
+        _test_string("1.1", 1, 1)
+        _test_string("2.10", 2, 10)
+        _test_string("5.234", 5, 234)
+        _test_string("12.5", 12, 5)
+        _test_string("2.0", 2, 0)
+        _test_string("2.200", 2, 200)
+
+    def test_null_version(self):
+        v = api_version_request.APIVersionRequest()
+        self.assertTrue(v.is_null())
+
+    def test_invalid_version_strings(self):
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "2")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "200")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "2.1.4")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "200.23.66.3")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "5 .3")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "5. 3")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "5.03")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "02.1")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "2.001")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, " 2.1")
+
+        self.assertRaises(exceptions.InvalidAPIVersionString,
+                          api_version_request.APIVersionRequest, "2.1 ")
+
+    def test_version_comparisons(self):
+        vers2_0 = api_version_request.APIVersionRequest("2.0")
+        vers2_5 = api_version_request.APIVersionRequest("2.5")
+        vers5_23 = api_version_request.APIVersionRequest("5.23")
+        v_null = api_version_request.APIVersionRequest()
+        v_latest = api_version_request.APIVersionRequest('latest')
+
+        self.assertTrue(v_null < vers2_5)
+        self.assertTrue(vers2_0 < vers2_5)
+        self.assertTrue(vers2_0 <= vers2_5)
+        self.assertTrue(vers2_0 <= vers2_0)
+        self.assertTrue(vers2_5 > v_null)
+        self.assertTrue(vers5_23 > vers2_5)
+        self.assertTrue(vers2_0 >= vers2_0)
+        self.assertTrue(vers5_23 >= vers2_5)
+        self.assertTrue(vers2_0 != vers2_5)
+        self.assertTrue(vers2_0 == vers2_0)
+        self.assertTrue(vers2_0 != v_null)
+        self.assertTrue(v_null == v_null)
+        self.assertTrue(vers2_0 <= v_latest)
+        self.assertTrue(vers2_0 != v_latest)
+        self.assertTrue(v_latest == v_latest)
+        self.assertRaises(TypeError, vers2_0.__lt__, "2.1")
+
+    def test_version_matches(self):
+        vers2_0 = api_version_request.APIVersionRequest("2.0")
+        vers2_5 = api_version_request.APIVersionRequest("2.5")
+        vers2_45 = api_version_request.APIVersionRequest("2.45")
+        vers3_3 = api_version_request.APIVersionRequest("3.3")
+        vers3_23 = api_version_request.APIVersionRequest("3.23")
+        vers4_0 = api_version_request.APIVersionRequest("4.0")
+        v_null = api_version_request.APIVersionRequest()
+        v_latest = api_version_request.APIVersionRequest('latest')
+
+        def _check_version_matches(version, version1, version2, check=True):
+            if check:
+                msg = "Version %s does not matches with [%s - %s] range"
+                self.assertTrue(version.matches(version1, version2),
+                                msg % (version.get_string(),
+                                       version1.get_string(),
+                                       version2.get_string()))
+            else:
+                msg = "Version %s matches with [%s - %s] range"
+                self.assertFalse(version.matches(version1, version2),
+                                 msg % (version.get_string(),
+                                        version1.get_string(),
+                                        version2.get_string()))
+
+        _check_version_matches(vers2_5, vers2_0, vers2_45)
+        _check_version_matches(vers2_5, vers2_0, v_null)
+        _check_version_matches(vers2_0, vers2_0, vers2_5)
+        _check_version_matches(vers3_3, vers2_5, vers3_3)
+        _check_version_matches(vers3_3, v_null, vers3_3)
+        _check_version_matches(vers3_3, v_null, vers4_0)
+        _check_version_matches(vers2_0, vers2_5, vers2_45, False)
+        _check_version_matches(vers3_23, vers2_5, vers3_3, False)
+        _check_version_matches(vers2_5, vers2_45, vers2_0, False)
+        _check_version_matches(vers2_5, vers2_0, v_latest)
+        _check_version_matches(v_latest, v_latest, v_latest)
+        _check_version_matches(vers2_5, v_latest, v_latest, False)
+        _check_version_matches(v_latest, vers2_0, vers4_0, False)
+
+        self.assertRaises(ValueError, v_null.matches, vers2_0, vers2_45)
+
+    def test_get_string(self):
+        vers_string = ["3.23", "latest"]
+        for ver in vers_string:
+            ver_obj = api_version_request.APIVersionRequest(ver)
+            self.assertEqual(ver, ver_obj.get_string())
+
+        self.assertIsNotNone(
+            api_version_request.APIVersionRequest().get_string)
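A rough usage sketch of the interface these new tests exercise; the behaviour is inferred from the assertions above rather than from the implementation itself:

    from tempest.common import api_version_request

    requested = api_version_request.APIVersionRequest('2.5')
    lower = api_version_request.APIVersionRequest('2.1')
    upper = api_version_request.APIVersionRequest('latest')

    # Parsed components and string form.
    assert (requested.ver_major, requested.ver_minor) == (2, 5)
    assert requested.get_string() == '2.5'

    # Range check: 2.5 falls within [2.1, latest].
    assert requested.matches(lower, upper)

    # A request built without a version string is the "null" version.
    assert api_version_request.APIVersionRequest().is_null()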
diff --git a/tempest/tests/common/test_api_version_utils.py b/tempest/tests/common/test_api_version_utils.py
new file mode 100644
index 0000000..501f954
--- /dev/null
+++ b/tempest/tests/common/test_api_version_utils.py
@@ -0,0 +1,114 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.common import api_version_utils
+from tempest import exceptions
+from tempest.tests import base
+
+
+class TestVersionSkipLogic(base.TestCase):
+
+    def _test_version(self, test_min_version, test_max_version,
+                      cfg_min_version, cfg_max_version, expected_skip=False):
+        try:
+            api_version_utils.check_skip_with_microversion(test_min_version,
+                                                           test_max_version,
+                                                           cfg_min_version,
+                                                           cfg_max_version)
+        except testtools.TestCase.skipException as e:
+            if not expected_skip:
+                raise testtools.TestCase.failureException(e.message)
+
+    def test_version_min_in_range(self):
+        self._test_version('2.2', '2.10', '2.1', '2.7')
+
+    def test_version_max_in_range(self):
+        self._test_version('2.1', '2.3', '2.2', '2.7')
+
+    def test_version_cfg_in_range(self):
+        self._test_version('2.2', '2.9', '2.3', '2.7')
+
+    def test_version_equal(self):
+        self._test_version('2.2', '2.2', '2.2', '2.2')
+
+    def test_version_below_cfg_min(self):
+        self._test_version('2.2', '2.4', '2.5', '2.7', expected_skip=True)
+
+    def test_version_above_cfg_max(self):
+        self._test_version('2.8', '2.9', '2.3', '2.7', expected_skip=True)
+
+    def test_version_min_greater_than_max(self):
+        self.assertRaises(exceptions.InvalidAPIVersionRange,
+                          self._test_version, '2.8', '2.7', '2.3', '2.7')
+
+    def test_cfg_version_min_greater_than_max(self):
+        self.assertRaises(exceptions.InvalidAPIVersionRange,
+                          self._test_version, '2.2', '2.7', '2.9', '2.7')
+
+
+class TestSelectRequestMicroversion(base.TestCase):
+
+    def _test_request_version(self, test_min_version,
+                              cfg_min_version, expected_version):
+        selected_version = api_version_utils.select_request_microversion(
+            test_min_version, cfg_min_version)
+        self.assertEqual(expected_version, selected_version)
+
+    def test_cfg_min_version_greater(self):
+        self._test_request_version('2.1', '2.3', expected_version='2.3')
+
+    def test_class_min_version_greater(self):
+        self._test_request_version('2.5', '2.3', expected_version='2.5')
+
+    def test_cfg_min_version_none(self):
+        self._test_request_version('2.5', None, expected_version='2.5')
+
+    def test_class_min_version_none(self):
+        self._test_request_version(None, '2.3', expected_version='2.3')
+
+    def test_both_min_version_none(self):
+        self._test_request_version(None, None, expected_version=None)
+
+    def test_both_min_version_equal(self):
+        self._test_request_version('2.3', '2.3', expected_version='2.3')
+
+
+class TestMicroversionHeaderMatches(base.TestCase):
+
+    def test_header_matches(self):
+        microversion_header_name = 'x-openstack-xyz-api-version'
+        request_microversion = '2.1'
+        test_response = {microversion_header_name: request_microversion}
+        api_version_utils.assert_version_header_matches_request(
+            microversion_header_name, request_microversion, test_response)
+
+    def test_header_does_not_match(self):
+        microversion_header_name = 'x-openstack-xyz-api-version'
+        request_microversion = '2.1'
+        test_response = {microversion_header_name: '2.2'}
+        self.assertRaises(
+            exceptions.InvalidHTTPResponseHeader,
+            api_version_utils.assert_version_header_matches_request,
+            microversion_header_name, request_microversion, test_response)
+
+    def test_header_not_present(self):
+        microversion_header_name = 'x-openstack-xyz-api-version'
+        request_microversion = '2.1'
+        test_response = {}
+        self.assertRaises(
+            exceptions.InvalidHTTPResponseHeader,
+            api_version_utils.assert_version_header_matches_request,
+            microversion_header_name, request_microversion, test_response)
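Likewise, a sketch of how the helpers exercised above fit together in a microversion-aware test; the version numbers and header name mirror the values used in these tests and are illustrative only:

    from tempest.common import api_version_utils

    # Raises a skipException when the test range [2.2, 2.10] does not
    # overlap the configured range [2.1, 2.7]; here they overlap, so the
    # test is allowed to run.
    api_version_utils.check_skip_with_microversion('2.2', '2.10',
                                                   '2.1', '2.7')

    # The microversion actually sent with requests is the larger of the
    # test minimum and the configured minimum.
    selected = api_version_utils.select_request_microversion('2.2', '2.1')
    assert selected == '2.2'

    # After a call, verify that the service echoed the requested
    # microversion back in its response headers.
    api_version_utils.assert_version_header_matches_request(
        'x-openstack-xyz-api-version', selected,
        {'x-openstack-xyz-api-version': selected})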
diff --git a/tempest/tests/common/test_cred_provider.py b/tempest/tests/common/test_configured_creds.py
similarity index 95%
rename from tempest/tests/common/test_cred_provider.py
rename to tempest/tests/common/test_configured_creds.py
index d404660..96b75fd 100644
--- a/tempest/tests/common/test_cred_provider.py
+++ b/tempest/tests/common/test_configured_creds.py
@@ -18,8 +18,7 @@
 from tempest_lib.services.identity.v2 import token_client as v2_client
 from tempest_lib.services.identity.v3 import token_client as v3_client
 
-
-from tempest.common import cred_provider
+from tempest.common import credentials_factory as common_creds
 from tempest.common import tempest_fixtures as fixtures
 from tempest import config
 from tempest.tests import base
@@ -65,12 +64,12 @@
 
     def _verify_credentials(self, credentials_class, filled=True,
                             identity_version=None):
-        for ctype in cred_provider.CREDENTIAL_TYPES:
+        for ctype in common_creds.CREDENTIAL_TYPES:
             if identity_version is None:
-                creds = cred_provider.get_configured_credentials(
+                creds = common_creds.get_configured_credentials(
                     credential_type=ctype, fill_in=filled)
             else:
-                creds = cred_provider.get_configured_credentials(
+                creds = common_creds.get_configured_credentials(
                     credential_type=ctype, fill_in=filled,
                     identity_version=identity_version)
             self._check(creds, credentials_class, filled)
diff --git a/tempest/tests/common/test_credentials.py b/tempest/tests/common/test_credentials.py
new file mode 100644
index 0000000..136ac02
--- /dev/null
+++ b/tempest/tests/common/test_credentials.py
@@ -0,0 +1,36 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import credentials_factory as credentials
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestLegacyCredentialsProvider(base.TestCase):
+
+    fixed_params = {'identity_version': 'v2'}
+
+    def setUp(self):
+        super(TestLegacyCredentialsProvider, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def test_get_creds_roles_legacy_invalid(self):
+        test_accounts_class = credentials.LegacyCredentialProvider(
+            **self.fixed_params)
+        self.assertRaises(exceptions.InvalidConfiguration,
+                          test_accounts_class.get_creds_by_roles,
+                          ['fake_role'])
diff --git a/tempest/tests/common/test_dynamic_creds.py b/tempest/tests/common/test_dynamic_creds.py
index 59a5523..de2000d 100644
--- a/tempest/tests/common/test_dynamic_creds.py
+++ b/tempest/tests/common/test_dynamic_creds.py
@@ -17,12 +17,19 @@
 from oslotest import mockpatch
 from tempest_lib.services.identity.v2 import token_client as json_token_client
 
+from tempest.common import credentials_factory as credentials
 from tempest.common import dynamic_creds
 from tempest.common import service_client
 from tempest import config
 from tempest import exceptions
 from tempest.services.identity.v2.json import identity_client as \
     json_iden_client
+from tempest.services.identity.v2.json import roles_client as \
+    json_roles_client
+from tempest.services.identity.v2.json import tenants_client as \
+    json_tenants_client
+from tempest.services.identity.v2.json import users_client as \
+    json_users_client
 from tempest.services.network.json import network_client as json_network_client
 from tempest.tests import base
 from tempest.tests import fake_config
@@ -46,6 +53,8 @@
         cfg.CONF.set_default('operator_role', 'FakeRole',
                              group='object-storage')
         self._mock_list_ec2_credentials('fake_user_id', 'fake_tenant_id')
+        self.fixed_params.update(
+            admin_creds=self._get_fake_admin_creds())
 
     def test_tempest_client(self):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
@@ -54,9 +63,16 @@
         self.assertTrue(isinstance(creds.network_admin_client,
                                    json_network_client.NetworkClient))
 
+    def _get_fake_admin_creds(self):
+        return credentials.get_credentials(
+            fill_in=False,
+            identity_version=self.fixed_params['identity_version'],
+            username='fake_username', password='fake_password',
+            tenant_name='fake_tenant')
+
     def _mock_user_create(self, id, name):
         user_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_users_client.UsersClient,
             'create_user',
             return_value=(service_client.ResponseBody
                           (200, {'user': {'id': id, 'name': name}}))))
@@ -64,7 +80,7 @@
 
     def _mock_tenant_create(self, id, name):
         tenant_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_tenants_client.TenantsClient,
             'create_tenant',
             return_value=(service_client.ResponseBody
                           (200, {'tenant': {'id': id, 'name': name}}))))
@@ -72,7 +88,7 @@
 
     def _mock_list_roles(self, id, name):
         roles_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_roles_client.RolesClient,
             'list_roles',
             return_value=(service_client.ResponseBody
                           (200,
@@ -83,7 +99,7 @@
 
     def _mock_list_2_roles(self):
         roles_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_roles_client.RolesClient,
             'list_roles',
             return_value=(service_client.ResponseBody
                           (200,
@@ -94,7 +110,7 @@
 
     def _mock_assign_user_role(self):
         tenant_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_roles_client.RolesClient,
             'assign_user_role',
             return_value=(service_client.ResponseBody
                           (200, {}))))
@@ -102,7 +118,7 @@
 
     def _mock_list_role(self):
         roles_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_roles_client.RolesClient,
             'list_roles',
             return_value=(service_client.ResponseBody
                           (200, {'roles': [{'id': '1',
@@ -111,7 +127,7 @@
 
     def _mock_list_ec2_credentials(self, user_id, tenant_id):
         ec2_creds_fix = self.useFixture(mockpatch.PatchObject(
-            json_iden_client.IdentityClient,
+            json_users_client.UsersClient,
             'list_user_ec2_credentials',
             return_value=(service_client.ResponseBody
                           (200, {'credentials': [{
@@ -166,11 +182,11 @@
         self._mock_user_create('1234', 'fake_admin_user')
         self._mock_tenant_create('1234', 'fake_admin_tenant')
 
-        user_mock = mock.patch.object(json_iden_client.IdentityClient,
+        user_mock = mock.patch.object(json_roles_client.RolesClient,
                                       'assign_user_role')
         user_mock.start()
         self.addCleanup(user_mock.stop)
-        with mock.patch.object(json_iden_client.IdentityClient,
+        with mock.patch.object(json_roles_client.RolesClient,
                                'assign_user_role') as user_mock:
             admin_creds = creds.get_admin_creds()
         user_mock.assert_has_calls([
@@ -189,11 +205,11 @@
         self._mock_user_create('1234', 'fake_role_user')
         self._mock_tenant_create('1234', 'fake_role_tenant')
 
-        user_mock = mock.patch.object(json_iden_client.IdentityClient,
+        user_mock = mock.patch.object(json_roles_client.RolesClient,
                                       'assign_user_role')
         user_mock.start()
         self.addCleanup(user_mock.stop)
-        with mock.patch.object(json_iden_client.IdentityClient,
+        with mock.patch.object(json_roles_client.RolesClient,
                                'assign_user_role') as user_mock:
             role_creds = creds.get_creds_by_roles(
                 roles=['role1', 'role2'])
@@ -227,11 +243,11 @@
         self._mock_list_roles('123456', 'admin')
         creds.get_admin_creds()
         user_mock = self.patch(
-            'tempest.services.identity.v2.json.identity_client.'
-            'IdentityClient.delete_user')
+            'tempest.services.identity.v2.json.users_client.'
+            'UsersClient.delete_user')
         tenant_mock = self.patch(
-            'tempest.services.identity.v2.json.identity_client.'
-            'IdentityClient.delete_tenant')
+            'tempest.services.identity.v2.json.tenants_client.'
+            'TenantsClient.delete_tenant')
         creds.clear_creds()
         # Verify user delete calls
         calls = user_mock.mock_calls
@@ -306,9 +322,9 @@
         self._mock_router_create('1234', 'fake_router')
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClient.'
-            'add_router_interface_with_subnet_id')
+            'add_router_interface')
         primary_creds = creds.get_primary_creds()
-        router_interface_mock.called_once_with('1234', '1234')
+        router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
         network = primary_creds.network
         subnet = primary_creds.subnet
         router = primary_creds.router
@@ -338,9 +354,9 @@
         self._mock_router_create('1234', 'fake_router')
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClient.'
-            'add_router_interface_with_subnet_id')
+            'add_router_interface')
         creds.get_primary_creds()
-        router_interface_mock.called_once_with('1234', '1234')
+        router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
         router_interface_mock.reset_mock()
         # Create alternate tenant and network
         self._mock_user_create('12345', 'fake_alt_user')
@@ -349,7 +365,8 @@
         self._mock_subnet_create(creds, '12345', 'fake_alt_subnet')
         self._mock_router_create('12345', 'fake_alt_router')
         creds.get_alt_creds()
-        router_interface_mock.called_once_with('12345', '12345')
+        router_interface_mock.assert_called_once_with('12345',
+                                                      subnet_id='12345')
         router_interface_mock.reset_mock()
         # Create admin tenant and networks
         self._mock_user_create('123456', 'fake_admin_user')
@@ -359,10 +376,10 @@
         self._mock_router_create('123456', 'fake_admin_router')
         self._mock_list_roles('123456', 'admin')
         creds.get_admin_creds()
-        self.patch('tempest.services.identity.v2.json.identity_client.'
-                   'IdentityClient.delete_user')
-        self.patch('tempest.services.identity.v2.json.identity_client.'
-                   'IdentityClient.delete_tenant')
+        self.patch('tempest.services.identity.v2.json.users_client.'
+                   'UsersClient.delete_user')
+        self.patch('tempest.services.identity.v2.json.tenants_client.'
+                   'TenantsClient.delete_tenant')
         net = mock.patch.object(creds.networks_admin_client,
                                 'delete_network')
         net_mock = net.start()
@@ -374,23 +391,23 @@
         router_mock = router.start()
         remove_router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClient.'
-            'remove_router_interface_with_subnet_id')
+            'remove_router_interface')
         return_values = ({'status': 200}, {'ports': []})
-        port_list_mock = mock.patch.object(creds.network_admin_client,
+        port_list_mock = mock.patch.object(creds.ports_admin_client,
                                            'list_ports',
                                            return_value=return_values)
 
         port_list_mock.start()
         secgroup_list_mock = mock.patch.object(
-            creds.network_admin_client,
+            creds.security_groups_admin_client,
             'list_security_groups',
             side_effect=side_effect)
         secgroup_list_mock.start()
 
         return_values = (fake_http.fake_httplib({}, status=204), {})
         remove_secgroup_mock = self.patch(
-            'tempest.services.network.json.network_client.'
-            'NetworkClient.delete', return_value=return_values)
+            'tempest.services.network.json.security_groups_client.'
+            'SecurityGroupsClient.delete', return_value=return_values)
         creds.clear_creds()
         # Verify default security group delete
         calls = remove_secgroup_mock.mock_calls
@@ -403,11 +420,11 @@
         # Verify remove router interface calls
         calls = remove_router_interface_mock.mock_calls
         self.assertEqual(len(calls), 3)
-        args = map(lambda x: x[1], calls)
+        args = map(lambda x: (x[1][0], x[2]), calls)
         args = list(args)
-        self.assertIn(('1234', '1234'), args)
-        self.assertIn(('12345', '12345'), args)
-        self.assertIn(('123456', '123456'), args)
+        self.assertIn(('1234', {'subnet_id': '1234'}), args)
+        self.assertIn(('12345', {'subnet_id': '12345'}), args)
+        self.assertIn(('123456', {'subnet_id': '123456'}), args)
         # Verify network delete calls
         calls = net_mock.mock_calls
         self.assertEqual(len(calls), 3)
@@ -445,9 +462,9 @@
         self._mock_router_create('1234', 'fake_alt_router')
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClient.'
-            'add_router_interface_with_subnet_id')
+            'add_router_interface')
         alt_creds = creds.get_alt_creds()
-        router_interface_mock.called_once_with('1234', '1234')
+        router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
         network = alt_creds.network
         subnet = alt_creds.subnet
         router = alt_creds.router
@@ -469,10 +486,10 @@
         self._mock_router_create('1234', 'fake_admin_router')
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClient.'
-            'add_router_interface_with_subnet_id')
+            'add_router_interface')
         self._mock_list_roles('123456', 'admin')
         admin_creds = creds.get_admin_creds()
-        router_interface_mock.called_once_with('1234', '1234')
+        router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
         network = admin_creds.network
         subnet = admin_creds.subnet
         router = admin_creds.router
diff --git a/tempest/tests/common/test_preprov_creds.py b/tempest/tests/common/test_preprov_creds.py
index 8a014af..fd7df16 100644
--- a/tempest/tests/common/test_preprov_creds.py
+++ b/tempest/tests/common/test_preprov_creds.py
@@ -17,17 +17,17 @@
 
 import mock
 from oslo_concurrency.fixture import lockutils as lockutils_fixtures
-from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslotest import mockpatch
+import shutil
 import six
 from tempest_lib import auth
+from tempest_lib import exceptions as lib_exc
 from tempest_lib.services.identity.v2 import token_client
 
 from tempest.common import cred_provider
 from tempest.common import preprov_creds
 from tempest import config
-from tempest import exceptions
 from tempest.tests import base
 from tempest.tests import fake_config
 from tempest.tests import fake_http
@@ -38,7 +38,11 @@
 
     fixed_params = {'name': 'test class',
                     'identity_version': 'v2',
-                    'admin_role': 'admin'}
+                    'test_accounts_file': 'fake_accounts_file',
+                    'accounts_lock_dir': 'fake_locks_dir',
+                    'admin_role': 'admin',
+                    'object_storage_operator_role': 'operator',
+                    'object_storage_reseller_admin_role': 'reseller'}
 
     def setUp(self):
         super(TestPreProvisionedCredentials, self).setUp()
@@ -77,9 +81,13 @@
         self.accounts_mock = self.useFixture(mockpatch.Patch(
             'tempest.common.preprov_creds.read_accounts_yaml',
             return_value=self.test_accounts))
-        cfg.CONF.set_default('test_accounts_file', 'fake_path', group='auth')
         self.useFixture(mockpatch.Patch('os.path.isfile', return_value=True))
 
+    def tearDown(self):
+        super(TestPreProvisionedCredentials, self).tearDown()
+        shutil.rmtree(self.fixed_params['accounts_lock_dir'],
+                      ignore_errors=True)
+
     def _get_hash_list(self, accounts_list):
         hash_list = []
         for account in accounts_list:
@@ -147,11 +155,10 @@
         with mock.patch('six.moves.builtins.open', mock.mock_open(),
                         create=True) as open_mock:
             test_account_class._get_free_hash(hash_list)
-            lock_path = os.path.join(lockutils.get_lock_path(
-                preprov_creds.CONF), 'test_accounts', hash_list[0])
+            lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
+                                     hash_list[0])
             open_mock.assert_called_once_with(lock_path, 'w')
-        mkdir_path = os.path.join(
-            preprov_creds.CONF.oslo_concurrency.lock_path, 'test_accounts')
+        mkdir_path = os.path.join(self.fixed_params['accounts_lock_dir'])
         mkdir_mock.mock.assert_called_once_with(mkdir_path)
 
     @mock.patch('oslo_concurrency.lockutils.lock')
@@ -165,7 +172,7 @@
             **self.fixed_params)
         with mock.patch('six.moves.builtins.open', mock.mock_open(),
                         create=True):
-            self.assertRaises(exceptions.InvalidConfiguration,
+            self.assertRaises(lib_exc.InvalidCredentials,
                               test_account_class._get_free_hash, hash_list)
 
     @mock.patch('oslo_concurrency.lockutils.lock')
@@ -187,9 +194,8 @@
         with mock.patch('six.moves.builtins.open', mock.mock_open(),
                         create=True) as open_mock:
             test_account_class._get_free_hash(hash_list)
-            lock_path = os.path.join(
-                lockutils.get_lock_path(preprov_creds.CONF),
-                'test_accounts', hash_list[3])
+            lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
+                                     hash_list[3])
             open_mock.assert_has_calls([mock.call(lock_path, 'w')])
 
     @mock.patch('oslo_concurrency.lockutils.lock')
@@ -204,11 +210,9 @@
         remove_mock = self.useFixture(mockpatch.Patch('os.remove'))
         rmdir_mock = self.useFixture(mockpatch.Patch('os.rmdir'))
         test_account_class.remove_hash(hash_list[2])
-        hash_path = os.path.join(lockutils.get_lock_path(preprov_creds.CONF),
-                                 'test_accounts',
+        hash_path = os.path.join(self.fixed_params['accounts_lock_dir'],
                                  hash_list[2])
-        lock_path = os.path.join(preprov_creds.CONF.oslo_concurrency.lock_path,
-                                 'test_accounts')
+        lock_path = self.fixed_params['accounts_lock_dir']
         remove_mock.mock.assert_called_once_with(hash_path)
         rmdir_mock.mock.assert_called_once_with(lock_path)
 
@@ -225,8 +229,7 @@
         remove_mock = self.useFixture(mockpatch.Patch('os.remove'))
         rmdir_mock = self.useFixture(mockpatch.Patch('os.rmdir'))
         test_account_class.remove_hash(hash_list[2])
-        hash_path = os.path.join(lockutils.get_lock_path(preprov_creds.CONF),
-                                 'test_accounts',
+        hash_path = os.path.join(self.fixed_params['accounts_lock_dir'],
                                  hash_list[2])
         remove_mock.mock.assert_called_once_with(hash_path)
         rmdir_mock.mock.assert_not_called()
@@ -316,7 +319,7 @@
             return_value=test_accounts))
         test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
             **self.fixed_params)
-        with mock.patch('tempest.services.compute.json.networks_client.'
+        with mock.patch('tempest_lib.services.compute.networks_client.'
                         'NetworksClient.list_networks',
                         return_value={'networks': [{'name': 'network-2',
                                                     'id': 'fake-id',
@@ -329,36 +332,3 @@
         self.assertIn('id', network)
         self.assertEqual('fake-id', network['id'])
         self.assertEqual('network-2', network['name'])
-
-
-class TestNotLockingAccount(base.TestCase):
-
-    fixed_params = {'name': 'test class',
-                    'identity_version': 'v2',
-                    'admin_role': 'admin'}
-
-    def setUp(self):
-        super(TestNotLockingAccount, self).setUp()
-        self.useFixture(fake_config.ConfigFixture())
-        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
-        self.useFixture(lockutils_fixtures.ExternalLockFixture())
-        self.test_accounts = [
-            {'username': 'test_user1', 'tenant_name': 'test_tenant1',
-             'password': 'p'},
-            {'username': 'test_user2', 'tenant_name': 'test_tenant2',
-             'password': 'p'},
-            {'username': 'test_user3', 'tenant_name': 'test_tenant3',
-             'password': 'p'},
-        ]
-        self.useFixture(mockpatch.Patch(
-            'tempest.common.preprov_creds.read_accounts_yaml',
-            return_value=self.test_accounts))
-        cfg.CONF.set_default('test_accounts_file', '', group='auth')
-        self.useFixture(mockpatch.Patch('os.path.isfile', return_value=True))
-
-    def test_get_creds_roles_nonlocking_invalid(self):
-        test_accounts_class = preprov_creds.NonLockingCredentialProvider(
-            **self.fixed_params)
-        self.assertRaises(exceptions.InvalidConfiguration,
-                          test_accounts_class.get_creds_by_roles,
-                          ['fake_role'])
diff --git a/tempest/tests/common/test_service_clients.py b/tempest/tests/common/test_service_clients.py
index 4225da8..f248957 100644
--- a/tempest/tests/common/test_service_clients.py
+++ b/tempest/tests/common/test_service_clients.py
@@ -17,15 +17,6 @@
 import six
 
 from tempest.services.baremetal.v1.json import baremetal_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import interfaces_client
-from tempest.services.compute.json import quota_classes_client
-from tempest.services.compute.json import security_group_rules_client
-from tempest.services.compute.json import server_groups_client
-from tempest.services.compute.json import servers_client
-from tempest.services.compute.json import services_client
-from tempest.services.compute.json import volumes_client \
-    as compute_volumes_client
 from tempest.services.data_processing.v1_1 import data_processing_client
 from tempest.services.database.json import flavors_client as db_flavor_client
 from tempest.services.database.json import versions_client as db_version_client
@@ -35,37 +26,42 @@
 from tempest.services.identity.v3.json import endpoints_client
 from tempest.services.identity.v3.json import identity_client as \
     identity_v3_identity_client
-from tempest.services.identity.v3.json import policy_client
-from tempest.services.identity.v3.json import region_client
-from tempest.services.identity.v3.json import service_client
-from tempest.services.image.v1.json import image_client
-from tempest.services.image.v2.json import image_client as image_v2_client
+from tempest.services.identity.v3.json import policies_client
+from tempest.services.identity.v3.json import regions_client
+from tempest.services.identity.v3.json import services_client
+from tempest.services.image.v1.json import images_client
+from tempest.services.image.v2.json import images_client as images_v2_client
 from tempest.services.messaging.json import messaging_client
 from tempest.services.network.json import network_client
 from tempest.services.object_storage import account_client
 from tempest.services.object_storage import container_client
 from tempest.services.object_storage import object_client
 from tempest.services.orchestration.json import orchestration_client
+from tempest.services.telemetry.json import alarming_client
 from tempest.services.telemetry.json import telemetry_client
-from tempest.services.volume.json.admin import volume_hosts_client
-from tempest.services.volume.json.admin import volume_quotas_client
-from tempest.services.volume.json.admin import volume_services_client
-from tempest.services.volume.json.admin import volume_types_client
-from tempest.services.volume.json import availability_zone_client \
+from tempest.services.volume.v1.json.admin import hosts_client \
+    as volume_hosts_client
+from tempest.services.volume.v1.json.admin import quotas_client \
+    as volume_quotas_client
+from tempest.services.volume.v1.json.admin import services_client \
+    as volume_services_client
+from tempest.services.volume.v1.json.admin import types_client \
+    as volume_types_client
+from tempest.services.volume.v1.json import availability_zone_client \
     as volume_az_client
-from tempest.services.volume.json import backups_client
-from tempest.services.volume.json import extensions_client \
+from tempest.services.volume.v1.json import backups_client
+from tempest.services.volume.v1.json import extensions_client \
     as volume_extensions_client
-from tempest.services.volume.json import qos_client
-from tempest.services.volume.json import snapshots_client
-from tempest.services.volume.json import volumes_client
-from tempest.services.volume.v2.json.admin import volume_hosts_client \
+from tempest.services.volume.v1.json import qos_client
+from tempest.services.volume.v1.json import snapshots_client
+from tempest.services.volume.v1.json import volumes_client
+from tempest.services.volume.v2.json.admin import hosts_client \
     as volume_v2_hosts_client
-from tempest.services.volume.v2.json.admin import volume_quotas_client \
+from tempest.services.volume.v2.json.admin import quotas_client \
     as volume_v2_quotas_client
-from tempest.services.volume.v2.json.admin import volume_services_client \
+from tempest.services.volume.v2.json.admin import services_client \
     as volume_v2_services_client
-from tempest.services.volume.v2.json.admin import volume_types_client \
+from tempest.services.volume.v2.json.admin import types_client \
     as volume_v2_types_client
 from tempest.services.volume.v2.json import availability_zone_client \
     as volume_v2_az_client
@@ -87,14 +83,6 @@
     def test_service_client_creations_with_specified_args(self, mock_init):
         test_clients = [
             baremetal_client.BaremetalClient,
-            floating_ips_client.FloatingIPsClient,
-            interfaces_client.InterfacesClient,
-            quota_classes_client.QuotaClassesClient,
-            security_group_rules_client.SecurityGroupRulesClient,
-            server_groups_client.ServerGroupsClient,
-            servers_client.ServersClient,
-            services_client.ServicesClient,
-            compute_volumes_client.VolumesClient,
             data_processing_client.DataProcessingClient,
             db_flavor_client.DatabaseFlavorsClient,
             db_version_client.DatabaseVersionsClient,
@@ -105,35 +93,36 @@
             object_client.ObjectClient,
             orchestration_client.OrchestrationClient,
             telemetry_client.TelemetryClient,
+            alarming_client.AlarmingClient,
             qos_client.QosSpecsClient,
-            volume_hosts_client.VolumeHostsClient,
-            volume_quotas_client.VolumeQuotasClient,
-            volume_services_client.VolumesServicesClient,
-            volume_types_client.VolumeTypesClient,
-            volume_az_client.VolumeAvailabilityZoneClient,
+            volume_hosts_client.HostsClient,
+            volume_quotas_client.QuotasClient,
+            volume_services_client.ServicesClient,
+            volume_types_client.TypesClient,
+            volume_az_client.AvailabilityZoneClient,
             backups_client.BackupsClient,
             volume_extensions_client.ExtensionsClient,
             snapshots_client.SnapshotsClient,
             volumes_client.VolumesClient,
-            volume_v2_hosts_client.VolumeHostsV2Client,
-            volume_v2_quotas_client.VolumeQuotasV2Client,
-            volume_v2_services_client.VolumesServicesV2Client,
-            volume_v2_types_client.VolumeTypesV2Client,
-            volume_v2_az_client.VolumeV2AvailabilityZoneClient,
-            volume_v2_backups_client.BackupsClientV2,
-            volume_v2_extensions_client.ExtensionsV2Client,
-            volume_v2_qos_client.QosSpecsV2Client,
-            volume_v2_snapshots_client.SnapshotsV2Client,
-            volume_v2_volumes_client.VolumesV2Client,
+            volume_v2_hosts_client.HostsClient,
+            volume_v2_quotas_client.QuotasClient,
+            volume_v2_services_client.ServicesClient,
+            volume_v2_types_client.TypesClient,
+            volume_v2_az_client.AvailabilityZoneClient,
+            volume_v2_backups_client.BackupsClient,
+            volume_v2_extensions_client.ExtensionsClient,
+            volume_v2_qos_client.QosSpecsClient,
+            volume_v2_snapshots_client.SnapshotsClient,
+            volume_v2_volumes_client.VolumesClient,
             identity_v2_identity_client.IdentityClient,
             credentials_client.CredentialsClient,
             endpoints_client.EndPointClient,
             identity_v3_identity_client.IdentityV3Client,
-            policy_client.PolicyClient,
-            region_client.RegionClient,
-            service_client.ServiceClient,
-            image_client.ImageClient,
-            image_v2_client.ImageClientV2
+            policies_client.PoliciesClient,
+            regions_client.RegionsClient,
+            services_client.ServicesClient,
+            images_client.ImagesClient,
+            images_v2_client.ImagesClientV2
         ]
 
         for client in test_clients:
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 68a8295..c7cc638 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -18,7 +18,7 @@
 
 from tempest.common import waiters
 from tempest import exceptions
-from tempest.services.volume.json import volumes_client
+from tempest.services.volume.base import base_volumes_client
 from tempest.tests import base
 
 
@@ -53,7 +53,7 @@
     def test_wait_for_volume_status_error_restoring(self, mock_sleep):
         # Tests that the wait method raises VolumeRestoreErrorException if
         # the volume status is 'error_restoring'.
-        client = mock.Mock(spec=volumes_client.BaseVolumesClient,
+        client = mock.Mock(spec=base_volumes_client.BaseVolumesClient,
                            build_interval=1)
         volume1 = {'volume': {'status': 'restoring-backup'}}
         volume2 = {'volume': {'status': 'error_restoring'}}
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 3ff8e0d..9c2b99e 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -29,7 +29,7 @@
         self.useFixture(fake_config.ConfigFixture())
         self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
         cfg.CONF.set_default('ip_version_for_ssh', 4, group='validation')
-        cfg.CONF.set_default('network_for_ssh', 'public', group='compute')
+        cfg.CONF.set_default('network_for_ssh', 'public', group='validation')
         cfg.CONF.set_default('connect_timeout', 1, group='validation')
 
         self.conn = remote_client.RemoteClient('127.0.0.1', 'user', 'pass')
@@ -79,7 +79,7 @@
     def test_get_number_of_vcpus(self):
         self.ssh_mock.mock.exec_command.return_value = '16'
         self.assertEqual(self.conn.get_number_of_vcpus(), 16)
-        self._assert_exec_called_with('grep -c processor /proc/cpuinfo')
+        self._assert_exec_called_with('grep -c ^processor /proc/cpuinfo')
 
     def test_get_partitions(self):
         proc_partitions = """major minor  #blocks  name
@@ -146,8 +146,11 @@
         self._assert_exec_called_with(
             "sudo ip addr add %s/%s dev %s" % (ip, '28', nic))
 
-    def test_turn_nic_on(self):
+    def test_set_nic_state(self):
         nic = 'eth0'
-        self.conn.turn_nic_on(nic)
+        self.conn.set_nic_state(nic)
         self._assert_exec_called_with(
             'sudo ip link set %s up' % nic)
+        self.conn.set_nic_state(nic, "down")
+        self._assert_exec_called_with(
+            'sudo ip link set %s down' % nic)
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index ca8bc3e..c45f6da 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -24,6 +24,7 @@
 class ConfigFixture(conf_fixture.Config):
 
     def __init__(self):
+        cfg.CONF([], default_config_files=[])
         config.register_opts()
         super(ConfigFixture, self).__init__()
 
@@ -59,6 +60,5 @@
 
 class FakePrivate(config.TempestConfigPrivate):
     def __init__(self, parse_conf=True, config_path=None):
-        cfg.CONF([], default_config_files=[])
         self._set_attrs()
-        self.lock_path = cfg.CONF.lock_path
+        self.lock_path = cfg.CONF.oslo_concurrency.lock_path
diff --git a/tempest/tests/fake_http.py b/tempest/tests/fake_http.py
index 7d77484..d714055 100644
--- a/tempest/tests/fake_http.py
+++ b/tempest/tests/fake_http.py
@@ -50,7 +50,8 @@
 class fake_httplib(object):
     def __init__(self, headers, body=None,
                  version=1.0, status=200, reason="Ok"):
-        """
+        """Initialization of fake httplib
+
         :param headers: dict representing HTTP response headers
         :param body: file-like object
         :param version: HTTP Version
diff --git a/tempest/tests/services/compute/test_base_compute_client.py b/tempest/tests/services/compute/test_base_compute_client.py
new file mode 100644
index 0000000..7a55cdb
--- /dev/null
+++ b/tempest/tests/services/compute/test_base_compute_client.py
@@ -0,0 +1,136 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+from oslotest import mockpatch
+from tempest_lib.common import rest_client
+
+from tempest import exceptions
+from tempest.services.compute.json import base_compute_client
+from tempest.tests import fake_auth_provider
+from tempest.tests.services.compute import base
+
+
+class TestMicroversionHeaderCheck(base.BaseComputeServiceTest):
+
+    def setUp(self):
+        super(TestMicroversionHeaderCheck, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = base_compute_client.BaseComputeClient(
+            fake_auth, 'compute', 'regionOne')
+        self.client.set_api_microversion('2.2')
+
+    def _check_microversion_header_in_response(self, fake_response):
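+        # Stub RestClient.request to hand back a response built from the
+        # given headers, so get() exercises the client's microversion
+        # header check against a controlled value.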
+        def request(*args, **kwargs):
+            return (httplib2.Response(fake_response), {})
+
+        self.useFixture(mockpatch.PatchObject(
+            rest_client.RestClient,
+            'request',
+            side_effect=request))
+
+    def test_correct_microversion_in_response(self):
+        fake_response = {self.client.api_microversion_header_name: '2.2'}
+        self._check_microversion_header_in_response(fake_response)
+        self.client.get('fake_url')
+
+    def test_incorrect_microversion_in_response(self):
+        fake_response = {self.client.api_microversion_header_name: '2.3'}
+        self._check_microversion_header_in_response(fake_response)
+        self.assertRaises(exceptions.InvalidHTTPResponseHeader,
+                          self.client.get, 'fake_url')
+
+    def test_no_microversion_header_in_response(self):
+        self._check_microversion_header_in_response({})
+        self.assertRaises(exceptions.InvalidHTTPResponseHeader,
+                          self.client.get, 'fake_url')
+
+
+class DummyServiceClient1(base_compute_client.BaseComputeClient):
+    schema_versions_info = [
+        {'min': None, 'max': '2.1', 'schema': 'schemav21'},
+        {'min': '2.2', 'max': '2.9', 'schema': 'schemav22'},
+        {'min': '2.10', 'max': None, 'schema': 'schemav210'}]
+
+    def return_selected_schema(self):
+        return self.get_schema(self.schema_versions_info)
+
+
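+# The TestSchemaVersions* cases below pin an api_microversion on the dummy
+# client and assert which entry get_schema() picks from the version ranges
+# in schema_versions_info.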
+class TestSchemaVersionsNone(base.BaseComputeServiceTest):
+    api_microversion = None
+    expected_schema = 'schemav21'
+
+    def setUp(self):
+        super(TestSchemaVersionsNone, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = DummyServiceClient1(fake_auth, 'compute', 'regionOne')
+        self.client.api_microversion = self.api_microversion
+
+    def test_schema(self):
+        self.assertEqual(self.expected_schema,
+                         self.client.return_selected_schema())
+
+
+class TestSchemaVersionsV21(TestSchemaVersionsNone):
+    api_microversion = '2.1'
+    expected_schema = 'schemav21'
+
+
+class TestSchemaVersionsV22(TestSchemaVersionsNone):
+    api_microversion = '2.2'
+    expected_schema = 'schemav22'
+
+
+class TestSchemaVersionsV25(TestSchemaVersionsNone):
+    api_microversion = '2.5'
+    expected_schema = 'schemav22'
+
+
+class TestSchemaVersionsV29(TestSchemaVersionsNone):
+    api_microversion = '2.9'
+    expected_schema = 'schemav22'
+
+
+class TestSchemaVersionsV210(TestSchemaVersionsNone):
+    api_microversion = '2.10'
+    expected_schema = 'schemav210'
+
+
+class TestSchemaVersionsLatest(TestSchemaVersionsNone):
+    api_microversion = 'latest'
+    expected_schema = 'schemav210'
+
+
+class DummyServiceClient2(base_compute_client.BaseComputeClient):
+    schema_versions_info = [
+        {'min': None, 'max': '2.1', 'schema': 'schemav21'},
+        {'min': '2.2', 'max': '2.9', 'schema': 'schemav22'}]
+
+    def return_selected_schema(self):
+        return self.get_schema(self.schema_versions_info)
+
+
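+# DummyServiceClient2 has no schema range covering 2.10, so schema selection
+# for that microversion is expected to raise JSONSchemaNotFound.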
+class TestSchemaVersionsNotFound(base.BaseComputeServiceTest):
+    api_microversion = '2.10'
+    expected_schema = 'schemav210'
+
+    def setUp(self):
+        super(TestSchemaVersionsNotFound, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = DummyServiceClient2(fake_auth, 'compute', 'regionOne')
+        self.client.api_microversion = self.api_microversion
+
+    def test_schema(self):
+        self.assertRaises(exceptions.JSONSchemaNotFound,
+                          self.client.return_selected_schema)
diff --git a/tempest/tests/services/compute/test_floating_ips_client.py b/tempest/tests/services/compute/test_floating_ips_client.py
deleted file mode 100644
index ee22004..0000000
--- a/tempest/tests/services/compute/test_floating_ips_client.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-
-from tempest.services.compute.json import floating_ips_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestFloatingIpsClient(base.BaseComputeServiceTest):
-
-    floating_ip = {"fixed_ip": None,
-                   "id": "46d61064-13ba-4bf0-9557-69de824c3d6f",
-                   "instance_id": "a1daa443-a6bb-463e-aea2-104b7d912eb8",
-                   "ip": "10.10.10.1",
-                   "pool": "nova"}
-
-    def setUp(self):
-        super(TestFloatingIpsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = floating_ips_client.FloatingIPsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_floating_ips(self, bytes_body=False):
-        expected = {'floating_ips': [TestFloatingIpsClient.floating_ip]}
-        self.check_service_client_function(
-            self.client.list_floating_ips,
-            'tempest.common.service_client.ServiceClient.get',
-            expected,
-            bytes_body)
-
-    def test_list_floating_ips_str_body(self):
-        self._test_list_floating_ips(bytes_body=False)
-
-    def test_list_floating_ips_byte_body(self):
-        self._test_list_floating_ips(bytes_body=True)
-
-    def _test_show_floating_ip(self, bytes_body=False):
-        expected = {"floating_ip": TestFloatingIpsClient.floating_ip}
-        self.check_service_client_function(
-            self.client.show_floating_ip,
-            'tempest.common.service_client.ServiceClient.get',
-            expected,
-            bytes_body,
-            floating_ip_id='a1daa443-a6bb-463e-aea2-104b7d912eb8')
-
-    def test_show_floating_ip_str_body(self):
-        self._test_show_floating_ip(bytes_body=False)
-
-    def test_show_floating_ip_byte_body(self):
-        self._test_show_floating_ip(bytes_body=True)
-
-    def _test_create_floating_ip(self, bytes_body=False):
-        expected = {"floating_ip": TestFloatingIpsClient.floating_ip}
-        self.check_service_client_function(
-            self.client.create_floating_ip,
-            'tempest.common.service_client.ServiceClient.post',
-            expected,
-            bytes_body,
-            pool_name='nova')
-
-    def test_create_floating_ip_str_body(self):
-        self._test_create_floating_ip(bytes_body=False)
-
-    def test_create_floating_ip_byte_body(self):
-        self._test_create_floating_ip(bytes_body=True)
-
-    def test_delete_floating_ip(self):
-        self.check_service_client_function(
-            self.client.delete_floating_ip,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, floating_ip_id='fake-id')
-
-    def test_associate_floating_ip_to_server(self):
-        self.check_service_client_function(
-            self.client.associate_floating_ip_to_server,
-            'tempest.common.service_client.ServiceClient.post',
-            {}, status=202, floating_ip='10.10.10.1',
-            server_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
-
-    def test_disassociate_floating_ip_from_server(self):
-        self.check_service_client_function(
-            self.client.disassociate_floating_ip_from_server,
-            'tempest.common.service_client.ServiceClient.post',
-            {}, status=202, floating_ip='10.10.10.1',
-            server_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
-
-    def test_is_resource_deleted_true(self):
-        self.useFixture(mockpatch.Patch(
-            'tempest.services.compute.json.floating_ips_client.'
-            'FloatingIPsClient.show_floating_ip',
-            side_effect=lib_exc.NotFound()))
-        self.assertTrue(self.client.is_resource_deleted('fake-id'))
-
-    def test_is_resource_deleted_false(self):
-        self.useFixture(mockpatch.Patch(
-            'tempest.services.compute.json.floating_ips_client.'
-            'FloatingIPsClient.show_floating_ip',
-            return_value={"floating_ip": TestFloatingIpsClient.floating_ip}))
-        self.assertFalse(self.client.is_resource_deleted('fake-id'))
diff --git a/tempest/tests/services/compute/test_images_client.py b/tempest/tests/services/compute/test_images_client.py
deleted file mode 100644
index 1d532b7..0000000
--- a/tempest/tests/services/compute/test_images_client.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-
-from tempest.services.compute.json import images_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestImagesClient(base.BaseComputeServiceTest):
-    # Data Dictionaries used for testing #
-    FAKE_IMAGE_METADATA = {
-        "list":
-            {"metadata": {
-             "auto_disk_config": "True",
-             "Label": "Changed"
-             }},
-        "set_item":
-            {"meta": {
-             "auto_disk_config": "True"
-             }},
-        "show_item":
-            {"meta": {
-             "kernel_id": "nokernel",
-             }},
-        "update":
-            {"metadata": {
-             "kernel_id": "False",
-             "Label": "UpdatedImage"
-             }},
-        "set":
-            {"metadata": {
-             "Label": "Changed",
-             "auto_disk_config": "True"
-             }},
-        "delete_item": {}
-        }
-
-    FAKE_IMAGE_DATA = {
-        "list":
-            {"images": [
-             {"id": "70a599e0-31e7-49b7-b260-868f441e862b",
-              "links": [
-                    {"href": "http://openstack.example.com/v2/openstack" +
-                             "/images/70a599e0-31e7-49b7-b260-868f441e862b",
-                     "rel": "self"
-                     }
-              ],
-              "name": "fakeimage7"
-              }]},
-        "show": {"image": {
-            "created": "2011-01-01T01:02:03Z",
-            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
-            "links": [
-                {
-                    "href": "http://openstack.example.com/v2/openstack" +
-                            "/images/70a599e0-31e7-49b7-b260-868f441e862b",
-                    "rel": "self"
-                },
-            ],
-            "metadata": {
-                "architecture": "x86_64",
-                "auto_disk_config": "True",
-                "kernel_id": "nokernel",
-                "ramdisk_id": "nokernel"
-            },
-            "minDisk": 0,
-            "minRam": 0,
-            "name": "fakeimage7",
-            "progress": 100,
-            "status": "ACTIVE",
-            "updated": "2011-01-01T01:02:03Z"}},
-        "delete": {}
-        }
-    func2mock = {
-        'get': 'tempest.common.service_client.ServiceClient.get',
-        'post': 'tempest.common.service_client.ServiceClient.post',
-        'put': 'tempest.common.service_client.ServiceClient.put',
-        'delete': 'tempest.common.service_client.ServiceClient.delete'}
-    # Variable definition
-    FAKE_IMAGE_ID = FAKE_IMAGE_DATA['show']['image']['id']
-    FAKE_CREATE_INFO = {'location': 'None'}
-    FAKE_METADATA = FAKE_IMAGE_METADATA['show_item']['meta']
-
-    def setUp(self):
-        super(TestImagesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = images_client.ImagesClient(fake_auth,
-                                                 "compute", "regionOne")
-
-    def _test_image_operation(self, operation="delete", bytes_body=False):
-        response_code = 200
-        mock_operation = self.func2mock['get']
-        expected_op = self.FAKE_IMAGE_DATA[operation]
-        params = {"image_id": self.FAKE_IMAGE_ID}
-        if operation == 'list':
-            function = self.client.list_images
-        elif operation == 'show':
-            function = self.client.show_image
-        else:
-            function = self.client.delete_image
-            mock_operation = self.func2mock['delete']
-            response_code = 204
-
-        self.check_service_client_function(
-            function, mock_operation, expected_op,
-            bytes_body, response_code, **params)
-
-    def _test_image_metadata(self, operation="set_item", bytes_body=False):
-        response_code = 200
-        expected_op = self.FAKE_IMAGE_METADATA[operation]
-        if operation == 'list':
-            function = self.client.list_image_metadata
-            mock_operation = self.func2mock['get']
-            params = {"image_id": self.FAKE_IMAGE_ID}
-
-        elif operation == 'set':
-            function = self.client.set_image_metadata
-            mock_operation = self.func2mock['put']
-            params = {"image_id": "_dummy_data",
-                      "meta": self.FAKE_METADATA}
-
-        elif operation == 'update':
-            function = self.client.update_image_metadata
-            mock_operation = self.func2mock['post']
-            params = {"image_id": self.FAKE_IMAGE_ID,
-                      "meta": self.FAKE_METADATA}
-
-        elif operation == 'show_item':
-            mock_operation = self.func2mock['get']
-            function = self.client.show_image_metadata_item
-            params = {"image_id": self.FAKE_IMAGE_ID,
-                      "key": "123"}
-
-        elif operation == 'delete_item':
-            function = self.client.delete_image_metadata_item
-            mock_operation = self.func2mock['delete']
-            response_code = 204
-            params = {"image_id": self.FAKE_IMAGE_ID,
-                      "key": "123"}
-
-        else:
-            function = self.client.set_image_metadata_item
-            mock_operation = self.func2mock['put']
-            params = {"image_id": self.FAKE_IMAGE_ID,
-                      "key": "123",
-                      "meta": self.FAKE_METADATA}
-
-        self.check_service_client_function(
-            function, mock_operation, expected_op,
-            bytes_body, response_code, **params)
-
-    def _test_resource_deleted(self, bytes_body=False):
-        params = {"id": self.FAKE_IMAGE_ID}
-        expected_op = self.FAKE_IMAGE_DATA['show']['image']
-        self.useFixture(mockpatch.Patch('tempest.services.compute.json'
-                        '.images_client.ImagesClient.show_image',
-                                        side_effect=lib_exc.NotFound))
-        self.assertEqual(True, self.client.is_resource_deleted(**params))
-        tempdata = copy.deepcopy(self.FAKE_IMAGE_DATA['show'])
-        tempdata['image']['id'] = None
-        self.useFixture(mockpatch.Patch('tempest.services.compute.json'
-                        '.images_client.ImagesClient.show_image',
-                                        return_value=expected_op))
-        self.assertEqual(False, self.client.is_resource_deleted(**params))
-
-    def test_list_images_with_str_body(self):
-        self._test_image_operation('list')
-
-    def test_list_images_with_bytes_body(self):
-        self._test_image_operation('list', True)
-
-    def test_show_image_with_str_body(self):
-        self._test_image_operation('show')
-
-    def test_show_image_with_bytes_body(self):
-        self._test_image_operation('show', True)
-
-    def test_delete_image_with_str_body(self):
-        self._test_image_operation('delete')
-
-    def test_delete_image_with_bytes_body(self):
-        self._test_image_operation('delete', True)
-
-    def test_list_image_metadata_with_str_body(self):
-        self._test_image_metadata('list')
-
-    def test_list_image_metadata_with_bytes_body(self):
-        self._test_image_metadata('list', True)
-
-    def test_set_image_metadata_with_str_body(self):
-        self._test_image_metadata('set')
-
-    def test_set_image_metadata_with_bytes_body(self):
-        self._test_image_metadata('set', True)
-
-    def test_update_image_metadata_with_str_body(self):
-        self._test_image_metadata('update')
-
-    def test_update_image_metadata_with_bytes_body(self):
-        self._test_image_metadata('update', True)
-
-    def test_set_image_metadata_item_with_str_body(self):
-        self._test_image_metadata()
-
-    def test_set_image_metadata_item_with_bytes_body(self):
-        self._test_image_metadata(bytes_body=True)
-
-    def test_show_image_metadata_item_with_str_body(self):
-        self._test_image_metadata('show_item')
-
-    def test_show_image_metadata_item_with_bytes_body(self):
-        self._test_image_metadata('show_item', True)
-
-    def test_delete_image_metadata_item_with_str_body(self):
-        self._test_image_metadata('delete_item')
-
-    def test_delete_image_metadata_item_with_bytes_body(self):
-        self._test_image_metadata('delete_item', True)
-
-    def test_resource_delete_with_str_body(self):
-        self._test_resource_deleted()
-
-    def test_resource_delete_with_bytes_body(self):
-        self._test_resource_deleted(True)
diff --git a/tempest/tests/services/compute/test_instance_usage_audit_log_client.py b/tempest/tests/services/compute/test_instance_usage_audit_log_client.py
deleted file mode 100644
index b4af9d5..0000000
--- a/tempest/tests/services/compute/test_instance_usage_audit_log_client.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import datetime
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import instance_usage_audit_log_client
-from tempest.tests.services.compute import base
-
-
-class TestInstanceUsagesAuditLogClient(base.BaseComputeServiceTest):
-
-    FAKE_AUDIT_LOG = {
-        "hosts_not_run": [
-            "f4eb7cfd155f4574967f8b55a7faed75"
-        ],
-        "log": {},
-        "num_hosts": 1,
-        "num_hosts_done": 0,
-        "num_hosts_not_run": 1,
-        "num_hosts_running": 0,
-        "overall_status": "0 of 1 hosts done. 0 errors.",
-        "period_beginning": "2012-12-01 00:00:00",
-        "period_ending": "2013-01-01 00:00:00",
-        "total_errors": 0,
-        "total_instances": 0
-    }
-
-    def setUp(self):
-        super(TestInstanceUsagesAuditLogClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = (instance_usage_audit_log_client.
-                       InstanceUsagesAuditLogClient(fake_auth, 'compute',
-                                                    'regionOne'))
-
-    def _test_list_instance_usage_audit_logs(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_instance_usage_audit_logs,
-            'tempest.common.service_client.ServiceClient.get',
-            {"instance_usage_audit_logs": self.FAKE_AUDIT_LOG},
-            bytes_body)
-
-    def test_list_instance_usage_audit_logs_with_str_body(self):
-        self._test_list_instance_usage_audit_logs()
-
-    def test_list_instance_usage_audit_logs_with_bytes_body(self):
-        self._test_list_instance_usage_audit_logs(bytes_body=True)
-
-    def _test_show_instance_usage_audit_log(self, bytes_body=False):
-        before_time = datetime.datetime(2012, 12, 1, 0, 0)
-        self.check_service_client_function(
-            self.client.show_instance_usage_audit_log,
-            'tempest.common.service_client.ServiceClient.get',
-            {"instance_usage_audit_log": self.FAKE_AUDIT_LOG},
-            bytes_body,
-            time_before=before_time)
-
-    def test_show_instance_usage_audit_log_with_str_body(self):
-        self._test_show_instance_usage_audit_log()
-
-    def test_show_network_with_bytes_body_with_bytes_body(self):
-        self._test_show_instance_usage_audit_log(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_interfaces_client.py b/tempest/tests/services/compute/test_interfaces_client.py
deleted file mode 100644
index 235585a..0000000
--- a/tempest/tests/services/compute/test_interfaces_client.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.compute.json import interfaces_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestInterfacesClient(base.BaseComputeServiceTest):
-    # Data Values to be used for testing #
-    FAKE_INTERFACE_DATA = {
-        "fixed_ips": [{
-            "ip_address": "192.168.1.1",
-            "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
-            }],
-        "mac_addr": "fa:16:3e:4c:2c:30",
-        "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
-        "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
-        "port_state": "ACTIVE"}
-
-    FAKE_SHOW_DATA = {
-        "interfaceAttachment": FAKE_INTERFACE_DATA}
-    FAKE_LIST_DATA = {
-        "interfaceAttachments": [FAKE_INTERFACE_DATA]}
-
-    FAKE_SERVER_ID = "ec14c864-096e-4e27-bb8a-2c2b4dc6f3f5"
-    FAKE_PORT_ID = FAKE_SHOW_DATA['interfaceAttachment']['port_id']
-    func2mock = {
-        'delete': 'tempest.common.service_client.ServiceClient.delete',
-        'get': 'tempest.common.service_client.ServiceClient.get',
-        'post': 'tempest.common.service_client.ServiceClient.post'}
-
-    def setUp(self):
-        super(TestInterfacesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = interfaces_client.InterfacesClient(fake_auth,
-                                                         "compute",
-                                                         "regionOne")
-
-    def _test_interface_operation(self, operation="create", bytes_body=False):
-        response_code = 200
-        expected_op = self.FAKE_SHOW_DATA
-        mock_operation = self.func2mock['get']
-        params = {'server_id': self.FAKE_SERVER_ID,
-                  'port_id': self.FAKE_PORT_ID}
-        if operation == 'list':
-            expected_op = self.FAKE_LIST_DATA
-            function = self.client.list_interfaces
-            params = {'server_id': self.FAKE_SERVER_ID}
-        elif operation == 'show':
-            function = self.client.show_interface
-        elif operation == 'delete':
-            expected_op = {}
-            mock_operation = self.func2mock['delete']
-            function = self.client.delete_interface
-            response_code = 202
-        else:
-            function = self.client.create_interface
-            mock_operation = self.func2mock['post']
-
-        self.check_service_client_function(
-            function, mock_operation, expected_op,
-            bytes_body, response_code, **params)
-
-    def test_list_interfaces_with_str_body(self):
-        self._test_interface_operation('list')
-
-    def test_list_interfaces_with_bytes_body(self):
-        self._test_interface_operation('list', True)
-
-    def test_show_interface_with_str_body(self):
-        self._test_interface_operation('show')
-
-    def test_show_interface_with_bytes_body(self):
-        self._test_interface_operation('show', True)
-
-    def test_delete_interface_with_str_body(self):
-        self._test_interface_operation('delete')
-
-    def test_delete_interface_with_bytes_body(self):
-        self._test_interface_operation('delete', True)
-
-    def test_create_interface_with_str_body(self):
-        self._test_interface_operation()
-
-    def test_create_interface_with_bytes_body(self):
-        self._test_interface_operation(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_keypairs_client.py b/tempest/tests/services/compute/test_keypairs_client.py
index 8b1a9a8..03aee53 100644
--- a/tempest/tests/services/compute/test_keypairs_client.py
+++ b/tempest/tests/services/compute/test_keypairs_client.py
@@ -38,7 +38,7 @@
     def _test_list_keypairs(self, bytes_body=False):
         self.check_service_client_function(
             self.client.list_keypairs,
-            'tempest.common.service_client.ServiceClient.get',
+            'tempest_lib.common.rest_client.RestClient.get',
             {"keypairs": []},
             bytes_body)
 
@@ -60,7 +60,7 @@
 
         self.check_service_client_function(
             self.client.show_keypair,
-            'tempest.common.service_client.ServiceClient.get',
+            'tempest_lib.common.rest_client.RestClient.get',
             fake_keypair,
             bytes_body,
             keypair_name="test")
@@ -77,7 +77,7 @@
 
         self.check_service_client_function(
             self.client.create_keypair,
-            'tempest.common.service_client.ServiceClient.post',
+            'tempest_lib.common.rest_client.RestClient.post',
             fake_keypair,
             bytes_body,
             name="test")
@@ -91,5 +91,5 @@
     def test_delete_keypair(self):
         self.check_service_client_function(
             self.client.delete_keypair,
-            'tempest.common.service_client.ServiceClient.delete',
+            'tempest_lib.common.rest_client.RestClient.delete',
             {}, status=202, keypair_name='test')
diff --git a/tempest/tests/services/compute/test_limits_client.py b/tempest/tests/services/compute/test_limits_client.py
deleted file mode 100644
index 733d3d1..0000000
--- a/tempest/tests/services/compute/test_limits_client.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import limits_client
-from tempest.tests.services.compute import base
-
-
-class TestLimitsClient(base.BaseComputeServiceTest):
-
-    def setUp(self):
-        super(TestLimitsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = limits_client.LimitsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_show_limits(self, bytes_body=False):
-        expected = {
-            "limits": {
-                "rate": [],
-                "absolute": {
-                    "maxServerMeta": 128,
-                    "maxPersonality": 5,
-                    "totalServerGroupsUsed": 0,
-                    "maxImageMeta": 128,
-                    "maxPersonalitySize": 10240,
-                    "maxServerGroups": 10,
-                    "maxSecurityGroupRules": 20,
-                    "maxTotalKeypairs": 100,
-                    "totalCoresUsed": 0,
-                    "totalRAMUsed": 0,
-                    "totalInstancesUsed": 0,
-                    "maxSecurityGroups": 10,
-                    "totalFloatingIpsUsed": 0,
-                    "maxTotalCores": 20,
-                    "totalSecurityGroupsUsed": 0,
-                    "maxTotalFloatingIps": 10,
-                    "maxTotalInstances": 10,
-                    "maxTotalRAMSize": 51200,
-                    "maxServerGroupMembers": 10
-                    }
-            }
-        }
-
-        self.check_service_client_function(
-            self.client.show_limits,
-            'tempest.common.service_client.ServiceClient.get',
-            expected,
-            bytes_body)
-
-    def test_show_limits_with_str_body(self):
-        self._test_show_limits()
-
-    def test_show_limits_with_bytes_body(self):
-        self._test_show_limits(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_migrations_client.py b/tempest/tests/services/compute/test_migrations_client.py
deleted file mode 100644
index 55f2ef2..0000000
--- a/tempest/tests/services/compute/test_migrations_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import migrations_client
-from tempest.tests.services.compute import base
-
-
-class TestMigrationsClient(base.BaseComputeServiceTest):
-    FAKE_MIGRATION_INFO = {"migrations": [{
-        "created_at": "2012-10-29T13:42:02",
-        "dest_compute": "compute2",
-        "dest_host": "1.2.3.4",
-        "dest_node": "node2",
-        "id": 1234,
-        "instance_uuid": "e9e4fdd7-f956-44ff-bfeb-d654a96ab3a2",
-        "new_instance_type_id": 2,
-        "old_instance_type_id": 1,
-        "source_compute": "compute1",
-        "source_node": "node1",
-        "status": "finished",
-        "updated_at": "2012-10-29T13:42:02"}]}
-
-    def setUp(self):
-        super(TestMigrationsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.mg_client_obj = migrations_client.MigrationsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_migrations(self, bytes_body=False):
-        self.check_service_client_function(
-            self.mg_client_obj.list_migrations,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_MIGRATION_INFO,
-            bytes_body)
-
-    def test_list_migration_with_str_body(self):
-        self._test_list_migrations()
-
-    def test_list_migration_with_bytes_body(self):
-        self._test_list_migrations(True)
diff --git a/tempest/tests/services/compute/test_networks_client.py b/tempest/tests/services/compute/test_networks_client.py
deleted file mode 100644
index cec8262..0000000
--- a/tempest/tests/services/compute/test_networks_client.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import networks_client
-from tempest.tests.services.compute import base
-
-
-class TestNetworksClient(base.BaseComputeServiceTest):
-
-    FAKE_NETWORK = {
-        "bridge": None,
-        "vpn_public_port": None,
-        "dhcp_start": None,
-        "bridge_interface": None,
-        "share_address": None,
-        "updated_at": None,
-        "id": "34d5ae1e-5659-49cf-af80-73bccd7d7ad3",
-        "cidr_v6": None,
-        "deleted_at": None,
-        "gateway": None,
-        "rxtx_base": None,
-        "label": u'30d7',
-        "priority": None,
-        "project_id": None,
-        "vpn_private_address": None,
-        "deleted": None,
-        "vlan": None,
-        "broadcast": None,
-        "netmask": None,
-        "injected": None,
-        "cidr": None,
-        "vpn_public_address": None,
-        "multi_host": None,
-        "enable_dhcp": None,
-        "dns2": None,
-        "created_at": None,
-        "host": None,
-        "mtu": None,
-        "gateway_v6": None,
-        "netmask_v6": None,
-        "dhcp_server": None,
-        "dns1": None
-        }
-
-    network_id = "34d5ae1e-5659-49cf-af80-73bccd7d7ad3"
-
-    FAKE_NETWORKS = [FAKE_NETWORK]
-
-    def setUp(self):
-        super(TestNetworksClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = networks_client.NetworksClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_networks(self, bytes_body=False):
-        fake_list = {"networks": self.FAKE_NETWORKS}
-        self.check_service_client_function(
-            self.client.list_networks,
-            'tempest.common.service_client.ServiceClient.get',
-            fake_list,
-            bytes_body)
-
-    def test_list_networks_with_str_body(self):
-        self._test_list_networks()
-
-    def test_list_networks_with_bytes_body(self):
-        self._test_list_networks(bytes_body=True)
-
-    def _test_show_network(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_network,
-            'tempest.common.service_client.ServiceClient.get',
-            {"network": self.FAKE_NETWORK},
-            bytes_body,
-            network_id=self.network_id
-            )
-
-    def test_show_network_with_str_body(self):
-        self._test_show_network()
-
-    def test_show_network_with_bytes_body(self):
-        self._test_show_network(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_quota_classes_client.py b/tempest/tests/services/compute/test_quota_classes_client.py
deleted file mode 100644
index 29800a2..0000000
--- a/tempest/tests/services/compute/test_quota_classes_client.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import quota_classes_client
-from tempest.tests.services.compute import base
-
-
-class TestQuotaClassesClient(base.BaseComputeServiceTest):
-
-    FAKE_QUOTA_CLASS_SET = {
-        "injected_file_content_bytes": 10240,
-        "metadata_items": 128,
-        "server_group_members": 10,
-        "server_groups": 10,
-        "ram": 51200,
-        "floating_ips": 10,
-        "key_pairs": 100,
-        "id": u'\u2740(*\xb4\u25e1`*)\u2740',
-        "instances": 10,
-        "security_group_rules": 20,
-        "security_groups": 10,
-        "injected_files": 5,
-        "cores": 20,
-        "fixed_ips": -1,
-        "injected_file_path_bytes": 255,
-        }
-
-    def setUp(self):
-        super(TestQuotaClassesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = quota_classes_client.QuotaClassesClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_show_quota_class_set(self, bytes_body=False):
-        fake_body = {'quota_class_set': self.FAKE_QUOTA_CLASS_SET}
-        self.check_service_client_function(
-            self.client.show_quota_class_set,
-            'tempest.common.service_client.ServiceClient.get',
-            fake_body,
-            bytes_body,
-            quota_class_id="test")
-
-    def test_show_quota_class_set_with_str_body(self):
-        self._test_show_quota_class_set()
-
-    def test_show_quota_class_set_with_bytes_body(self):
-        self._test_show_quota_class_set(bytes_body=True)
-
-    def test_update_quota_class_set(self):
-        fake_quota_class_set = copy.deepcopy(self.FAKE_QUOTA_CLASS_SET)
-        fake_quota_class_set.pop("id")
-        fake_body = {'quota_class_set': fake_quota_class_set}
-        self.check_service_client_function(
-            self.client.update_quota_class_set,
-            'tempest.common.service_client.ServiceClient.put',
-            fake_body,
-            quota_class_id="test")
diff --git a/tempest/tests/services/compute/test_quotas_client.py b/tempest/tests/services/compute/test_quotas_client.py
deleted file mode 100644
index 9a9d8fe..0000000
--- a/tempest/tests/services/compute/test_quotas_client.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import quotas_client
-from tempest.tests.services.compute import base
-
-
-class TestQuotasClient(base.BaseComputeServiceTest):
-
-    FAKE_QUOTA_SET = {
-        "quota_set": {
-            "injected_file_content_bytes": 10240,
-            "metadata_items": 128,
-            "server_group_members": 10,
-            "server_groups": 10,
-            "ram": 51200,
-            "floating_ips": 10,
-            "key_pairs": 100,
-            "id": "8421f7be61064f50b680465c07f334af",
-            "instances": 10,
-            "security_group_rules": 20,
-            "injected_files": 5,
-            "cores": 20,
-            "fixed_ips": -1,
-            "injected_file_path_bytes": 255,
-            "security_groups": 10}
-        }
-
-    project_id = "8421f7be61064f50b680465c07f334af"
-
-    def setUp(self):
-        super(TestQuotasClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = quotas_client.QuotasClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_show_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_quota_set,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_QUOTA_SET,
-            to_utf=bytes_body,
-            tenant_id=self.project_id)
-
-    def test_show_quota_set_with_str_body(self):
-        self._test_show_quota_set()
-
-    def test_show_quota_set_with_bytes_body(self):
-        self._test_show_quota_set(bytes_body=True)
-
-    def _test_show_default_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_default_quota_set,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_QUOTA_SET,
-            to_utf=bytes_body,
-            tenant_id=self.project_id)
-
-    def test_show_default_quota_set_with_str_body(self):
-        self._test_show_quota_set()
-
-    def test_show_default_quota_set_with_bytes_body(self):
-        self._test_show_quota_set(bytes_body=True)
-
-    def test_update_quota_set(self):
-        fake_quota_set = copy.deepcopy(self.FAKE_QUOTA_SET)
-        fake_quota_set['quota_set'].pop("id")
-        self.check_service_client_function(
-            self.client.update_quota_set,
-            'tempest.common.service_client.ServiceClient.put',
-            fake_quota_set,
-            tenant_id=self.project_id)
-
-    def test_delete_quota_set(self):
-        self.check_service_client_function(
-            self.client.delete_quota_set,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, tenant_id=self.project_id)
diff --git a/tempest/tests/services/compute/test_security_group_default_rules_client.py b/tempest/tests/services/compute/test_security_group_default_rules_client.py
deleted file mode 100644
index 99ab305..0000000
--- a/tempest/tests/services/compute/test_security_group_default_rules_client.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import security_group_default_rules_client
-from tempest.tests.services.compute import base
-
-
-class TestSecurityGroupDefaultRulesClient(base.BaseComputeServiceTest):
-    FAKE_RULE = {
-        "from_port": 80,
-        "id": 1,
-        "ip_protocol": "TCP",
-        "ip_range": {
-            "cidr": "10.10.10.0/24"
-        },
-        "to_port": 80
-    }
-
-    def setUp(self):
-        super(TestSecurityGroupDefaultRulesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = (security_group_default_rules_client.
-                       SecurityGroupDefaultRulesClient(fake_auth, 'compute',
-                                                       'regionOne'))
-
-    def _test_list_security_group_default_rules(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_security_group_default_rules,
-            'tempest.common.service_client.ServiceClient.get',
-            {"security_group_default_rules": [self.FAKE_RULE]},
-            to_utf=bytes_body)
-
-    def test_list_security_group_default_rules_with_str_body(self):
-        self._test_list_security_group_default_rules()
-
-    def test_list_security_group_default_rules_with_bytes_body(self):
-        self._test_list_security_group_default_rules(bytes_body=True)
-
-    def _test_show_security_group_default_rule(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_security_group_default_rule,
-            'tempest.common.service_client.ServiceClient.get',
-            {"security_group_default_rule": self.FAKE_RULE},
-            to_utf=bytes_body,
-            security_group_default_rule_id=1)
-
-    def test_show_security_group_default_rule_with_str_body(self):
-        self._test_show_security_group_default_rule()
-
-    def test_show_security_group_default_rule_with_bytes_body(self):
-        self._test_show_security_group_default_rule(bytes_body=True)
-
-    def _test_create_security_default_group_rule(self, bytes_body=False):
-        request_body = {
-            "to_port": 80,
-            "from_port": 80,
-            "ip_protocol": "TCP",
-            "cidr": "10.10.10.0/24"
-        }
-        self.check_service_client_function(
-            self.client.create_security_default_group_rule,
-            'tempest.common.service_client.ServiceClient.post',
-            {"security_group_default_rule": self.FAKE_RULE},
-            to_utf=bytes_body, **request_body)
-
-    def test_create_security_default_group_rule_with_str_body(self):
-        self._test_create_security_default_group_rule()
-
-    def test_create_security_default_group_rule_with_bytes_body(self):
-        self._test_create_security_default_group_rule(bytes_body=True)
-
-    def test_delete_security_group_default_rule(self):
-        self.check_service_client_function(
-            self.client.delete_security_group_default_rule,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=204, security_group_default_rule_id=1)
diff --git a/tempest/tests/services/compute/test_security_group_rules_client.py b/tempest/tests/services/compute/test_security_group_rules_client.py
deleted file mode 100644
index c182742..0000000
--- a/tempest/tests/services/compute/test_security_group_rules_client.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.compute.json import security_group_rules_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestSecurityGroupRulesClient(base.BaseComputeServiceTest):
-
-    FAKE_SECURITY_GROUP_RULE = {
-        "security_group_rule": {
-            "id": "2d021cf1-ce4b-4292-994f-7a785d62a144",
-            "ip_range": {
-                "cidr": "0.0.0.0/0"
-            },
-            "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb",
-            "to_port": 443,
-            "ip_protocol": "tcp",
-            "group": {},
-            "from_port": 443
-        }
-    }
-
-    def setUp(self):
-        super(TestSecurityGroupRulesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = security_group_rules_client.SecurityGroupRulesClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_create_security_group_rule(self, bytes_body=False):
-        req_body = {
-            "from_port": "443",
-            "ip_protocol": "tcp",
-            "to_port": "443",
-            "cidr": "0.0.0.0/0",
-            "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb"
-        }
-        self.check_service_client_function(
-            self.client.create_security_group_rule,
-            'tempest.common.service_client.ServiceClient.post',
-            self.FAKE_SECURITY_GROUP_RULE,
-            to_utf=bytes_body, **req_body)
-
-    def test_create_security_group_rule_with_str_body(self):
-        self._test_create_security_group_rule()
-
-    def test_create_security_group_rule_with_bytes_body(self):
-        self._test_create_security_group_rule(bytes_body=True)
-
-    def test_delete_security_group_rule(self):
-        self.check_service_client_function(
-            self.client.delete_security_group_rule,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, group_rule_id='group-id')
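
The deleted service-client unit tests above all follow one pattern: the low-level HTTP method of the service client is patched, the public client method is called with fake arguments, and the decoded body is compared against a canned response, once as a native string and once as UTF-8 bytes (`to_utf=True`). A minimal, self-contained sketch of that pattern using only the standard library; the `FakeSecurityGroupRulesClient`, its transport, and the URL are hypothetical stand-ins, not the tempest `check_service_client_function` helper::

    import json
    import unittest
    from unittest import mock


    class FakeSecurityGroupRulesClient(object):
        """Hypothetical stand-in for a tempest service client."""

        def __init__(self, transport):
            self.transport = transport

        def create_security_group_rule(self, **kwargs):
            resp, body = self.transport.post(
                '/os-security-group-rules', json.dumps(kwargs))
            if isinstance(body, bytes):
                body = body.decode('utf-8')
            return json.loads(body)


    class TestStrAndBytesBodies(unittest.TestCase):

        FAKE_RULE = {"security_group_rule": {"id": "fake-id", "to_port": 443}}

        def _check_create(self, to_utf=False):
            body = json.dumps(self.FAKE_RULE)
            if to_utf:
                body = body.encode('utf-8')
            transport = mock.Mock()
            transport.post.return_value = (mock.Mock(status=200), body)
            client = FakeSecurityGroupRulesClient(transport)
            self.assertEqual(self.FAKE_RULE,
                             client.create_security_group_rule(to_port=443))

        def test_create_with_str_body(self):
            self._check_create()

        def test_create_with_bytes_body(self):
            self._check_create(to_utf=True)
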
diff --git a/tempest/tests/services/compute/test_security_groups_client.py b/tempest/tests/services/compute/test_security_groups_client.py
deleted file mode 100644
index 9e40b96..0000000
--- a/tempest/tests/services/compute/test_security_groups_client.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import security_groups_client
-from tempest.tests.services.compute import base
-
-
-class TestSecurityGroupsClient(base.BaseComputeServiceTest):
-
-    FAKE_SECURITY_GROUP_INFO = [{
-        "description": "default",
-        "id": "3fb26eb3-581b-4420-9963-b0879a026506",
-        "name": "default",
-        "rules": [],
-        "tenant_id": "openstack"
-    }]
-
-    def setUp(self):
-        super(TestSecurityGroupsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = security_groups_client.SecurityGroupsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_security_groups(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_security_groups,
-            'tempest.common.service_client.ServiceClient.get',
-            {"security_groups": self.FAKE_SECURITY_GROUP_INFO},
-            to_utf=bytes_body)
-
-    def test_list_security_groups_with_str_body(self):
-        self._test_list_security_groups()
-
-    def test_list_security_groups_with_bytes_body(self):
-        self._test_list_security_groups(bytes_body=True)
-
-    def _test_show_security_group(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_security_group,
-            'tempest.common.service_client.ServiceClient.get',
-            {"security_group": self.FAKE_SECURITY_GROUP_INFO[0]},
-            to_utf=bytes_body,
-            security_group_id='fake-id')
-
-    def test_show_security_group_with_str_body(self):
-        self._test_show_security_group()
-
-    def test_show_security_group_with_bytes_body(self):
-        self._test_show_security_group(bytes_body=True)
-
-    def _test_create_security_group(self, bytes_body=False):
-        post_body = {"name": "test", "description": "test_group"}
-        self.check_service_client_function(
-            self.client.create_security_group,
-            'tempest.common.service_client.ServiceClient.post',
-            {"security_group": self.FAKE_SECURITY_GROUP_INFO[0]},
-            to_utf=bytes_body,
-            kwargs=post_body)
-
-    def test_create_security_group_with_str_body(self):
-        self._test_create_security_group()
-
-    def test_create_security_group_with_bytes_body(self):
-        self._test_create_security_group(bytes_body=True)
-
-    def _test_update_security_group(self, bytes_body=False):
-        req_body = {"name": "test", "description": "test_group"}
-        self.check_service_client_function(
-            self.client.update_security_group,
-            'tempest.common.service_client.ServiceClient.put',
-            {"security_group": self.FAKE_SECURITY_GROUP_INFO[0]},
-            to_utf=bytes_body,
-            security_group_id='fake-id',
-            kwargs=req_body)
-
-    def test_update_security_group_with_str_body(self):
-        self._test_update_security_group()
-
-    def test_update_security_group_with_bytes_body(self):
-        self._test_update_security_group(bytes_body=True)
-
-    def test_delete_security_group(self):
-        self.check_service_client_function(
-            self.client.delete_security_group,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, security_group_id='fake-id')
-
-    def test_is_resource_deleted_true(self):
-        mod = ('tempest.services.compute.json.security_groups_client.'
-               'SecurityGroupsClient.show_security_group')
-        self.useFixture(mockpatch.Patch(mod, side_effect=lib_exc.NotFound))
-        self.assertTrue(self.client.is_resource_deleted('fake-id'))
-
-    def test_is_resource_deleted_false(self):
-        mod = ('tempest.services.compute.json.security_groups_client.'
-               'SecurityGroupsClient.show_security_group')
-        self.useFixture(mockpatch.Patch(mod, return_value='success'))
-        self.assertFalse(self.client.is_resource_deleted('fake-id'))
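
The `is_resource_deleted` tests removed here patch the client's own `show_security_group` so that it either raises `NotFound` (the resource is gone) or returns normally (it still exists). A hedged sketch of the same waiter check with `unittest.mock`; the `NotFound` class and the client below are illustrative stand-ins, not the `tempest_lib` originals::

    import unittest
    from unittest import mock


    class NotFound(Exception):
        """Illustrative stand-in for tempest_lib.exceptions.NotFound."""


    class FakeSecurityGroupsClient(object):

        def show_security_group(self, security_group_id):
            raise NotImplementedError  # patched in the tests below

        def is_resource_deleted(self, security_group_id):
            try:
                self.show_security_group(security_group_id)
            except NotFound:
                return True
            return False


    class TestIsResourceDeleted(unittest.TestCase):

        def test_is_resource_deleted_true(self):
            client = FakeSecurityGroupsClient()
            with mock.patch.object(client, 'show_security_group',
                                   side_effect=NotFound):
                self.assertTrue(client.is_resource_deleted('fake-id'))

        def test_is_resource_deleted_false(self):
            client = FakeSecurityGroupsClient()
            with mock.patch.object(client, 'show_security_group',
                                   return_value='success'):
                self.assertFalse(client.is_resource_deleted('fake-id'))
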
diff --git a/tempest/tests/services/compute/test_server_groups_client.py b/tempest/tests/services/compute/test_server_groups_client.py
deleted file mode 100644
index 5e058d6..0000000
--- a/tempest/tests/services/compute/test_server_groups_client.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import httplib2
-
-from oslotest import mockpatch
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import server_groups_client
-from tempest.tests.services.compute import base
-
-
-class TestServerGroupsClient(base.BaseComputeServiceTest):
-
-    server_group = {
-        "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
-        "name": "test",
-        "policies": ["anti-affinity"],
-        "members": [],
-        "metadata": {}}
-
-    def setUp(self):
-        super(TestServerGroupsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = server_groups_client.ServerGroupsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_create_server_group(self, bytes_body=False):
-        expected = {"server_group": TestServerGroupsClient.server_group}
-        self.check_service_client_function(
-            self.client.create_server_group,
-            'tempest.common.service_client.ServiceClient.post', expected,
-            bytes_body, name='fake-group', policies=['affinity'])
-
-    def test_create_server_group_str_body(self):
-        self._test_create_server_group(bytes_body=False)
-
-    def test_create_server_group_byte_body(self):
-        self._test_create_server_group(bytes_body=True)
-
-    def test_delete_server_group(self):
-        response = (httplib2.Response({'status': 204}), None)
-        self.useFixture(mockpatch.Patch(
-            'tempest.common.service_client.ServiceClient.delete',
-            return_value=response))
-        self.client.delete_server_group('fake-group')
-
-    def _test_list_server_groups(self, bytes_body=False):
-        expected = {"server_groups": [TestServerGroupsClient.server_group]}
-        self.check_service_client_function(
-            self.client.list_server_groups,
-            'tempest.common.service_client.ServiceClient.get',
-            expected, bytes_body)
-
-    def test_list_server_groups_str_body(self):
-        self._test_list_server_groups(bytes_body=False)
-
-    def test_list_server_groups_byte_body(self):
-        self._test_list_server_groups(bytes_body=True)
-
-    def _test_get_server_group(self, bytes_body=False):
-        expected = {"server_group": TestServerGroupsClient.server_group}
-        self.check_service_client_function(
-            self.client.get_server_group,
-            'tempest.common.service_client.ServiceClient.get',
-            expected, bytes_body,
-            server_group_id='5bbcc3c4-1da2-4437-a48a-66f15b1b13f9')
-
-    def test_get_server_group_str_body(self):
-        self._test_get_server_group(bytes_body=False)
-
-    def test_get_server_group_byte_body(self):
-        self._test_get_server_group(bytes_body=True)
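
`test_delete_server_group` above stubs the transport `delete` to return a 204 response and simply exercises the call without asserting on the result. A small stand-alone sketch of that shape which additionally verifies the URL the client builds; the `FakeServerGroupsClient` and the URL are hypothetical, not the deleted tempest client::

    import unittest
    from unittest import mock


    class FakeServerGroupsClient(object):
        """Hypothetical stand-in for the deleted ServerGroupsClient."""

        def __init__(self, transport):
            self.transport = transport

        def delete_server_group(self, server_group_id):
            resp, _ = self.transport.delete(
                '/os-server-groups/%s' % server_group_id)
            return resp


    class TestDeleteServerGroup(unittest.TestCase):

        def test_delete_server_group(self):
            transport = mock.Mock()
            transport.delete.return_value = (mock.Mock(status=204), None)
            client = FakeServerGroupsClient(transport)
            resp = client.delete_server_group('fake-group')
            transport.delete.assert_called_once_with(
                '/os-server-groups/fake-group')
            self.assertEqual(204, resp.status)
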
diff --git a/tempest/tests/services/compute/test_servers_client.py b/tempest/tests/services/compute/test_servers_client.py
deleted file mode 100644
index e347cf1..0000000
--- a/tempest/tests/services/compute/test_servers_client.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.compute.json import servers_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestServersClient(base.BaseComputeServiceTest):
-
-    FAKE_SERVERS = {'servers': [{
-        "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
-        "links": [
-            {
-                "href": "http://os.co/v2/616fb98f-46ca-475e-917e-2563e5a8cd19",
-                "rel": "self"
-            },
-            {
-                "href": "http://os.co/616fb98f-46ca-475e-917e-2563e5a8cd19",
-                "rel": "bookmark"
-            }
-        ],
-        "name": u"new\u1234-server-test"}]
-    }
-
-    FAKE_SERVER_GET = {'server': {
-        "accessIPv4": "",
-        "accessIPv6": "",
-        "addresses": {
-            "private": [
-                {
-                    "addr": "192.168.0.3",
-                    "version": 4
-                }
-            ]
-        },
-        "created": "2012-08-20T21:11:09Z",
-        "flavor": {
-            "id": "1",
-            "links": [
-                {
-                    "href": "http://os.com/openstack/flavors/1",
-                    "rel": "bookmark"
-                }
-            ]
-        },
-        "hostId": "65201c14a29663e06d0748e561207d998b343e1d164bfa0aafa9c45d",
-        "id": "893c7791-f1df-4c3d-8383-3caae9656c62",
-        "image": {
-            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
-            "links": [
-                {
-                    "href": "http://imgs/70a599e0-31e7-49b7-b260-868f441e862b",
-                    "rel": "bookmark"
-                }
-            ]
-        },
-        "links": [
-            {
-                "href": "http://v2/srvs/893c7791-f1df-4c3d-8383-3caae9656c62",
-                "rel": "self"
-            },
-            {
-                "href": "http://srvs/893c7791-f1df-4c3d-8383-3caae9656c62",
-                "rel": "bookmark"
-            }
-        ],
-        "metadata": {
-            u"My Server N\u1234me": u"Apa\u1234che1"
-        },
-        "name": u"new\u1234-server-test",
-        "progress": 0,
-        "status": "ACTIVE",
-        "tenant_id": "openstack",
-        "updated": "2012-08-20T21:11:09Z",
-        "user_id": "fake"}
-    }
-
-    FAKE_SERVER_POST = {"server": {
-        "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
-        "adminPass": "fake-admin-pass",
-        "security_groups": [
-            'fake-security-group-1',
-            'fake-security-group-2'
-        ],
-        "links": [
-            {
-                "href": "http://os.co/v2/616fb98f-46ca-475e-917e-2563e5a8cd19",
-                "rel": "self"
-            },
-            {
-                "href": "http://os.co/616fb98f-46ca-475e-917e-2563e5a8cd19",
-                "rel": "bookmark"
-            }
-        ],
-        "OS-DCF:diskConfig": "fake-disk-config"}
-    }
-
-    FAKE_ADDRESS = {"addresses": {
-        "private": [
-            {
-                "addr": "192.168.0.3",
-                "version": 4
-            }
-        ]}
-    }
-
-    server_id = FAKE_SERVER_GET['server']['id']
-    network_id = 'a6b0875b-6b5d-4a5a-81eb-0c3aa62e5fdb'
-
-    def setUp(self):
-        super(TestServersClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = servers_client.ServersClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def test_list_servers_with_str_body(self):
-        self._test_list_servers()
-
-    def test_list_servers_with_bytes_body(self):
-        self._test_list_servers(bytes_body=True)
-
-    def _test_list_servers(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_servers,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_SERVERS,
-            bytes_body)
-
-    def test_show_server_with_str_body(self):
-        self._test_show_server()
-
-    def test_show_server_with_bytes_body(self):
-        self._test_show_server(bytes_body=True)
-
-    def _test_show_server(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_server,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_SERVER_GET,
-            bytes_body,
-            server_id=self.server_id
-            )
-
-    def test_delete_server(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.delete_server,
-            'tempest.common.service_client.ServiceClient.delete',
-            {},
-            status=204,
-            server_id=self.server_id
-            )
-
-    def test_create_server_with_str_body(self):
-        self._test_create_server()
-
-    def test_create_server_with_bytes_body(self):
-        self._test_create_server(True)
-
-    def _test_create_server(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_server,
-            'tempest.common.service_client.ServiceClient.post',
-            self.FAKE_SERVER_POST,
-            bytes_body,
-            status=202,
-            name='fake-name',
-            imageRef='fake-image-ref',
-            flavorRef='fake-flavor-ref'
-            )
-
-    def test_list_addresses_with_str_body(self):
-        self._test_list_addresses()
-
-    def test_list_addresses_with_bytes_body(self):
-        self._test_list_addresses(True)
-
-    def _test_list_addresses(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_addresses,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_ADDRESS,
-            bytes_body,
-            server_id=self.server_id
-            )
-
-    def test_list_addresses_by_network_with_str_body(self):
-        self._test_list_addresses_by_network()
-
-    def test_list_addresses_by_network_with_bytes_body(self):
-        self._test_list_addresses_by_network(True)
-
-    def _test_list_addresses_by_network(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_addresses_by_network,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_ADDRESS['addresses'],
-            bytes_body,
-            server_id=self.server_id,
-            network_id=self.network_id
-            )
diff --git a/tempest/tests/services/compute/test_services_client.py b/tempest/tests/services/compute/test_services_client.py
deleted file mode 100644
index fce28e8..0000000
--- a/tempest/tests/services/compute/test_services_client.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import services_client
-from tempest.tests.services.compute import base
-
-
-class TestServicesClient(base.BaseComputeServiceTest):
-
-    FAKE_SERVICES = {
-        "services":
-        [{
-            "status": "enabled",
-            "binary": "nova-conductor",
-            "zone": "internal",
-            "state": "up",
-            "updated_at": "2015-08-19T06:50:55.000000",
-            "host": "controller",
-            "disabled_reason": None,
-            "id": 1
-        }]
-    }
-
-    FAKE_SERVICE = {
-        "service":
-        {
-            "status": "enabled",
-            "binary": "nova-conductor",
-            "host": "controller"
-        }
-    }
-
-    def setUp(self):
-        super(TestServicesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = services_client.ServicesClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def test_list_services_with_str_body(self):
-        self.check_service_client_function(
-            self.client.list_services,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_SERVICES)
-
-    def test_list_services_with_bytes_body(self):
-        self.check_service_client_function(
-            self.client.list_services,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_SERVICES, to_utf=True)
-
-    def _test_enable_service(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.enable_service,
-            'tempest.common.service_client.ServiceClient.put',
-            self.FAKE_SERVICE,
-            bytes_body,
-            host_name="nova-conductor", binary="controller")
-
-    def test_enable_service_with_str_body(self):
-        self._test_enable_service()
-
-    def test_enable_service_with_bytes_body(self):
-        self._test_enable_service(bytes_body=True)
-
-    def _test_disable_service(self, bytes_body=False):
-        fake_service = copy.deepcopy(self.FAKE_SERVICE)
-        fake_service["service"]["status"] = "disable"
-
-        self.check_service_client_function(
-            self.client.disable_service,
-            'tempest.common.service_client.ServiceClient.put',
-            fake_service,
-            bytes_body,
-            host_name="nova-conductor", binary="controller")
-
-    def test_disable_service_with_str_body(self):
-        self._test_disable_service()
-
-    def test_disable_service_with_bytes_body(self):
-        self._test_disable_service(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_snapshots_client.py b/tempest/tests/services/compute/test_snapshots_client.py
deleted file mode 100644
index c24c6ae..0000000
--- a/tempest/tests/services/compute/test_snapshots_client.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-
-from tempest.services.compute.json import snapshots_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestSnapshotsClient(base.BaseComputeServiceTest):
-
-    FAKE_SNAPSHOT = {
-        "createdAt": "2015-10-02T16:27:54.724209",
-        "displayDescription": u"Another \u1234.",
-        "displayName": u"v\u1234-001",
-        "id": "100",
-        "size": 100,
-        "status": "available",
-        "volumeId": "12"
-    }
-
-    FAKE_SNAPSHOTS = {"snapshots": [FAKE_SNAPSHOT]}
-
-    def setUp(self):
-        super(TestSnapshotsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = snapshots_client.SnapshotsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_create_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_snapshot,
-            'tempest.common.service_client.ServiceClient.post',
-            {"snapshot": self.FAKE_SNAPSHOT},
-            to_utf=bytes_body, status=200,
-            volume_id=self.FAKE_SNAPSHOT["volumeId"])
-
-    def test_create_snapshot_with_str_body(self):
-        self._test_create_snapshot()
-
-    def test_create_shapshot_with_bytes_body(self):
-        self._test_create_snapshot(bytes_body=True)
-
-    def _test_show_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_snapshot,
-            'tempest.common.service_client.ServiceClient.get',
-            {"snapshot": self.FAKE_SNAPSHOT},
-            to_utf=bytes_body, snapshot_id=self.FAKE_SNAPSHOT["id"])
-
-    def test_show_snapshot_with_str_body(self):
-        self._test_show_snapshot()
-
-    def test_show_snapshot_with_bytes_body(self):
-        self._test_show_snapshot(bytes_body=True)
-
-    def _test_list_snapshots(self, bytes_body=False, **params):
-        self.check_service_client_function(
-            self.client.list_snapshots,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_SNAPSHOTS, to_utf=bytes_body, **params)
-
-    def test_list_snapshots_with_str_body(self):
-        self._test_list_snapshots()
-
-    def test_list_snapshots_with_byte_body(self):
-        self._test_list_snapshots(bytes_body=True)
-
-    def test_list_snapshots_with_params(self):
-        self._test_list_snapshots('fake')
-
-    def test_delete_snapshot(self):
-        self.check_service_client_function(
-            self.client.delete_snapshot,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, snapshot_id=self.FAKE_SNAPSHOT['id'])
-
-    def test_is_resource_deleted_true(self):
-        module = ('tempest.services.compute.json.snapshots_client.'
-                  'SnapshotsClient.show_snapshot')
-        self.useFixture(mockpatch.Patch(
-            module, side_effect=lib_exc.NotFound))
-        self.assertTrue(self.client.is_resource_deleted('fake-id'))
-
-    def test_is_resource_deleted_false(self):
-        module = ('tempest.services.compute.json.snapshots_client.'
-                  'SnapshotsClient.show_snapshot')
-        self.useFixture(mockpatch.Patch(
-            module, return_value={}))
-        self.assertFalse(self.client.is_resource_deleted('fake-id'))
diff --git a/tempest/tests/services/compute/test_tenant_networks_client.py b/tempest/tests/services/compute/test_tenant_networks_client.py
deleted file mode 100644
index 691792a..0000000
--- a/tempest/tests/services/compute/test_tenant_networks_client.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import tenant_networks_client
-from tempest.tests.services.compute import base
-
-
-class TestTenantNetworksClient(base.BaseComputeServiceTest):
-
-    FAKE_NETWORK = {
-        "cidr": "None",
-        "id": "c2329eb4-cc8e-4439-ac4c-932369309e36",
-        "label": u'\u30d7'
-        }
-
-    FAKE_NETWORKS = [FAKE_NETWORK]
-
-    NETWORK_ID = FAKE_NETWORK['id']
-
-    def setUp(self):
-        super(TestTenantNetworksClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = tenant_networks_client.TenantNetworksClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_tenant_networks(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_tenant_networks,
-            'tempest.common.service_client.ServiceClient.get',
-            {"networks": self.FAKE_NETWORKS},
-            bytes_body)
-
-    def test_list_tenant_networks_with_str_body(self):
-        self._test_list_tenant_networks()
-
-    def test_list_tenant_networks_with_bytes_body(self):
-        self._test_list_tenant_networks(bytes_body=True)
-
-    def _test_show_tenant_network(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_tenant_network,
-            'tempest.common.service_client.ServiceClient.get',
-            {"network": self.FAKE_NETWORK},
-            bytes_body,
-            network_id=self.NETWORK_ID)
-
-    def test_show_tenant_network_with_str_body(self):
-        self._test_show_tenant_network()
-
-    def test_show_tenant_network_with_bytes_body(self):
-        self._test_show_tenant_network(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_tenant_usages_client.py b/tempest/tests/services/compute/test_tenant_usages_client.py
deleted file mode 100644
index 58e0b7a..0000000
--- a/tempest/tests/services/compute/test_tenant_usages_client.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import tenant_usages_client
-from tempest.tests.services.compute import base
-
-
-class TestTenantUsagesClient(base.BaseComputeServiceTest):
-
-    FAKE_SERVER_USAGES = [{
-        "ended_at": None,
-        "flavor": "m1.tiny",
-        "hours": 1.0,
-        "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0",
-        "local_gb": 1,
-        "memory_mb": 512,
-        "name": "new-server-test",
-        "started_at": "2012-10-08T20:10:44.541277",
-        "state": "active",
-        "tenant_id": "openstack",
-        "uptime": 3600,
-        "vcpus": 1
-        }]
-
-    FAKE_TENANT_USAGES = [{
-        "server_usages": FAKE_SERVER_USAGES,
-        "start": "2012-10-08T21:10:44.587336",
-        "stop": "2012-10-08T22:10:44.587336",
-        "tenant_id": "openstack",
-        "total_hours": 1,
-        "total_local_gb_usage": 1,
-        "total_memory_mb_usage": 512,
-        "total_vcpus_usage": 1
-        }]
-
-    def setUp(self):
-        super(TestTenantUsagesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = tenant_usages_client.TenantUsagesClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_tenant_usages(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_tenant_usages,
-            'tempest.common.service_client.ServiceClient.get',
-            {"tenant_usages": self.FAKE_TENANT_USAGES},
-            to_utf=bytes_body)
-
-    def test_list_tenant_usages_with_str_body(self):
-        self._test_list_tenant_usages()
-
-    def test_list_tenant_usages_with_bytes_body(self):
-        self._test_list_tenant_usages(bytes_body=True)
-
-    def _test_show_tenant_usage(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_tenant_usage,
-            'tempest.common.service_client.ServiceClient.get',
-            {"tenant_usage": self.FAKE_TENANT_USAGES[0]},
-            to_utf=bytes_body,
-            tenant_id='openstack')
-
-    def test_show_tenant_usage_with_str_body(self):
-        self._test_show_tenant_usage()
-
-    def test_show_tenant_usage_with_bytes_body(self):
-        self._test_show_tenant_usage(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_volumes_client.py b/tempest/tests/services/compute/test_volumes_client.py
deleted file mode 100644
index 33d4bad..0000000
--- a/tempest/tests/services/compute/test_volumes_client.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-
-from tempest.services.compute.json import volumes_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestVolumesClient(base.BaseComputeServiceTest):
-
-    FAKE_VOLUME = {
-        "id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-        "displayName": u"v\u12345ol-001",
-        "displayDescription": u"Another \u1234volume.",
-        "size": 30,
-        "status": "Active",
-        "volumeType": "289da7f8-6440-407c-9fb4-7db01ec49164",
-        "metadata": {
-            "contents": "junk"
-        },
-        "availabilityZone": "us-east1",
-        "snapshotId": None,
-        "attachments": [],
-        "createdAt": "2012-02-14T20:53:07Z"
-    }
-
-    FAKE_VOLUMES = {"volumes": [FAKE_VOLUME]}
-
-    def setUp(self):
-        super(TestVolumesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = volumes_client.VolumesClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_volumes(self, bytes_body=False, **params):
-        self.check_service_client_function(
-            self.client.list_volumes,
-            'tempest.common.service_client.ServiceClient.get',
-            self.FAKE_VOLUMES, to_utf=bytes_body, **params)
-
-    def test_list_volumes_with_str_body(self):
-        self._test_list_volumes()
-
-    def test_list_volumes_with_byte_body(self):
-        self._test_list_volumes(bytes_body=True)
-
-    def test_list_volumes_with_params(self):
-        self._test_list_volumes(name='fake')
-
-    def _test_show_volume(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_volume,
-            'tempest.common.service_client.ServiceClient.get',
-            {"volume": self.FAKE_VOLUME},
-            to_utf=bytes_body, volume_id=self.FAKE_VOLUME['id'])
-
-    def test_show_volume_with_str_body(self):
-        self._test_show_volume()
-
-    def test_show_volume_with_bytes_body(self):
-        self._test_show_volume(bytes_body=True)
-
-    def _test_create_volume(self, bytes_body=False):
-        post_body = copy.deepcopy(self.FAKE_VOLUME)
-        del post_body['id']
-        del post_body['createdAt']
-        del post_body['status']
-        self.check_service_client_function(
-            self.client.create_volume,
-            'tempest.common.service_client.ServiceClient.post',
-            {"volume": self.FAKE_VOLUME},
-            to_utf=bytes_body, status=200, **post_body)
-
-    def test_create_volume_with_str_body(self):
-        self._test_create_volume()
-
-    def test_create_volume_with_bytes_body(self):
-        self._test_create_volume(bytes_body=True)
-
-    def test_delete_volume(self):
-        self.check_service_client_function(
-            self.client.delete_volume,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, volume_id=self.FAKE_VOLUME['id'])
-
-    def test_is_resource_deleted_true(self):
-        module = ('tempest.services.compute.json.volumes_client.'
-                  'VolumesClient.show_volume')
-        self.useFixture(mockpatch.Patch(
-            module, side_effect=lib_exc.NotFound))
-        self.assertTrue(self.client.is_resource_deleted('fake-id'))
-
-    def test_is_resource_deleted_false(self):
-        module = ('tempest.services.compute.json.volumes_client.'
-                  'VolumesClient.show_volume')
-        self.useFixture(mockpatch.Patch(
-            module, return_value={}))
-        self.assertFalse(self.client.is_resource_deleted('fake-id'))
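
`_test_create_volume` above derives its POST body from the canned volume by deep-copying the fixture and deleting the server-generated fields, so the request can never drift from the response fixture. A tiny illustration of that derivation, using a trimmed copy of the fixture above::

    import copy

    FAKE_VOLUME = {
        "id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
        "displayName": "vol-001",
        "size": 30,
        "status": "Active",
        "createdAt": "2012-02-14T20:53:07Z",
    }

    # The request body is derived from the canned response; only the
    # fields the server generates are stripped before the fake POST.
    post_body = copy.deepcopy(FAKE_VOLUME)
    for server_generated in ('id', 'createdAt', 'status'):
        del post_body[server_generated]

    assert post_body == {"displayName": "vol-001", "size": 30}
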
diff --git a/tempest/tests/services/test_base_microversion_client.py b/tempest/tests/services/test_base_microversion_client.py
new file mode 100644
index 0000000..11b8170
--- /dev/null
+++ b/tempest/tests/services/test_base_microversion_client.py
@@ -0,0 +1,75 @@
+# Copyright 2016 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+import mock
+from tempest_lib.common import rest_client
+
+from tempest.services import base_microversion_client
+from tempest.tests import fake_auth_provider
+from tempest.tests.services.compute import base
+
+
+class TestClientWithoutMicroversionHeader(base.BaseComputeServiceTest):
+
+    def setUp(self):
+        super(TestClientWithoutMicroversionHeader, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = base_microversion_client.BaseMicroversionClient(
+            fake_auth, 'compute', 'regionOne', 'X-OpenStack-Nova-API-Version')
+
+    def test_no_microversion_header(self):
+        header = self.client.get_headers()
+        self.assertNotIn(self.client.api_microversion_header_name, header)
+
+    def test_no_microversion_header_in_raw_request(self):
+        def raw_request(*args, **kwargs):
+            self.assertNotIn(self.client.api_microversion_header_name,
+                             kwargs['headers'])
+            return (httplib2.Response({'status': 200}), {})
+
+        with mock.patch.object(rest_client.RestClient,
+                               'raw_request') as mock_get:
+            mock_get.side_effect = raw_request
+            self.client.get('fake_url')
+
+
+class TestClientWithMicroversionHeader(base.BaseComputeServiceTest):
+
+    def setUp(self):
+        super(TestClientWithMicroversionHeader, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = base_microversion_client.BaseMicroversionClient(
+            fake_auth, 'compute', 'regionOne', 'X-OpenStack-Nova-API-Version')
+        self.client.set_api_microversion('2.2')
+
+    def test_microversion_header(self):
+        header = self.client.get_headers()
+        self.assertIn(self.client.api_microversion_header_name, header)
+        self.assertEqual(self.client.api_microversion,
+                         header[self.client.api_microversion_header_name])
+
+    def test_microversion_header_in_raw_request(self):
+        def raw_request(*args, **kwargs):
+            self.assertIn(self.client.api_microversion_header_name,
+                          kwargs['headers'])
+            self.assertEqual(
+                self.client.api_microversion,
+                kwargs['headers'][self.client.api_microversion_header_name])
+            return (httplib2.Response({'status': 200}), {})
+
+        with mock.patch.object(rest_client.RestClient,
+                               'raw_request') as mock_get:
+            mock_get.side_effect = raw_request
+            self.client.get('fake_url')
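
Both new test classes assert on the headers of the outgoing request by replacing `RestClient.raw_request` with a side effect that inspects `kwargs['headers']` before returning a canned 200 response. A self-contained sketch of the same idea; `FakeRestClient` is a simplified, hypothetical stand-in rather than the real tempest_lib `RestClient`::

    import unittest
    from unittest import mock


    class FakeRestClient(object):
        """Simplified stand-in for tempest_lib's RestClient (illustrative)."""

        api_microversion_header_name = 'X-OpenStack-Nova-API-Version'

        def __init__(self):
            self.api_microversion = None

        def get_headers(self):
            headers = {'Accept': 'application/json'}
            if self.api_microversion:
                headers[self.api_microversion_header_name] = (
                    self.api_microversion)
            return headers

        def raw_request(self, url, method, headers=None, body=None):
            raise NotImplementedError  # replaced by the mock in the test

        def get(self, url):
            return self.raw_request(url, 'GET', headers=self.get_headers())


    class TestMicroversionHeader(unittest.TestCase):

        def test_header_sent_when_microversion_is_set(self):
            client = FakeRestClient()
            client.api_microversion = '2.2'

            def fake_raw_request(url, method, headers=None, body=None):
                # Every outgoing request must carry the microversion header.
                self.assertEqual(
                    '2.2', headers[client.api_microversion_header_name])
                return ({'status': '200'}, {})

            with mock.patch.object(client, 'raw_request',
                                   side_effect=fake_raw_request):
                client.get('fake_url')
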
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index 16f1ac7..0ec2a5d 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -25,8 +25,7 @@
 
 
 class StressFrameworkTest(base.TestCase):
-    """Basic test for the stress test framework.
-    """
+    """Basic test for the stress test framework."""
 
     def _cmd(self, cmd, param):
         """Executes specified command."""
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index ce3eb7e..98b045a 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -140,7 +140,7 @@
                 self.fail('%s is not listed in the valid service tag list'
                           % service)
             except KeyError:
-                # NOTE(mtreinish): This condition is to test for a entry in
+                # NOTE(mtreinish): This condition is to test for an entry in
                 # the outer decorator list but not in the service_list dict.
                 # However, because we're looping over the service_list dict
                 # it's unlikely we'll trigger this. So manual review is still
diff --git a/tempest/tests/test_glance_http.py b/tempest/tests/test_glance_http.py
index 105caec..db9db34 100644
--- a/tempest/tests/test_glance_http.py
+++ b/tempest/tests/test_glance_http.py
@@ -13,14 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import socket
-
 import mock
-from oslo_serialization import jsonutils as json
 from oslotest import mockpatch
 import six
 from six.moves import http_client as httplib
-from tempest_lib import exceptions as lib_exc
 
 from tempest.common import glance_http
 from tempest import exceptions
@@ -56,60 +52,6 @@
                         'getresponse', return_value=resp))
         return resp
 
-    def test_json_request_without_content_type_header_in_response(self):
-        self._set_response_fixture({}, 200, 'fake_response_body')
-        self.assertRaises(lib_exc.InvalidContentType,
-                          self.client.json_request, 'GET', '/images')
-
-    def test_json_request_with_xml_content_type_header_in_request(self):
-        self.assertRaises(lib_exc.InvalidContentType,
-                          self.client.json_request, 'GET', '/images',
-                          headers={'Content-Type': 'application/xml'})
-
-    def test_json_request_with_xml_content_type_header_in_response(self):
-        self._set_response_fixture({'content-type': 'application/xml'},
-                                   200, 'fake_response_body')
-        self.assertRaises(lib_exc.InvalidContentType,
-                          self.client.json_request, 'GET', '/images')
-
-    def test_json_request_with_json_content_type_header_only_in_resp(self):
-        self._set_response_fixture({'content-type': 'application/json'},
-                                   200, 'fake_response_body')
-        resp, body = self.client.json_request('GET', '/images')
-        self.assertEqual(200, resp.status)
-        self.assertEqual('fake_response_body', body)
-
-    def test_json_request_with_json_content_type_header_in_req_and_resp(self):
-        self._set_response_fixture({'content-type': 'application/json'},
-                                   200, 'fake_response_body')
-        resp, body = self.client.json_request('GET', '/images', headers={
-            'Content-Type': 'application/json'})
-        self.assertEqual(200, resp.status)
-        self.assertEqual('fake_response_body', body)
-
-    def test_json_request_fails_to_json_loads(self):
-        self._set_response_fixture({'content-type': 'application/json'},
-                                   200, 'fake_response_body')
-        self.useFixture(mockpatch.PatchObject(json, 'loads',
-                        side_effect=ValueError()))
-        resp, body = self.client.json_request('GET', '/images')
-        self.assertEqual(200, resp.status)
-        self.assertEqual(body, 'fake_response_body')
-
-    def test_json_request_socket_timeout(self):
-        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
-                                              'request',
-                                              side_effect=socket.timeout()))
-        self.assertRaises(exceptions.TimeoutException,
-                          self.client.json_request, 'GET', '/images')
-
-    def test_json_request_endpoint_not_found(self):
-        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
-                                              'request',
-                                              side_effect=socket.gaierror()))
-        self.assertRaises(exceptions.EndpointNotFound,
-                          self.client.json_request, 'GET', '/images')
-
     def test_raw_request(self):
         self._set_response_fixture({}, 200, 'fake_response_body')
         resp, body = self.client.raw_request('GET', '/images')
@@ -141,22 +83,22 @@
         self.assertEqual(call_count - 1, req_body.tell())
 
     def test_get_connection_class_for_https(self):
-        conn_class = self.client.get_connection_class('https')
+        conn_class = self.client._get_connection_class('https')
         self.assertEqual(glance_http.VerifiedHTTPSConnection, conn_class)
 
     def test_get_connection_class_for_http(self):
-        conn_class = (self.client.get_connection_class('http'))
+        conn_class = (self.client._get_connection_class('http'))
         self.assertEqual(httplib.HTTPConnection, conn_class)
 
     def test_get_connection_http(self):
-        self.assertTrue(isinstance(self.client.get_connection(),
+        self.assertTrue(isinstance(self.client._get_connection(),
                                    httplib.HTTPConnection))
 
     def test_get_connection_https(self):
         endpoint = 'https://fake_url.com'
         self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
         self.client = glance_http.HTTPClient(self.fake_auth, {})
-        self.assertTrue(isinstance(self.client.get_connection(),
+        self.assertTrue(isinstance(self.client._get_connection(),
                                    glance_http.VerifiedHTTPSConnection))
 
     def test_get_connection_url_not_fount(self):
@@ -164,37 +106,37 @@
                                               side_effect=httplib.InvalidURL()
                                               ))
         self.assertRaises(exceptions.EndpointNotFound,
-                          self.client.get_connection)
+                          self.client._get_connection)
 
     def test_get_connection_kwargs_default_for_http(self):
-        kwargs = self.client.get_connection_kwargs('http')
+        kwargs = self.client._get_connection_kwargs('http')
         self.assertEqual(600, kwargs['timeout'])
         self.assertEqual(1, len(kwargs.keys()))
 
     def test_get_connection_kwargs_set_timeout_for_http(self):
-        kwargs = self.client.get_connection_kwargs('http', timeout=10,
-                                                   ca_certs='foo')
+        kwargs = self.client._get_connection_kwargs('http', timeout=10,
+                                                    ca_certs='foo')
         self.assertEqual(10, kwargs['timeout'])
         # nothing more than timeout is evaluated for http connections
         self.assertEqual(1, len(kwargs.keys()))
 
     def test_get_connection_kwargs_default_for_https(self):
-        kwargs = self.client.get_connection_kwargs('https')
+        kwargs = self.client._get_connection_kwargs('https')
         self.assertEqual(600, kwargs['timeout'])
-        self.assertEqual(None, kwargs['ca_certs'])
-        self.assertEqual(None, kwargs['cert_file'])
-        self.assertEqual(None, kwargs['key_file'])
+        self.assertIsNone(kwargs['ca_certs'])
+        self.assertIsNone(kwargs['cert_file'])
+        self.assertIsNone(kwargs['key_file'])
         self.assertEqual(False, kwargs['insecure'])
         self.assertEqual(True, kwargs['ssl_compression'])
         self.assertEqual(6, len(kwargs.keys()))
 
     def test_get_connection_kwargs_set_params_for_https(self):
-        kwargs = self.client.get_connection_kwargs('https', timeout=10,
-                                                   ca_certs='foo',
-                                                   cert_file='/foo/bar.cert',
-                                                   key_file='/foo/key.pem',
-                                                   insecure=True,
-                                                   ssl_compression=False)
+        kwargs = self.client._get_connection_kwargs('https', timeout=10,
+                                                    ca_certs='foo',
+                                                    cert_file='/foo/bar.cert',
+                                                    key_file='/foo/key.pem',
+                                                    insecure=True,
+                                                    ssl_compression=False)
         self.assertEqual(10, kwargs['timeout'])
         self.assertEqual('foo', kwargs['ca_certs'])
         self.assertEqual('/foo/bar.cert', kwargs['cert_file'])
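
Besides tracking the rename of the connection helpers to private `_get_*` names, the hunk above swaps `assertEqual(None, ...)` for `assertIsNone(...)`, the idiomatic identity check with a clearer failure message. A trivial illustration of the preferred form::

    import unittest


    class TestAssertNoneStyle(unittest.TestCase):

        def test_default_kwargs(self):
            kwargs = {'timeout': 600, 'ca_certs': None}
            # Identity check with a clear failure message:
            self.assertIsNone(kwargs['ca_certs'])
            # The equivalent form the diff replaces:
            #     self.assertEqual(None, kwargs['ca_certs'])
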
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 62d2aee..55f00ef 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -17,7 +17,8 @@
 
 
 class HackingTestCase(base.TestCase):
-    """
+    """Test class for hacking rule
+
     This class tests the hacking checks in tempest.hacking.checks by passing
     strings to the check methods like the pep8/flake8 parser would. The parser
     loops over each line in the file and then passes the parameters to the
diff --git a/tempest/tests/test_microversions.py b/tempest/tests/test_microversions.py
new file mode 100644
index 0000000..6738641
--- /dev/null
+++ b/tempest/tests/test_microversions.py
@@ -0,0 +1,153 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+import testtools
+
+from tempest.api.compute import base as compute_base
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class VersionTestNoneTolatest(compute_base.BaseV2ComputeTest):
+    min_microversion = None
+    max_microversion = 'latest'
+
+
+class VersionTestNoneTo2_2(compute_base.BaseV2ComputeTest):
+    min_microversion = None
+    max_microversion = '2.2'
+
+
+class VersionTest2_3ToLatest(compute_base.BaseV2ComputeTest):
+    min_microversion = '2.3'
+    max_microversion = 'latest'
+
+
+class VersionTest2_5To2_10(compute_base.BaseV2ComputeTest):
+    min_microversion = '2.5'
+    max_microversion = '2.10'
+
+
+class VersionTest2_10To2_10(compute_base.BaseV2ComputeTest):
+    min_microversion = '2.10'
+    max_microversion = '2.10'
+
+
+class InvalidVersionTest(compute_base.BaseV2ComputeTest):
+    min_microversion = '2.11'
+    max_microversion = '2.1'
+
+
+class TestMicroversionsTestsClass(base.TestCase):
+
+    def setUp(self):
+        super(TestMicroversionsTestsClass, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate',
+                       fake_config.FakePrivate)
+
+    def _test_version(self, cfg_min, cfg_max,
+                      expected_pass_tests,
+                      expected_skip_tests):
+        cfg.CONF.set_default('min_microversion',
+                             cfg_min, group='compute-feature-enabled')
+        cfg.CONF.set_default('max_microversion',
+                             cfg_max, group='compute-feature-enabled')
+        try:
+            for test_class in expected_pass_tests:
+                test_class.skip_checks()
+            for test_class in expected_skip_tests:
+                self.assertRaises(testtools.TestCase.skipException,
+                                  test_class.skip_checks)
+        except testtools.TestCase.skipException as e:
+            raise testtools.TestCase.failureException(e.message)
+
+    def test_config_version_none_none(self):
+        expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2]
+        expected_skip_tests = [VersionTest2_3ToLatest, VersionTest2_5To2_10,
+                               VersionTest2_10To2_10]
+        self._test_version(None, None,
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_version_none_23(self):
+        expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2,
+                               VersionTest2_3ToLatest]
+        expected_skip_tests = [VersionTest2_5To2_10, VersionTest2_10To2_10]
+        self._test_version(None, '2.3',
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_version_22_latest(self):
+        expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2,
+                               VersionTest2_3ToLatest, VersionTest2_5To2_10,
+                               VersionTest2_10To2_10]
+        expected_skip_tests = []
+        self._test_version('2.2', 'latest',
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_version_22_23(self):
+        expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2,
+                               VersionTest2_3ToLatest]
+        expected_skip_tests = [VersionTest2_5To2_10, VersionTest2_10To2_10]
+        self._test_version('2.2', '2.3',
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_version_210_210(self):
+        expected_pass_tests = [VersionTestNoneTolatest,
+                               VersionTest2_3ToLatest,
+                               VersionTest2_5To2_10,
+                               VersionTest2_10To2_10]
+        expected_skip_tests = [VersionTestNoneTo2_2]
+        self._test_version('2.10', '2.10',
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_version_none_latest(self):
+        expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2,
+                               VersionTest2_3ToLatest, VersionTest2_5To2_10,
+                               VersionTest2_10To2_10]
+        expected_skip_tests = []
+        self._test_version(None, 'latest',
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_version_latest_latest(self):
+        expected_pass_tests = [VersionTestNoneTolatest, VersionTest2_3ToLatest]
+        expected_skip_tests = [VersionTestNoneTo2_2, VersionTest2_5To2_10,
+                               VersionTest2_10To2_10]
+        self._test_version('latest', 'latest',
+                           expected_pass_tests,
+                           expected_skip_tests)
+
+    def test_config_invalid_version(self):
+        cfg.CONF.set_default('min_microversion',
+                             '2.5', group='compute-feature-enabled')
+        cfg.CONF.set_default('max_microversion',
+                             '2.1', group='compute-feature-enabled')
+        self.assertRaises(exceptions.InvalidAPIVersionRange,
+                          VersionTestNoneTolatest.skip_checks)
+
+    def test_config_version_invalid_test_version(self):
+        cfg.CONF.set_default('min_microversion',
+                             None, group='compute-feature-enabled')
+        cfg.CONF.set_default('max_microversion',
+                             '2.13', group='compute-feature-enabled')
+        self.assertRaises(exceptions.InvalidAPIVersionRange,
+                          InvalidVersionTest.skip_checks)
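
`_test_version` runs `skip_checks()` for every class and expects the classes whose `[min_microversion, max_microversion]` range overlaps the configured range to pass, while the rest must raise `skipException`; judging from the expectations above, an unset config value behaves like the base microversion 2.1 and `'latest'` means no upper bound. A simplified, pure-Python illustration of that overlap rule (not tempest's implementation), consistent with the cases encoded in the new tests::

    BASE = (2, 1)          # floor used when a config option is unset
    NO_LIMIT = (999, 999)  # stands in for 'latest'

    def _to_tuple(version, unset):
        if version is None:
            return unset
        if version == 'latest':
            return NO_LIMIT
        major, minor = version.split('.')
        return (int(major), int(minor))

    def overlaps(test_min, test_max, cfg_min, cfg_max):
        """True if the test's microversion range intersects the config range."""
        lo = max(_to_tuple(test_min, BASE), _to_tuple(cfg_min, BASE))
        hi = min(_to_tuple(test_max, NO_LIMIT), _to_tuple(cfg_max, BASE))
        return lo <= hi

    # With min=2.2 / max=2.3 configured, VersionTest2_5To2_10 is skipped ...
    assert overlaps('2.5', '2.10', '2.2', '2.3') is False
    # ... while VersionTestNoneTo2_2 still runs.
    assert overlaps(None, '2.2', '2.2', '2.3') is True
    # With both options unset the range collapses to 2.1, so a 2.3+ test
    # is skipped, as in test_config_version_none_none.
    assert overlaps('2.3', 'latest', None, None) is False
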
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
deleted file mode 100644
index b0bfdf7..0000000
--- a/tempest/thirdparty/README.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. _third_party_field_guide:
-
-Tempest Field Guide to Third Party API tests
-============================================
-
-
-What are these tests?
----------------------
-
-Third party tests are tests for non native OpenStack APIs that are
-part of OpenStack projects. If we ship an API, we're really required
-to ensure that it's working.
-
-An example is that Nova Compute currently has EC2 API support in tree,
-which should be tested as part of normal process.
-
-
-Why are these tests in tempest?
--------------------------------
-
-If we ship an API in an OpenStack component, there should be tests in
-tempest to exercise it in some way.
-
-
-Scope of these tests
---------------------
-
-Third party API testing should be limited to the functional testing of
-third party API compliance. Complex scenarios should be avoided, and
-instead exercised with the OpenStack API, unless the third party API
-can't be tested without those scenarios.
-
-Whenever possible third party API testing should use a client as close
-to the third party API as possible. The point of these tests is API
-validation.
diff --git a/tempest/thirdparty/__init__.py b/tempest/thirdparty/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/__init__.py b/tempest/thirdparty/boto/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/boto/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
deleted file mode 100644
index 1ced180..0000000
--- a/tempest/thirdparty/boto/test.py
+++ /dev/null
@@ -1,688 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import logging as orig_logging
-import os
-import re
-
-import boto
-from boto import ec2
-from boto import exception
-from boto import s3
-from oslo_log import log as logging
-import six
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-import tempest.clients
-from tempest.common.utils import file_utils
-from tempest import config
-from tempest import exceptions
-import tempest.test
-from tempest.thirdparty.boto.utils import wait
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def decision_maker():
-    A_I_IMAGES_READY = True  # ari,ami,aki
-    S3_CAN_CONNECT_ERROR = None
-    EC2_CAN_CONNECT_ERROR = None
-    secret_matcher = re.compile("[A-Za-z0-9+/]{32,}")  # 40 in other system
-    id_matcher = re.compile("[A-Za-z0-9]{20,}")
-
-    def all_read(*args):
-        return all(map(file_utils.have_effective_read_access, args))
-
-    materials_path = CONF.boto.s3_materials_path
-    ami_path = materials_path + os.sep + CONF.boto.ami_manifest
-    aki_path = materials_path + os.sep + CONF.boto.aki_manifest
-    ari_path = materials_path + os.sep + CONF.boto.ari_manifest
-
-    A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
-    boto_logger = logging.getLogger('boto')
-    level = boto_logger.logger.level
-    # suppress logging for boto
-    boto_logger.logger.setLevel(orig_logging.CRITICAL)
-
-    def _cred_sub_check(connection_data):
-        if not id_matcher.match(connection_data["aws_access_key_id"]):
-            raise Exception("Invalid AWS access Key")
-        if not secret_matcher.match(connection_data["aws_secret_access_key"]):
-            raise Exception("Invalid AWS secret Key")
-        raise Exception("Unknown (Authentication?) Error")
-    # NOTE(andreaf) Setting up an extra manager here is redundant,
-    # and should be removed.
-    openstack = tempest.clients.Manager()
-    try:
-        if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
-            raise Exception("Failed to get hostname from the ec2_url")
-        ec2client = openstack.ec2api_client
-        try:
-            ec2client.get_all_regions()
-        except exception.BotoServerError as exc:
-            if exc.error_code is None:
-                raise Exception("EC2 target doesn't look like an EC2 service")
-            _cred_sub_check(ec2client.connection_data)
-
-    except lib_exc.Unauthorized:
-        EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
-                                " also failed to get them from keystone"
-    except Exception as exc:
-        EC2_CAN_CONNECT_ERROR = str(exc)
-
-    try:
-        if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
-            raise Exception("Failed to get hostname from the s3_url")
-        s3client = openstack.s3_client
-        try:
-            s3client.get_bucket("^INVALID*#()@INVALID.")
-        except exception.BotoServerError as exc:
-            if exc.status == 403:
-                _cred_sub_check(s3client.connection_data)
-    except lib_exc.Unauthorized:
-        S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
-                               " failed to get them even from keystoneclient"
-    except Exception as exc:
-        S3_CAN_CONNECT_ERROR = str(exc)
-    boto_logger.logger.setLevel(level)
-    return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
-            'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
-            'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
-
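-# Illustrative note (assumed summary of how the dict above is consumed):
-# BotoTestCase.resource_setup() skips classes that define an EC2 or S3
-# attribute when the matching *_CAN_CONNECT_ERROR entry is not None, and
-# the image tests below skip themselves unless A_I_IMAGES_READY is True.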
-
-class BotoExceptionMatcher(object):
-    STATUS_RE = r'[45]\d\d'
-    CODE_RE = '.*'  # regexp makes sense in group match
-
-    def match(self, exc):
-        """:returns: Returns with an error string if it does not match,
-               returns with None when it matches.
-        """
-        if not isinstance(exc, exception.BotoServerError):
-            return "%r not an BotoServerError instance" % exc
-        LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
-        if re.match(self.STATUS_RE, str(exc.status)) is None:
-            return ("Status code (%s) does not match"
-                    "the expected re pattern \"%s\""
-                    % (exc.status, self.STATUS_RE))
-        if re.match(self.CODE_RE, str(exc.error_code)) is None:
-            return ("Error code (%s) does not match" +
-                    "the expected re pattern \"%s\"") %\
-                   (exc.error_code, self.CODE_RE)
-        return None
-
-
-class ClientError(BotoExceptionMatcher):
-    STATUS_RE = r'4\d\d'
-
-
-class ServerError(BotoExceptionMatcher):
-    STATUS_RE = r'5\d\d'
-
-
-def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
-    """
-        Usable for adding an ExceptionMatcher(s) into the exception tree.
-        The not leaf elements does wildcard match
-    """
-    # in error_code just literal and '.' characters expected
-    if not isinstance(error_data, six.string_types):
-        (error_code, status_code) = map(str, error_data)
-    else:
-        status_code = None
-        error_code = error_data
-    parts = error_code.split('.')
-    basematch = ""
-    num_parts = len(parts)
-    max_index = num_parts - 1
-    add_cls = error_cls
-    for i_part in six.moves.xrange(num_parts):
-        part = parts[i_part]
-        leaf = i_part == max_index
-        if not leaf:
-            match = basematch + part + "[.].*"
-        else:
-            match = basematch + part
-
-        basematch += part + "[.]"
-        if not hasattr(add_cls, part):
-            cls_dict = {"CODE_RE": match}
-            if leaf and status_code is not None:
-                cls_dict["STATUS_RE"] = status_code
-            cls = type(part, (base, ), cls_dict)
-            setattr(add_cls, part, cls())
-            add_cls = cls
-        elif leaf:
-            raise LookupError("Tries to redefine an error code \"%s\"" % part)
-        else:
-            add_cls = getattr(add_cls, part)
-
-
-# TODO(afazekas): classmethod handling
-def friendly_function_name_simple(call_able):
-    name = ""
-    if hasattr(call_able, "im_class"):
-        name += call_able.im_class.__name__ + "."
-    name += call_able.__name__
-    return name
-
-
-def friendly_function_call_str(call_able, *args, **kwargs):
-    string = friendly_function_name_simple(call_able)
-    string += "(" + ", ".join(map(str, args))
-    if len(kwargs):
-        if len(args):
-            string += ", "
-    string += ", ".join("=".join(map(str, (key, value)))
-                        for (key, value) in kwargs.items())
-    return string + ")"
-
-
-class BotoTestCase(tempest.test.BaseTestCase):
-    """Recommended to use as base class for boto related test."""
-
-    credentials = ['primary']
-
-    @classmethod
-    def skip_checks(cls):
-        super(BotoTestCase, cls).skip_checks()
-        if not CONF.compute_feature_enabled.ec2_api:
-            raise cls.skipException("The EC2 API is not available")
-        if not CONF.identity_feature_enabled.api_v2 or \
-                not CONF.identity.auth_version == 'v2':
-            raise cls.skipException("Identity v2 is not available")
-
-    @classmethod
-    def resource_setup(cls):
-        super(BotoTestCase, cls).resource_setup()
-        cls.conclusion = decision_maker()
-        # The trash contains cleanup functions and parameters in tuples
-        # (function, *args, **kwargs)
-        cls._resource_trash_bin = {}
-        cls._sequence = -1
-        if (hasattr(cls, "EC2") and
-            cls.conclusion['EC2_CAN_CONNECT_ERROR'] is not None):
-            raise cls.skipException("EC2 " + cls.__name__ + ": " +
-                                    cls.conclusion['EC2_CAN_CONNECT_ERROR'])
-        if (hasattr(cls, "S3") and
-            cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
-            raise cls.skipException("S3 " + cls.__name__ + ": " +
-                                    cls.conclusion['S3_CAN_CONNECT_ERROR'])
-
-    @classmethod
-    def addResourceCleanUp(cls, function, *args, **kwargs):
-        """Adds CleanUp callable, used by tearDownClass.
-        Recommended to a use (deep)copy on the mutable args.
-        """
-        cls._sequence = cls._sequence + 1
-        cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
-        return cls._sequence
-
-    @classmethod
-    def cancelResourceCleanUp(cls, key):
-        """Cancel Clean up request."""
-        del cls._resource_trash_bin[key]
-
-    # TODO(afazekas): Add "with" context handling
-    def assertBotoError(self, excMatcher, callableObj,
-                        *args, **kwargs):
-        """Example usage:
-            self.assertBotoError(self.ec2_error_code.client.
-                                 InvalidKeyPair.Duplicate,
-                                 self.client.create_keypair,
-                                 key_name)
-        """
-        try:
-            callableObj(*args, **kwargs)
-        except exception.BotoServerError as exc:
-            error_msg = excMatcher.match(exc)
-            if error_msg is not None:
-                raise self.failureException(error_msg)
-        else:
-            raise self.failureException("BotoServerError not raised")
-
-    @classmethod
-    def resource_cleanup(cls):
-        """Calls the callables added by addResourceCleanUp,
-        when you overwrite this function don't forget to call this too.
-        """
-        fail_count = 0
-        trash_keys = sorted(cls._resource_trash_bin, reverse=True)
-        for key in trash_keys:
-            (function, pos_args, kw_args) = cls._resource_trash_bin[key]
-            try:
-                func_name = friendly_function_call_str(function, *pos_args,
-                                                       **kw_args)
-                LOG.debug("Cleaning up: %s" % func_name)
-                function(*pos_args, **kw_args)
-            except BaseException:
-                fail_count += 1
-                LOG.exception("Cleanup failed %s" % func_name)
-            finally:
-                del cls._resource_trash_bin[key]
-        super(BotoTestCase, cls).resource_cleanup()
-        # NOTE(afazekas): let the super be called even on exceptions.
-        # The real exceptions are already logged; if the super raises
-        # another one, it does not cause hidden issues
-        if fail_count:
-            raise exceptions.TearDownException(num=fail_count)
-
-    ec2_error_code = BotoExceptionMatcher()
-    # InsufficientInstanceCapacity can be both server and client error
-    ec2_error_code.server = ServerError()
-    ec2_error_code.client = ClientError()
-    s3_error_code = BotoExceptionMatcher()
-    s3_error_code.server = ServerError()
-    s3_error_code.client = ClientError()
-    valid_image_state = set(('available', 'pending', 'failed'))
-    # NOTE(afazekas): 'paused' is not a valid status in EC2 and does not have
-    # a good mapping; it uses memory but is not really a running machine
-    valid_instance_state = set(('pending', 'running', 'shutting-down',
-                                'terminated', 'stopping', 'stopped', 'paused'))
-    valid_volume_status = set(('creating', 'available', 'in-use',
-                               'deleting', 'deleted', 'error'))
-    valid_snapshot_status = set(('pending', 'completed', 'error'))
-
-    gone_set = set(('_GONE',))
-
-    @classmethod
-    def get_lfunction_gone(cls, obj):
-        """If the object is instance of a well know type returns back with
-            with the corresponding function otherwise it assumes the obj itself
-            is the function.
-            """
-        ec = cls.ec2_error_code
-        if isinstance(obj, ec2.instance.Instance):
-            closure_matcher = ec.client.InvalidInstanceID.NotFound
-            status_attr = "state"
-        elif isinstance(obj, ec2.image.Image):
-            closure_matcher = ec.client.InvalidAMIID.NotFound
-            status_attr = "state"
-        elif isinstance(obj, ec2.snapshot.Snapshot):
-            closure_matcher = ec.client.InvalidSnapshot.NotFound
-            status_attr = "status"
-        elif isinstance(obj, ec2.volume.Volume):
-            closure_matcher = ec.client.InvalidVolume.NotFound
-            status_attr = "status"
-        else:
-            return obj
-
-        def _status():
-            try:
-                obj.update(validate=True)
-            except ValueError:
-                return "_GONE"
-            except exception.EC2ResponseError as exc:
-                if closure_matcher.match(exc) is None:
-                    return "_GONE"
-                else:
-                    raise
-            return getattr(obj, status_attr)
-
-        return _status
-
-    def state_wait_gone(self, lfunction, final_set, valid_set):
-        if not isinstance(final_set, set):
-            final_set = set((final_set,))
-        final_set |= self.gone_set
-        lfunction = self.get_lfunction_gone(lfunction)
-        state = wait.state_wait(lfunction, final_set, valid_set)
-        self.assertIn(state, valid_set | self.gone_set)
-        return state
-
-    def waitImageState(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_image_state)
-
-    def waitInstanceState(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_instance_state)
-
-    def waitSnapshotStatus(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_snapshot_status)
-
-    def waitVolumeStatus(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_volume_status)
-
-    def assertImageStateWait(self, lfunction, wait_for):
-        state = self.waitImageState(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertInstanceStateWait(self, lfunction, wait_for):
-        state = self.waitInstanceState(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertVolumeStatusWait(self, lfunction, wait_for):
-        state = self.waitVolumeStatus(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertSnapshotStatusWait(self, lfunction, wait_for):
-        state = self.waitSnapshotStatus(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertAddressDisassociatedWait(self, address):
-
-        def _disassociate():
-            cli = self.ec2_client
-            addresses = cli.get_all_addresses(addresses=(address.public_ip,))
-            if len(addresses) != 1:
-                return "INVALID"
-            if addresses[0].instance_id:
-                LOG.info("%s associated to %s",
-                         address.public_ip,
-                         addresses[0].instance_id)
-                return "ASSOCIATED"
-            return "DISASSOCIATED"
-
-        state = wait.state_wait(_disassociate, "DISASSOCIATED",
-                                set(("ASSOCIATED", "DISASSOCIATED")))
-        self.assertEqual(state, "DISASSOCIATED")
-
-    def assertAddressReleasedWait(self, address):
-
-        def _address_delete():
-            # NOTE(afazekas): the filter gives back the IP
-            # even if it is not associated with my tenant
-            if (address.public_ip not in map(lambda a: a.public_ip,
-                self.ec2_client.get_all_addresses())):
-                    return "DELETED"
-            return "NOTDELETED"
-
-        state = wait.state_wait(_address_delete, "DELETED")
-        self.assertEqual(state, "DELETED")
-
-    def assertReSearch(self, regexp, string):
-        if re.search(regexp, string) is None:
-            raise self.failureException("regexp: '%s' not found in '%s'" %
-                                        (regexp, string))
-
-    def assertNotReSearch(self, regexp, string):
-        if re.search(regexp, string) is not None:
-            raise self.failureException("regexp: '%s' found in '%s'" %
-                                        (regexp, string))
-
-    def assertReMatch(self, regexp, string):
-        if re.match(regexp, string) is None:
-            raise self.failureException("regexp: '%s' not matches on '%s'" %
-                                        (regexp, string))
-
-    def assertNotReMatch(self, regexp, string):
-        if re.match(regexp, string) is not None:
-            raise self.failureException("regexp: '%s' matches on '%s'" %
-                                        (regexp, string))
-
-    @classmethod
-    def destroy_bucket(cls, connection_data, bucket):
-        """Destroys the bucket and its content, just for teardown."""
-        exc_num = 0
-        try:
-            with contextlib.closing(
-                    boto.connect_s3(**connection_data)) as conn:
-                if isinstance(bucket, basestring):
-                    bucket = conn.lookup(bucket)
-                    assert isinstance(bucket, s3.bucket.Bucket)
-                for obj in bucket.list():
-                    try:
-                        bucket.delete_key(obj.key)
-                        obj.close()
-                    except BaseException:
-                        LOG.exception("Failed to delete key %s " % obj.key)
-                        exc_num += 1
-            conn.delete_bucket(bucket)
-        except BaseException:
-            LOG.exception("Failed to destroy bucket %s " % bucket)
-            exc_num += 1
-        if exc_num:
-            raise exceptions.TearDownException(num=exc_num)
-
-    @classmethod
-    def destroy_reservation(cls, reservation):
-        """Terminate instances in a reservation, just for teardown."""
-        exc_num = 0
-
-        def _instance_state():
-            try:
-                instance.update(validate=True)
-            except ValueError:
-                return "_GONE"
-            except exception.EC2ResponseError as exc:
-                if cls.ec2_error_code.\
-                        client.InvalidInstanceID.NotFound.match(exc) is None:
-                    return "_GONE"
-                # NOTE(afazekas): incorrect code,
-                # but the resource must be destroyed
-                if exc.error_code == "InstanceNotFound":
-                    return "_GONE"
-
-            return instance.state
-
-        for instance in reservation.instances:
-            try:
-                instance.terminate()
-                wait.re_search_wait(_instance_state, "_GONE")
-            except BaseException:
-                LOG.exception("Failed to terminate instance %s " % instance)
-                exc_num += 1
-        if exc_num:
-            raise exceptions.TearDownException(num=exc_num)
-
-    # NOTE(afazekas): The incorrect ErrorCodes make it very, very difficult
-    # to write a better teardown
-
-    @classmethod
-    def destroy_security_group_wait(cls, group):
-        """Delete group.
-           Use just for teardown!
-        """
-        # NOTE(afazekas): should wait/try until all related instances terminate
-        group.delete()
-
-    @classmethod
-    def destroy_volume_wait(cls, volume):
-        """Delete volume, tries to detach first.
-           Use just for teardown!
-        """
-        exc_num = 0
-        snaps = volume.snapshots()
-        if len(snaps):
-            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
-                         map(snaps.id, snaps))
-
-        # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
-        def _volume_state():
-            volume.update(validate=True)
-            try:
-                # NOTE(gmann): Make sure volume is attached.
-                # Checking status as 'not "available"' is not enough to make
-                # sure volume is attached as it can be in "error" state
-                if volume.status == "in-use":
-                    volume.detach(force=True)
-            except BaseException:
-                LOG.exception("Failed to detach volume %s" % volume)
-                # exc_num += 1 "nonlocal" not in python2
-            return volume.status
-
-        try:
-            wait.re_search_wait(_volume_state, "available")
-            # does not validate the status
-            LOG.info(_volume_state())
-            volume.delete()
-        except BaseException:
-            LOG.exception("Failed to delete volume %s" % volume)
-            exc_num += 1
-        if exc_num:
-            raise exceptions.TearDownException(num=exc_num)
-
-    @classmethod
-    def destroy_snapshot_wait(cls, snapshot):
-        """delete snapshot, wait until it ceases to exist."""
-        snapshot.delete()
-
-        def _update():
-            snapshot.update(validate=True)
-
-        wait.wait_exception(_update)
-
-
-# you can use tuples if you also want to specify the status pattern
-for code in ('AddressLimitExceeded', 'AttachmentLimitExceeded', 'AuthFailure',
-             'Blocked', 'CustomerGatewayLimitExceeded', 'DependencyViolation',
-             'DiskImageSizeTooLarge', 'FilterLimitExceeded',
-             'Gateway.NotAttached', 'IdempotentParameterMismatch',
-             'IncorrectInstanceState', 'IncorrectState',
-             'InstanceLimitExceeded', 'InsufficientInstanceCapacity',
-             'InsufficientReservedInstancesCapacity',
-             'InternetGatewayLimitExceeded', 'InvalidAMIAttributeItemValue',
-             'InvalidAMIID.Malformed', 'InvalidAMIID.NotFound',
-             'InvalidAMIID.Unavailable', 'InvalidAssociationID.NotFound',
-             'InvalidAttachment.NotFound', 'InvalidConversionTaskId',
-             'InvalidCustomerGateway.DuplicateIpAddress',
-             'InvalidCustomerGatewayID.NotFound', 'InvalidDevice.InUse',
-             'InvalidDhcpOptionsID.NotFound', 'InvalidFormat',
-             'InvalidFilter', 'InvalidGatewayID.NotFound',
-             'InvalidGroup.Duplicate', 'InvalidGroupId.Malformed',
-             'InvalidGroup.InUse', 'InvalidGroup.NotFound',
-             'InvalidGroup.Reserved', 'InvalidInstanceID.Malformed',
-             'InvalidInstanceID.NotFound',
-             'InvalidInternetGatewayID.NotFound', 'InvalidIPAddress.InUse',
-             'InvalidKeyPair.Duplicate', 'InvalidKeyPair.Format',
-             'InvalidKeyPair.NotFound', 'InvalidManifest',
-             'InvalidNetworkAclEntry.NotFound',
-             'InvalidNetworkAclID.NotFound', 'InvalidParameterCombination',
-             'InvalidParameterValue', 'InvalidPermission.Duplicate',
-             'InvalidPermission.Malformed', 'InvalidReservationID.Malformed',
-             'InvalidReservationID.NotFound', 'InvalidRoute.NotFound',
-             'InvalidRouteTableID.NotFound',
-             'InvalidSecurity.RequestHasExpired',
-             'InvalidSnapshotID.Malformed', 'InvalidSnapshot.NotFound',
-             'InvalidUserID.Malformed', 'InvalidReservedInstancesId',
-             'InvalidReservedInstancesOfferingId',
-             'InvalidSubnetID.NotFound', 'InvalidVolumeID.Duplicate',
-             'InvalidVolumeID.Malformed', 'InvalidVolumeID.ZoneMismatch',
-             'InvalidVolume.NotFound', 'InvalidVpcID.NotFound',
-             'InvalidVpnConnectionID.NotFound',
-             'InvalidVpnGatewayID.NotFound',
-             'InvalidZone.NotFound', 'LegacySecurityGroup',
-             'MissingParameter', 'NetworkAclEntryAlreadyExists',
-             'NetworkAclEntryLimitExceeded', 'NetworkAclLimitExceeded',
-             'NonEBSInstance', 'PendingSnapshotLimitExceeded',
-             'PendingVerification', 'OptInRequired', 'RequestLimitExceeded',
-             'ReservedInstancesLimitExceeded', 'Resource.AlreadyAssociated',
-             'ResourceLimitExceeded', 'RouteAlreadyExists',
-             'RouteLimitExceeded', 'RouteTableLimitExceeded',
-             'RulesPerSecurityGroupLimitExceeded',
-             'SecurityGroupLimitExceeded',
-             'SecurityGroupsPerInstanceLimitExceeded',
-             'SnapshotLimitExceeded', 'SubnetLimitExceeded',
-             'UnknownParameter', 'UnsupportedOperation',
-             'VolumeLimitExceeded', 'VpcLimitExceeded',
-             'VpnConnectionLimitExceeded',
-             'VpnGatewayAttachmentLimitExceeded', 'VpnGatewayLimitExceeded'):
-    _add_matcher_class(BotoTestCase.ec2_error_code.client,
-                       code, base=ClientError)
-
-for code in ('InsufficientAddressCapacity', 'InsufficientInstanceCapacity',
-             'InsufficientReservedInstanceCapacity', 'InternalError',
-             'Unavailable'):
-    _add_matcher_class(BotoTestCase.ec2_error_code.server,
-                       code, base=ServerError)
-
-
-for code in (('AccessDenied', 403),
-             ('AccountProblem', 403),
-             ('AmbiguousGrantByEmailAddress', 400),
-             ('BadDigest', 400),
-             ('BucketAlreadyExists', 409),
-             ('BucketAlreadyOwnedByYou', 409),
-             ('BucketNotEmpty', 409),
-             ('CredentialsNotSupported', 400),
-             ('CrossLocationLoggingProhibited', 403),
-             ('EntityTooSmall', 400),
-             ('EntityTooLarge', 400),
-             ('ExpiredToken', 400),
-             ('IllegalVersioningConfigurationException', 400),
-             ('IncompleteBody', 400),
-             ('IncorrectNumberOfFilesInPostRequest', 400),
-             ('InlineDataTooLarge', 400),
-             ('InvalidAccessKeyId', 403),
-             'InvalidAddressingHeader',
-             ('InvalidArgument', 400),
-             ('InvalidBucketName', 400),
-             ('InvalidBucketState', 409),
-             ('InvalidDigest', 400),
-             ('InvalidLocationConstraint', 400),
-             ('InvalidPart', 400),
-             ('InvalidPartOrder', 400),
-             ('InvalidPayer', 403),
-             ('InvalidPolicyDocument', 400),
-             ('InvalidRange', 416),
-             ('InvalidRequest', 400),
-             ('InvalidSecurity', 403),
-             ('InvalidSOAPRequest', 400),
-             ('InvalidStorageClass', 400),
-             ('InvalidTargetBucketForLogging', 400),
-             ('InvalidToken', 400),
-             ('InvalidURI', 400),
-             ('KeyTooLong', 400),
-             ('MalformedACLError', 400),
-             ('MalformedPOSTRequest', 400),
-             ('MalformedXML', 400),
-             ('MaxMessageLengthExceeded', 400),
-             ('MaxPostPreDataLengthExceededError', 400),
-             ('MetadataTooLarge', 400),
-             ('MethodNotAllowed', 405),
-             ('MissingAttachment'),
-             ('MissingContentLength', 411),
-             ('MissingRequestBodyError', 400),
-             ('MissingSecurityElement', 400),
-             ('MissingSecurityHeader', 400),
-             ('NoLoggingStatusForKey', 400),
-             ('NoSuchBucket', 404),
-             ('NoSuchKey', 404),
-             ('NoSuchLifecycleConfiguration', 404),
-             ('NoSuchUpload', 404),
-             ('NoSuchVersion', 404),
-             ('NotSignedUp', 403),
-             ('NoSuchBucketPolicy', 404),
-             ('OperationAborted', 409),
-             ('PermanentRedirect', 301),
-             ('PreconditionFailed', 412),
-             ('Redirect', 307),
-             ('RequestIsNotMultiPartContent', 400),
-             ('RequestTimeout', 400),
-             ('RequestTimeTooSkewed', 403),
-             ('RequestTorrentOfBucketError', 400),
-             ('SignatureDoesNotMatch', 403),
-             ('TemporaryRedirect', 307),
-             ('TokenRefreshRequired', 400),
-             ('TooManyBuckets', 400),
-             ('UnexpectedContent', 400),
-             ('UnresolvableGrantByEmailAddress', 400),
-             ('UserKeyMustBeSpecified', 400)):
-    _add_matcher_class(BotoTestCase.s3_error_code.client,
-                       code, base=ClientError)
-
-
-for code in (('InternalError', 500),
-             ('NotImplemented', 501),
-             ('ServiceUnavailable', 503),
-             ('SlowDown', 503)):
-    _add_matcher_class(BotoTestCase.s3_error_code.server,
-                       code, base=ServerError)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
deleted file mode 100644
index 49a1854..0000000
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest.common.utils import data_utils
-from tempest.common.utils.linux import remote_client
-from tempest import config
-from tempest import exceptions
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-from tempest.thirdparty.boto.utils import s3
-from tempest.thirdparty.boto.utils import wait
-
-CONF = config.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class InstanceRunTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(InstanceRunTest, cls).setup_clients()
-        cls.s3_client = cls.os.s3_client
-        cls.ec2_client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(InstanceRunTest, cls).resource_setup()
-        if not cls.conclusion['A_I_IMAGES_READY']:
-            raise cls.skipException("".join(("EC2 ", cls.__name__,
-                                    ": requires ami/aki/ari manifest")))
-        cls.zone = CONF.boto.aws_zone
-        cls.materials_path = CONF.boto.s3_materials_path
-        ami_manifest = CONF.boto.ami_manifest
-        aki_manifest = CONF.boto.aki_manifest
-        ari_manifest = CONF.boto.ari_manifest
-        cls.instance_type = CONF.boto.instance_type
-        cls.bucket_name = data_utils.rand_name("s3bucket")
-        cls.keypair_name = data_utils.rand_name("keypair")
-        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
-        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
-                               cls.keypair_name)
-        bucket = cls.s3_client.create_bucket(cls.bucket_name)
-        cls.addResourceCleanUp(cls.destroy_bucket,
-                               cls.s3_client.connection_data,
-                               cls.bucket_name)
-        s3.s3_upload_dir(bucket, cls.materials_path)
-        cls.images = {"ami":
-                      {"name": data_utils.rand_name("ami-name"),
-                       "location": cls.bucket_name + "/" + ami_manifest},
-                      "aki":
-                      {"name": data_utils.rand_name("aki-name"),
-                       "location": cls.bucket_name + "/" + aki_manifest},
-                      "ari":
-                      {"name": data_utils.rand_name("ari-name"),
-                       "location": cls.bucket_name + "/" + ari_manifest}}
-        for image_type in ("aki", "ari"):
-            image = cls.images[image_type]
-            image["image_id"] = cls.ec2_client.register_image(
-                name=image["name"],
-                image_location=image["location"])
-            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
-                                   image["image_id"])
-        image = cls.images["ami"]
-        image["image_id"] = cls.ec2_client.register_image(
-            name=image["name"],
-            image_location=image["location"],
-            kernel_id=cls.images["aki"]["image_id"],
-            ramdisk_id=cls.images["ari"]["image_id"])
-        cls.addResourceCleanUp(cls.ec2_client.deregister_image,
-                               image["image_id"])
-
-        for image in cls.images.itervalues():
-            def _state():
-                retr = cls.ec2_client.get_image(image["image_id"])
-                return retr.state
-            state = wait.state_wait(_state, "available")
-            if state != "available":
-                for _image in cls.images.itervalues():
-                    cls.ec2_client.deregister_image(_image["image_id"])
-                raise exceptions.EC2RegisterImageException(
-                    image_id=image["image_id"])
-
-    def _terminate_reservation(self, reservation, rcuk):
-        for instance in reservation.instances:
-            instance.terminate()
-        for instance in reservation.instances:
-            self.assertInstanceStateWait(instance, '_GONE')
-        self.cancelResourceCleanUp(rcuk)
-
-    @test.idempotent_id('c881fbb7-d56e-4054-9d76-1c3a60a207b0')
-    def test_run_idempotent_instances(self):
-        # EC2 run instances idempotently
-
-        def _run_instance(client_token):
-            reservation = self.ec2_client.run_instances(
-                image_id=self.images["ami"]["image_id"],
-                kernel_id=self.images["aki"]["image_id"],
-                ramdisk_id=self.images["ari"]["image_id"],
-                instance_type=self.instance_type,
-                client_token=client_token)
-            rcuk = self.addResourceCleanUp(self.destroy_reservation,
-                                           reservation)
-            return (reservation, rcuk)
-
-        reservation_1, rcuk_1 = _run_instance('token_1')
-        reservation_2, rcuk_2 = _run_instance('token_2')
-        reservation_1a, rcuk_1a = _run_instance('token_1')
-
-        self.assertIsNotNone(reservation_1)
-        self.assertIsNotNone(reservation_2)
-        self.assertIsNotNone(reservation_1a)
-
-        # same reservation for token_1
-        self.assertEqual(reservation_1.id, reservation_1a.id)
-
-        # Cancel cleanup -- since it's a duplicate, it's
-        # handled by rcuk_1
-        self.cancelResourceCleanUp(rcuk_1a)
-
-        self._terminate_reservation(reservation_1, rcuk_1)
-        self._terminate_reservation(reservation_2, rcuk_2)
-
-    @test.idempotent_id('2ea26a39-f96c-48fc-8374-5c10ec184c67')
-    def test_run_stop_terminate_instance(self):
-        # EC2 run, stop and terminate instance
-        image_ami = self.ec2_client.get_image(self.images["ami"]
-                                              ["image_id"])
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type)
-        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
-
-        for instance in reservation.instances:
-            LOG.info("state: %s", instance.state)
-            if instance.state != "running":
-                self.assertInstanceStateWait(instance, "running")
-
-        for instance in reservation.instances:
-            instance.stop()
-            LOG.info("state: %s", instance.state)
-            if instance.state != "stopped":
-                self.assertInstanceStateWait(instance, "stopped")
-
-        self._terminate_reservation(reservation, rcuk)
-
-    @test.idempotent_id('3d77225a-5cec-4e54-a017-9ebf11a266e6')
-    def test_run_stop_terminate_instance_with_tags(self):
-        # EC2 run, stop and terminate instance with tags
-        image_ami = self.ec2_client.get_image(self.images["ami"]
-                                              ["image_id"])
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type)
-        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
-
-        for instance in reservation.instances:
-            LOG.info("state: %s", instance.state)
-            if instance.state != "running":
-                self.assertInstanceStateWait(instance, "running")
-            instance.add_tag('key1', value='value1')
-
-        tags = self.ec2_client.get_all_tags()
-        td = {item.name: item.value for item in tags}
-
-        self.assertIn('key1', td)
-        self.assertEqual('value1', td['key1'])
-
-        tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
-        td = {item.name: item.value for item in tags}
-        self.assertIn('key1', td)
-        self.assertEqual('value1', td['key1'])
-
-        tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
-        td = {item.name: item.value for item in tags}
-        self.assertIn('key1', td)
-        self.assertEqual('value1', td['key1'])
-
-        tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
-        td = {item.name: item.value for item in tags}
-        self.assertNotIn('key1', td)
-
-        for instance in reservation.instances:
-            instance.remove_tag('key1', value='value1')
-
-        tags = self.ec2_client.get_all_tags()
-
-        # NOTE: Volume attach and detach cause metadata (tags) to be created
-        # for the volume. So exclude them while asserting.
-        self.assertNotIn('key1', tags)
-
-        for instance in reservation.instances:
-            instance.stop()
-            LOG.info("state: %s", instance.state)
-            if instance.state != "stopped":
-                self.assertInstanceStateWait(instance, "stopped")
-
-        self._terminate_reservation(reservation, rcuk)
-
-    @test.idempotent_id('252945b5-3294-4fda-ae21-928a42f63f76')
-    def test_run_terminate_instance(self):
-        # EC2 run, terminate immediately
-        image_ami = self.ec2_client.get_image(self.images["ami"]
-                                              ["image_id"])
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type)
-
-        for instance in reservation.instances:
-            instance.terminate()
-        self.assertInstanceStateWait(instance, '_GONE')
-
-    @test.idempotent_id('ab836c29-737b-4101-9fb9-87045eaf89e9')
-    def test_compute_with_volumes(self):
-        # EC2 1. integration test (not strict)
-        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
-        sec_group_name = data_utils.rand_name("securitygroup")
-        group_desc = sec_group_name + " security group description "
-        security_group = self.ec2_client.create_security_group(sec_group_name,
-                                                               group_desc)
-        self.addResourceCleanUp(self.destroy_security_group_wait,
-                                security_group)
-        self.assertTrue(
-            self.ec2_client.authorize_security_group(
-                sec_group_name,
-                ip_protocol="icmp",
-                cidr_ip="0.0.0.0/0",
-                from_port=-1,
-                to_port=-1))
-        self.assertTrue(
-            self.ec2_client.authorize_security_group(
-                sec_group_name,
-                ip_protocol="tcp",
-                cidr_ip="0.0.0.0/0",
-                from_port=22,
-                to_port=22))
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type,
-                                    key_name=self.keypair_name,
-                                    security_groups=(sec_group_name,))
-
-        LOG.debug("Instance booted - state: %s",
-                  reservation.instances[0].state)
-
-        self.addResourceCleanUp(self.destroy_reservation,
-                                reservation)
-        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
-                                               self.zone)
-        LOG.debug("Volume created - status: %s", volume.status)
-
-        self.addResourceCleanUp(self.destroy_volume_wait, volume)
-        instance = reservation.instances[0]
-        if instance.state != "running":
-            self.assertInstanceStateWait(instance, "running")
-        LOG.debug("Instance now running - state: %s", instance.state)
-
-        address = self.ec2_client.allocate_address()
-        rcuk_a = self.addResourceCleanUp(address.delete)
-        self.assertTrue(address.associate(instance.id))
-
-        rcuk_da = self.addResourceCleanUp(address.disassociate)
-        # TODO(afazekas): ping test. dependency/permission?
-
-        self.assertVolumeStatusWait(volume, "available")
-        # NOTE(afazekas): it may report "available" before it actually is
-
-        ssh = remote_client.RemoteClient(address.public_ip,
-                                         CONF.compute.ssh_user,
-                                         pkey=self.keypair.material)
-        text = data_utils.rand_name("Pattern text for console output")
-        try:
-            resp = ssh.write_to_console(text)
-        except Exception:
-            if not CONF.compute_feature_enabled.console_output:
-                LOG.debug('Console output not supported, cannot log')
-            else:
-                console_output = instance.get_console_output().output
-                LOG.debug('Console output for %s\nbody=\n%s',
-                          instance.id, console_output)
-            raise
-
-        self.assertFalse(resp)
-
-        def _output():
-            output = instance.get_console_output()
-            return output.output
-
-        wait.re_search_wait(_output, text)
-        part_lines = ssh.get_partitions().split('\n')
-        volume.attach(instance.id, "/dev/vdh")
-
-        def _volume_state():
-            """Return volume state realizing that 'in-use' is overloaded."""
-            volume.update(validate=True)
-            status = volume.status
-            attached = volume.attach_data.status
-            LOG.debug("Volume %s is in status: %s, attach_status: %s",
-                      volume.id, status, attached)
-            # Nova reports 'in-use' on 'attaching' volumes because we
-            # have a single volume status, and EC2 has 2. Ensure that
-            # if we aren't attached yet we return something other than
-            # 'in-use'
-            if status == 'in-use' and attached != 'attached':
-                return 'attaching'
-            else:
-                return status
-
-        wait.re_search_wait(_volume_state, "in-use")
-
-        # NOTE(afazekas): Different hypervisor backends name the devices
-        # differently, so here we just test whether the partition number
-        # increased/decreased
-
-        def _part_state():
-            current = ssh.get_partitions().split('\n')
-            LOG.debug("Partition map for instance: %s", current)
-            if current > part_lines:
-                return 'INCREASE'
-            if current < part_lines:
-                return 'DECREASE'
-            return 'EQUAL'
-
-        wait.state_wait(_part_state, 'INCREASE')
-        part_lines = ssh.get_partitions().split('\n')
-
-        # TODO(afazekas): Resource compare to the flavor settings
-
-        volume.detach()
-
-        self.assertVolumeStatusWait(volume, "available")
-
-        wait.state_wait(_part_state, 'DECREASE')
-
-        instance.stop()
-        address.disassociate()
-        self.assertAddressDisassociatedWait(address)
-        self.cancelResourceCleanUp(rcuk_da)
-        address.release()
-        self.assertAddressReleasedWait(address)
-        self.cancelResourceCleanUp(rcuk_a)
-
-        LOG.debug("Instance %s state: %s", instance.id, instance.state)
-        if instance.state != "stopped":
-            self.assertInstanceStateWait(instance, "stopped")
-        # TODO(afazekas): move steps from teardown to the test case
-
-
-# TODO(afazekas): Snapshot/volume read/write test case
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
deleted file mode 100644
index 1b58cb4..0000000
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-def compare_key_pairs(a, b):
-    return (a.name == b.name and
-            a.fingerprint == b.fingerprint)
-
-
-class EC2KeysTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(EC2KeysTest, cls).setup_clients()
-        cls.client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(EC2KeysTest, cls).resource_setup()
-        cls.ec = cls.ec2_error_code
-
-# TODO(afazekas): merge create, delete, get test cases
-    @test.idempotent_id('54236804-01b7-4cfe-a6f9-bce1340feec8')
-    def test_create_ec2_keypair(self):
-        # EC2 create KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
-        keypair = self.client.create_key_pair(key_name)
-        self.assertTrue(compare_key_pairs(keypair,
-                        self.client.get_key_pair(key_name)))
-
-    @test.idempotent_id('3283b898-f90c-4952-b238-3e42b8c3f34f')
-    def test_delete_ec2_keypair(self):
-        # EC2 delete KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.client.create_key_pair(key_name)
-        self.client.delete_key_pair(key_name)
-        self.assertIsNone(self.client.get_key_pair(key_name))
-
-    @test.idempotent_id('fd89bd26-4d4d-4cf3-a303-65dd9158fcdc')
-    def test_get_ec2_keypair(self):
-        # EC2 get KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
-        keypair = self.client.create_key_pair(key_name)
-        self.assertTrue(compare_key_pairs(keypair,
-                        self.client.get_key_pair(key_name)))
-
-    @test.idempotent_id('daa73da1-e11c-4558-8d76-a716be79a401')
-    def test_duplicate_ec2_keypair(self):
-        # EC2 duplicate KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
-        keypair = self.client.create_key_pair(key_name)
-        self.assertBotoError(self.ec.client.InvalidKeyPair.Duplicate,
-                             self.client.create_key_pair,
-                             key_name)
-        self.assertTrue(compare_key_pairs(keypair,
-                        self.client.get_key_pair(key_name)))
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
deleted file mode 100644
index 594dc8b..0000000
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class EC2SecurityGroupTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(EC2SecurityGroupTest, cls).setup_clients()
-        cls.client = cls.os.ec2api_client
-
-    @test.idempotent_id('519b566e-0c38-4629-905e-7d6b6355f524')
-    def test_create_authorize_security_group(self):
-        # EC2 Create, authorize/revoke security group
-        group_name = data_utils.rand_name("securty_group")
-        group_description = group_name + " security group description "
-        group = self.client.create_security_group(group_name,
-                                                  group_description)
-        self.addResourceCleanUp(self.client.delete_security_group, group_name)
-        groups_get = self.client.get_all_security_groups(
-            groupnames=(group_name,))
-        self.assertEqual(len(groups_get), 1)
-        group_get = groups_get[0]
-        self.assertEqual(group.name, group_get.name)
-        self.assertEqual(group.name, group_get.name)
-        # ping (icmp_echo) and other icmp allowed from everywhere
-        # from_port and to_port act as icmp type
-        success = self.client.authorize_security_group(group_name,
-                                                       ip_protocol="icmp",
-                                                       cidr_ip="0.0.0.0/0",
-                                                       from_port=-1,
-                                                       to_port=-1)
-        self.assertTrue(success)
-        # allow standard ssh port from anywhere
-        success = self.client.authorize_security_group(group_name,
-                                                       ip_protocol="tcp",
-                                                       cidr_ip="0.0.0.0/0",
-                                                       from_port=22,
-                                                       to_port=22)
-        self.assertTrue(success)
-        # TODO(afazekas): Duplicate tests
-        group_get = self.client.get_all_security_groups(
-            groupnames=(group_name,))[0]
-        # remove listed rules
-        for ip_permission in group_get.rules:
-            for cidr in ip_permission.grants:
-                self.assertTrue(self.client.revoke_security_group(group_name,
-                                ip_protocol=ip_permission.ip_protocol,
-                                cidr_ip=cidr,
-                                from_port=ip_permission.from_port,
-                                to_port=ip_permission.to_port))
-
-        group_get = self.client.get_all_security_groups(
-            groupnames=(group_name,))[0]
-        # all rules should be removed now
-        self.assertEqual(0, len(group_get.rules))
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
deleted file mode 100644
index 483d4c3..0000000
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest import config
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def compare_volumes(a, b):
-    return (a.id == b.id and
-            a.size == b.size)
-
-
-class EC2VolumesTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(EC2VolumesTest, cls).skip_checks()
-        if not CONF.service_available.cinder:
-            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
-    @classmethod
-    def setup_clients(cls):
-        super(EC2VolumesTest, cls).setup_clients()
-        cls.client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(EC2VolumesTest, cls).resource_setup()
-        cls.zone = CONF.boto.aws_zone
-
-    @test.idempotent_id('663f0077-c743-48ad-8ae0-46821cbc0918')
-    def test_create_get_delete(self):
-        # EC2 Create, get, delete Volume
-        volume = self.client.create_volume(CONF.volume.volume_size, self.zone)
-        cuk = self.addResourceCleanUp(self.client.delete_volume, volume.id)
-        self.assertIn(volume.status, self.valid_volume_status)
-        retrieved = self.client.get_all_volumes((volume.id,))
-        self.assertEqual(1, len(retrieved))
-        self.assertTrue(compare_volumes(volume, retrieved[0]))
-        self.assertVolumeStatusWait(volume, "available")
-        self.client.delete_volume(volume.id)
-        self.cancelResourceCleanUp(cuk)
-
-    @test.idempotent_id('c6b60d7a-1af7-4f8e-af21-d539d9496149')
-    def test_create_volume_from_snapshot(self):
-        # EC2 Create volume from snapshot
-        volume = self.client.create_volume(CONF.volume.volume_size, self.zone)
-        self.addResourceCleanUp(self.client.delete_volume, volume.id)
-        self.assertVolumeStatusWait(volume, "available")
-        snap = self.client.create_snapshot(volume.id)
-        self.addResourceCleanUp(self.destroy_snapshot_wait, snap)
-        self.assertSnapshotStatusWait(snap, "completed")
-
-        svol = self.client.create_volume(CONF.volume.volume_size, self.zone,
-                                         snapshot=snap)
-        cuk = self.addResourceCleanUp(svol.delete)
-        self.assertVolumeStatusWait(svol, "available")
-        svol.delete()
-        self.cancelResourceCleanUp(cuk)
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
deleted file mode 100644
index f008973..0000000
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class S3BucketsTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(S3BucketsTest, cls).setup_clients()
-        cls.client = cls.os.s3_client
-
-    @test.idempotent_id('4678525d-8da0-4518-81c1-f1f67d595b00')
-    def test_create_and_get_delete_bucket(self):
-        # S3 Create, get and delete bucket
-        bucket_name = data_utils.rand_name("s3bucket")
-        cleanup_key = self.addResourceCleanUp(self.client.delete_bucket,
-                                              bucket_name)
-        bucket = self.client.create_bucket(bucket_name)
-        self.assertTrue(bucket.name == bucket_name)
-        bucket = self.client.get_bucket(bucket_name)
-        self.assertTrue(bucket.name == bucket_name)
-        self.client.delete_bucket(bucket_name)
-        self.assertBotoError(self.s3_error_code.client.NoSuchBucket,
-                             self.client.get_bucket, bucket_name)
-        self.cancelResourceCleanUp(cleanup_key)
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
deleted file mode 100644
index c41c7ac..0000000
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from tempest.common.utils import data_utils
-from tempest import config
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-from tempest.thirdparty.boto.utils import s3
-
-CONF = config.CONF
-
-
-class S3ImagesTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(S3ImagesTest, cls).setup_clients()
-        cls.s3_client = cls.os.s3_client
-        cls.images_client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(S3ImagesTest, cls).resource_setup()
-        if not cls.conclusion['A_I_IMAGES_READY']:
-            raise cls.skipException("".join(("EC2 ", cls.__name__,
-                                    ": requires ami/aki/ari manifest")))
-        cls.materials_path = CONF.boto.s3_materials_path
-        cls.ami_manifest = CONF.boto.ami_manifest
-        cls.aki_manifest = CONF.boto.aki_manifest
-        cls.ari_manifest = CONF.boto.ari_manifest
-        cls.ami_path = cls.materials_path + os.sep + cls.ami_manifest
-        cls.aki_path = cls.materials_path + os.sep + cls.aki_manifest
-        cls.ari_path = cls.materials_path + os.sep + cls.ari_manifest
-        cls.bucket_name = data_utils.rand_name("bucket")
-        bucket = cls.s3_client.create_bucket(cls.bucket_name)
-        cls.addResourceCleanUp(cls.destroy_bucket,
-                               cls.s3_client.connection_data,
-                               cls.bucket_name)
-        s3.s3_upload_dir(bucket, cls.materials_path)
-
-    @test.idempotent_id('f9d360a5-0188-4c77-9db2-4c34c28d12a5')
-    def test_register_get_deregister_ami_image(self):
-        # Register and deregister ami image
-        image = {"name": data_utils.rand_name("ami-name"),
-                 "location": self.bucket_name + "/" + self.ami_manifest,
-                 "type": "ami"}
-        image["image_id"] = self.images_client.register_image(
-            name=image["name"],
-            image_location=image["location"])
-        # NOTE(afazekas): delete_snapshot=True might trigger boto lib? bug
-        image["cleanUp"] = self.addResourceCleanUp(
-            self.images_client.deregister_image,
-            image["image_id"])
-        self.assertEqual(image["image_id"][0:3], image["type"])
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertTrue(retrieved_image.name == image["name"])
-        self.assertTrue(retrieved_image.id == image["image_id"])
-        if retrieved_image.state != "available":
-            self.assertImageStateWait(retrieved_image, "available")
-        self.images_client.deregister_image(image["image_id"])
-        self.assertNotIn(image["image_id"], str(
-            self.images_client.get_all_images()))
-        self.cancelResourceCleanUp(image["cleanUp"])
-
-    @test.idempotent_id('42cca5b0-453b-4618-b99f-dbc039db426f')
-    def test_register_get_deregister_aki_image(self):
-        # Register and deregister aki image
-        image = {"name": data_utils.rand_name("aki-name"),
-                 "location": self.bucket_name + "/" + self.aki_manifest,
-                 "type": "aki"}
-        image["image_id"] = self.images_client.register_image(
-            name=image["name"],
-            image_location=image["location"])
-        image["cleanUp"] = self.addResourceCleanUp(
-            self.images_client.deregister_image,
-            image["image_id"])
-        self.assertEqual(image["image_id"][0:3], image["type"])
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertTrue(retrieved_image.name == image["name"])
-        self.assertTrue(retrieved_image.id == image["image_id"])
-        self.assertIn(retrieved_image.state, self.valid_image_state)
-        if retrieved_image.state != "available":
-            self.assertImageStateWait(retrieved_image, "available")
-        self.images_client.deregister_image(image["image_id"])
-        self.assertNotIn(image["image_id"], str(
-            self.images_client.get_all_images()))
-        self.cancelResourceCleanUp(image["cleanUp"])
-
-    @test.idempotent_id('1359e860-841c-43bb-80f3-bb389cbfd81d')
-    def test_register_get_deregister_ari_image(self):
-        # Register and deregister ari image
-        image = {"name": data_utils.rand_name("ari-name"),
-                 "location": "/" + self.bucket_name + "/" + self.ari_manifest,
-                 "type": "ari"}
-        image["image_id"] = self.images_client.register_image(
-            name=image["name"],
-            image_location=image["location"])
-        image["cleanUp"] = self.addResourceCleanUp(
-            self.images_client.deregister_image,
-            image["image_id"])
-        self.assertEqual(image["image_id"][0:3], image["type"])
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertIn(retrieved_image.state, self.valid_image_state)
-        if retrieved_image.state != "available":
-            self.assertImageStateWait(retrieved_image, "available")
-        self.assertIn(retrieved_image.state, self.valid_image_state)
-        self.assertTrue(retrieved_image.name == image["name"])
-        self.assertTrue(retrieved_image.id == image["image_id"])
-        self.images_client.deregister_image(image["image_id"])
-        self.cancelResourceCleanUp(image["cleanUp"])
-
-# TODO(afazekas): less copy-paste style
diff --git a/tempest/thirdparty/boto/test_s3_objects.py b/tempest/thirdparty/boto/test_s3_objects.py
deleted file mode 100644
index c42d85c..0000000
--- a/tempest/thirdparty/boto/test_s3_objects.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import boto.s3.key
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class S3BucketsTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(S3BucketsTest, cls).setup_clients()
-        cls.client = cls.os.s3_client
-
-    @test.idempotent_id('4eea567a-b46a-405b-a475-6097e1faebde')
-    def test_create_get_delete_object(self):
-        # S3 Create, get and delete object
-        bucket_name = data_utils.rand_name("s3bucket")
-        object_name = data_utils.rand_name("s3object")
-        content = 'x' * 42
-        bucket = self.client.create_bucket(bucket_name)
-        self.addResourceCleanUp(self.destroy_bucket,
-                                self.client.connection_data,
-                                bucket_name)
-
-        self.assertTrue(bucket.name == bucket_name)
-        with contextlib.closing(boto.s3.key.Key(bucket)) as key:
-            key.key = object_name
-            key.set_contents_from_string(content)
-            readback = key.get_contents_as_string()
-            self.assertTrue(readback == content)
-            bucket.delete_key(key)
-            self.assertBotoError(self.s3_error_code.client.NoSuchKey,
-                                 key.get_contents_as_string)
diff --git a/tempest/thirdparty/boto/utils/__init__.py b/tempest/thirdparty/boto/utils/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/boto/utils/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
deleted file mode 100644
index 55c1b0a..0000000
--- a/tempest/thirdparty/boto/utils/s3.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import os
-import re
-
-import boto
-import boto.s3.key
-
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def s3_upload_dir(bucket, path, prefix="", connection_data=None):
-    if isinstance(bucket, basestring):
-        with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
-            bucket = conn.lookup(bucket)
-    for root, dirs, files in os.walk(path):
-        for fil in files:
-            with contextlib.closing(boto.s3.key.Key(bucket)) as key:
-                source = root + os.sep + fil
-                target = re.sub("^" + re.escape(path) + "?/", prefix, source)
-                if os.sep != '/':
-                    target = re.sub(re.escape(os.sep), '/', target)
-                key.key = target
-                LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
-                key.set_contents_from_filename(source)
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
deleted file mode 100644
index 8771ed7..0000000
--- a/tempest/thirdparty/boto/utils/wait.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import time
-
-import boto.exception
-from oslo_log import log as logging
-import testtools
-
-from tempest import config
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def state_wait(lfunction, final_set=set(), valid_set=None):
-    # TODO(afazekas): evaluate using ABC here
-    if not isinstance(final_set, set):
-        final_set = set((final_set,))
-    if not isinstance(valid_set, set) and valid_set is not None:
-        valid_set = set((valid_set,))
-    start_time = time.time()
-    old_status = status = lfunction()
-    while True:
-        if status != old_status:
-            LOG.info('State transition "%s" ==> "%s" %d second', old_status,
-                     status, time.time() - start_time)
-        if status in final_set:
-            return status
-        if valid_set is not None and status not in valid_set:
-            return status
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException("State change timeout exceeded!"
-                                  '(%ds) While waiting'
-                                  'for %s at "%s"' %
-                                  (dtime, final_set, status))
-        time.sleep(CONF.boto.build_interval)
-        old_status = status
-        status = lfunction()
-
-
-def re_search_wait(lfunction, regexp):
-    """Stops waiting on success."""
-    start_time = time.time()
-    while True:
-        text = lfunction()
-        result = re.search(regexp, text)
-        if result is not None:
-            LOG.info('Pattern "%s" found in %d second in "%s"',
-                     regexp,
-                     time.time() - start_time,
-                     text)
-            return result
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException('Pattern find timeout exceeded!'
-                                  '(%ds) While waiting for'
-                                  '"%s" pattern in "%s"' %
-                                  (dtime, regexp, text))
-        time.sleep(CONF.boto.build_interval)
-
-
-def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
-    """Stops waiting on success."""
-    start_time = time.time()
-    if exc_matcher is not None:
-        exc_class = boto.exception.BotoServerError
-
-    if exc_class is None:
-        exc_class = BaseException
-    while True:
-        result = None
-        try:
-            result = lfunction()
-            LOG.info('No Exception in %d second',
-                     time.time() - start_time)
-            return result
-        except exc_class as exc:
-            if exc_matcher is not None:
-                res = exc_matcher.match(exc)
-                if res is not None:
-                    LOG.info(res)
-                    raise exc
-        # Let the other exceptions propagate
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException("Wait timeout exceeded! (%ds)" % dtime)
-        time.sleep(CONF.boto.build_interval)
-
-
-# NOTE(afazekas): EC2/boto normally raise exception instead of empty list
-def wait_exception(lfunction):
-    """Returns with the exception or raises one."""
-    start_time = time.time()
-    while True:
-        try:
-            lfunction()
-        except BaseException as exc:
-            LOG.info('Exception in %d second',
-                     time.time() - start_time)
-            return exc
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException("Wait timeout exceeded! (%ds)" % dtime)
-        time.sleep(CONF.boto.build_interval)
-
-# TODO(afazekas): consider strategy design pattern..
diff --git a/test-requirements.txt b/test-requirements.txt
index db2b2ce..eb43f31 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,10 +3,10 @@
 # process, which may cause wedges in the gate later.
 hacking<0.11,>=0.10.0
 # needed for doc build
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
-python-subunit>=0.0.18
-oslosphinx>=2.5.0 # Apache-2.0
-mox>=0.5.3
-mock>=1.2
-coverage>=3.6
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
+oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
+mox>=0.5.3 # Apache-2.0
+mock>=1.2 # BSD
+coverage>=3.6 # Apache-2.0
 oslotest>=1.10.0 # Apache-2.0
diff --git a/tools/check_logs.py b/tools/check_logs.py
index c8d3a1a..fa7129d 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -19,7 +19,7 @@
 import gzip
 import os
 import re
-import StringIO
+import six
 import sys
 import urllib2
 
@@ -71,7 +71,7 @@
         req = urllib2.Request(url)
         req.add_header('Accept-Encoding', 'gzip')
         page = urllib2.urlopen(req)
-        buf = StringIO.StringIO(page.read())
+        buf = six.StringIO(page.read())
         f = gzip.GzipFile(fileobj=buf)
         if scan_content(name, f.read().splitlines(), regexp, whitelist):
             logs_with_errors.append(name)
@@ -105,7 +105,7 @@
 def main(opts):
     if opts.directory and opts.url or not (opts.directory or opts.url):
         print("Must provide exactly one of -d or -u")
-        exit(1)
+        return 1
     print("Checking logs...")
     WHITELIST_FILE = os.path.join(
         os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
diff --git a/tools/check_uuid.py b/tools/check_uuid.py
deleted file mode 100755
index e21c3d8..0000000
--- a/tools/check_uuid.py
+++ /dev/null
@@ -1,360 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import argparse
-import ast
-import importlib
-import inspect
-import os
-import sys
-import unittest
-import urllib
-import uuid
-
-DECORATOR_MODULE = 'test'
-DECORATOR_NAME = 'idempotent_id'
-DECORATOR_IMPORT = 'tempest.%s' % DECORATOR_MODULE
-IMPORT_LINE = 'from tempest import %s' % DECORATOR_MODULE
-DECORATOR_TEMPLATE = "@%s.%s('%%s')" % (DECORATOR_MODULE,
-                                        DECORATOR_NAME)
-UNIT_TESTS_EXCLUDE = 'tempest.tests'
-
-
-class SourcePatcher(object):
-
-    """"Lazy patcher for python source files"""
-
-    def __init__(self):
-        self.source_files = None
-        self.patches = None
-        self.clear()
-
-    def clear(self):
-        """Clear inner state"""
-        self.source_files = {}
-        self.patches = {}
-
-    @staticmethod
-    def _quote(s):
-        return urllib.quote(s)
-
-    @staticmethod
-    def _unquote(s):
-        return urllib.unquote(s)
-
-    def add_patch(self, filename, patch, line_no):
-        """Add lazy patch"""
-        if filename not in self.source_files:
-            with open(filename) as f:
-                self.source_files[filename] = self._quote(f.read())
-        patch_id = str(uuid.uuid4())
-        if not patch.endswith('\n'):
-            patch += '\n'
-        self.patches[patch_id] = self._quote(patch)
-        lines = self.source_files[filename].split(self._quote('\n'))
-        lines[line_no - 1] = ''.join(('{%s:s}' % patch_id, lines[line_no - 1]))
-        self.source_files[filename] = self._quote('\n').join(lines)
-
-    def _save_changes(self, filename, source):
-        print('%s fixed' % filename)
-        with open(filename, 'w') as f:
-            f.write(source)
-
-    def apply_patches(self):
-        """Apply all patches"""
-        for filename in self.source_files:
-            patched_source = self._unquote(
-                self.source_files[filename].format(**self.patches)
-            )
-            self._save_changes(filename, patched_source)
-        self.clear()
-
-
-class TestChecker(object):
-
-    def __init__(self, package):
-        self.package = package
-        self.base_path = os.path.abspath(os.path.dirname(package.__file__))
-
-    def _path_to_package(self, path):
-        relative_path = path[len(self.base_path) + 1:]
-        if relative_path:
-            return '.'.join((self.package.__name__,) +
-                            tuple(relative_path.split('/')))
-        else:
-            return self.package.__name__
-
-    def _modules_search(self):
-        """Recursive search for python modules in base package"""
-        modules = []
-        for root, dirs, files in os.walk(self.base_path):
-            if not os.path.exists(os.path.join(root, '__init__.py')):
-                continue
-            root_package = self._path_to_package(root)
-            for item in files:
-                if item.endswith('.py'):
-                    module_name = '.'.join((root_package,
-                                           os.path.splitext(item)[0]))
-                    if not module_name.startswith(UNIT_TESTS_EXCLUDE):
-                        modules.append(module_name)
-        return modules
-
-    @staticmethod
-    def _get_idempotent_id(test_node):
-        """
-        Return key-value dict with all metadata from @test.idempotent_id
-        decorators for test method
-        """
-        idempotent_id = None
-        for decorator in test_node.decorator_list:
-            if (hasattr(decorator, 'func') and
-                hasattr(decorator.func, 'attr') and
-                decorator.func.attr == DECORATOR_NAME and
-                hasattr(decorator.func, 'value') and
-                decorator.func.value.id == DECORATOR_MODULE):
-                for arg in decorator.args:
-                    idempotent_id = ast.literal_eval(arg)
-        return idempotent_id
-
-    @staticmethod
-    def _is_decorator(line):
-        return line.strip().startswith('@')
-
-    @staticmethod
-    def _is_def(line):
-        return line.strip().startswith('def ')
-
-    def _add_uuid_to_test(self, patcher, test_node, source_path):
-        with open(source_path) as src:
-            src_lines = src.read().split('\n')
-        lineno = test_node.lineno
-        insert_position = lineno
-        while True:
-            if (self._is_def(src_lines[lineno - 1]) or
-                    (self._is_decorator(src_lines[lineno - 1]) and
-                        (DECORATOR_TEMPLATE.split('(')[0] <=
-                            src_lines[lineno - 1].strip().split('(')[0]))):
-                insert_position = lineno
-                break
-            lineno += 1
-        patcher.add_patch(
-            source_path,
-            ' ' * test_node.col_offset + DECORATOR_TEMPLATE % uuid.uuid4(),
-            insert_position
-        )
-
-    @staticmethod
-    def _is_test_case(module, node):
-        if (node.__class__ is ast.ClassDef and
-                hasattr(module, node.name) and
-                inspect.isclass(getattr(module, node.name))):
-            return issubclass(getattr(module, node.name), unittest.TestCase)
-
-    @staticmethod
-    def _is_test_method(node):
-        return (node.__class__ is ast.FunctionDef
-                and node.name.startswith('test_'))
-
-    @staticmethod
-    def _next_node(body, node):
-        if body.index(node) < len(body):
-            return body[body.index(node) + 1]
-
-    @staticmethod
-    def _import_name(node):
-        if type(node) == ast.Import:
-            return node.names[0].name
-        elif type(node) == ast.ImportFrom:
-            return '%s.%s' % (node.module, node.names[0].name)
-
-    def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
-        with open(source_path) as f:
-            src_lines = f.read().split('\n')
-        line_no = 0
-        tempest_imports = [node for node in src_parsed.body
-                           if self._import_name(node) and
-                           'tempest.' in self._import_name(node)]
-        if not tempest_imports:
-            import_snippet = '\n'.join(('', IMPORT_LINE, ''))
-        else:
-            for node in tempest_imports:
-                if self._import_name(node) < DECORATOR_IMPORT:
-                    continue
-                else:
-                    line_no = node.lineno
-                    import_snippet = IMPORT_LINE
-                    break
-            else:
-                line_no = tempest_imports[-1].lineno
-                while True:
-                    if (not src_lines[line_no - 1] or
-                            getattr(self._next_node(src_parsed.body,
-                                                    tempest_imports[-1]),
-                                    'lineno') == line_no or
-                            line_no == len(src_lines)):
-                        break
-                    line_no += 1
-                import_snippet = '\n'.join((IMPORT_LINE, ''))
-        patcher.add_patch(source_path, import_snippet, line_no)
-
-    def get_tests(self):
-        """Get test methods with sources from base package with metadata"""
-        tests = {}
-        for module_name in self._modules_search():
-            tests[module_name] = {}
-            module = importlib.import_module(module_name)
-            source_path = '.'.join(
-                (os.path.splitext(module.__file__)[0], 'py')
-            )
-            with open(source_path, 'r') as f:
-                source = f.read()
-            tests[module_name]['source_path'] = source_path
-            tests[module_name]['tests'] = {}
-            source_parsed = ast.parse(source)
-            tests[module_name]['ast'] = source_parsed
-            tests[module_name]['import_valid'] = (
-                hasattr(module, DECORATOR_MODULE) and
-                inspect.ismodule(getattr(module, DECORATOR_MODULE))
-            )
-            test_cases = (node for node in source_parsed.body
-                          if self._is_test_case(module, node))
-            for node in test_cases:
-                for subnode in filter(self._is_test_method, node.body):
-                        test_name = '%s.%s' % (node.name, subnode.name)
-                        tests[module_name]['tests'][test_name] = subnode
-        return tests
-
-    @staticmethod
-    def _filter_tests(function, tests):
-        """Filter tests with condition 'function(test_node) == True'"""
-        result = {}
-        for module_name in tests:
-            for test_name in tests[module_name]['tests']:
-                if function(module_name, test_name, tests):
-                    if module_name not in result:
-                        result[module_name] = {
-                            'ast': tests[module_name]['ast'],
-                            'source_path': tests[module_name]['source_path'],
-                            'import_valid': tests[module_name]['import_valid'],
-                            'tests': {}
-                        }
-                    result[module_name]['tests'][test_name] = \
-                        tests[module_name]['tests'][test_name]
-        return result
-
-    def find_untagged(self, tests):
-        """Filter all tests without uuid in metadata"""
-        def check_uuid_in_meta(module_name, test_name, tests):
-            idempotent_id = self._get_idempotent_id(
-                tests[module_name]['tests'][test_name])
-            return not idempotent_id
-        return self._filter_tests(check_uuid_in_meta, tests)
-
-    def report_collisions(self, tests):
-        """Reports collisions if there are any. Returns true if
-        collisions exist.
-        """
-        uuids = {}
-
-        def report(module_name, test_name, tests):
-            test_uuid = self._get_idempotent_id(
-                tests[module_name]['tests'][test_name])
-            if not test_uuid:
-                return
-            if test_uuid in uuids:
-                error_str = "%s:%s\n uuid %s collision: %s<->%s\n%s:%s" % (
-                    tests[module_name]['source_path'],
-                    tests[module_name]['tests'][test_name].lineno,
-                    test_uuid,
-                    test_name,
-                    uuids[test_uuid]['test_name'],
-                    uuids[test_uuid]['source_path'],
-                    uuids[test_uuid]['test_node'].lineno,
-                )
-                print(error_str)
-                print("cannot automatically resolve the collision, please "
-                      "manually remove the duplicate value on the new test.")
-                return True
-            else:
-                uuids[test_uuid] = {
-                    'module': module_name,
-                    'test_name': test_name,
-                    'test_node': tests[module_name]['tests'][test_name],
-                    'source_path': tests[module_name]['source_path']
-                }
-        return bool(self._filter_tests(report, tests))
-
-    def report_untagged(self, tests):
-        """Reports untagged tests if there are any. Returns true if
-        untagged tests exist.
-        """
-        def report(module_name, test_name, tests):
-            error_str = "%s:%s\nmissing @test.idempotent_id('...')\n%s\n" % (
-                tests[module_name]['source_path'],
-                tests[module_name]['tests'][test_name].lineno,
-                test_name
-            )
-            print(error_str)
-            return True
-        return bool(self._filter_tests(report, tests))
-
-    def fix_tests(self, tests):
-        """Add uuids to all tests specified in tests and
-        fix it in source files
-        """
-        patcher = SourcePatcher()
-        for module_name in tests:
-            add_import_once = True
-            for test_name in tests[module_name]['tests']:
-                if not tests[module_name]['import_valid'] and add_import_once:
-                    self._add_import_for_test_uuid(
-                        patcher,
-                        tests[module_name]['ast'],
-                        tests[module_name]['source_path']
-                    )
-                    add_import_once = False
-                self._add_uuid_to_test(
-                    patcher, tests[module_name]['tests'][test_name],
-                    tests[module_name]['source_path'])
-        patcher.apply_patches()
-
-
-def run():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--package', action='store', dest='package',
-                        default='tempest', type=str,
-                        help='Package with tests')
-    parser.add_argument('--fix', action='store_true', dest='fix_tests',
-                        help='Attempt to fix tests without UUIDs')
-    args = parser.parse_args()
-    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
-    pkg = importlib.import_module(args.package)
-    checker = TestChecker(pkg)
-    errors = False
-    tests = checker.get_tests()
-    untagged = checker.find_untagged(tests)
-    errors = checker.report_collisions(tests) or errors
-    if args.fix_tests and untagged:
-        checker.fix_tests(untagged)
-    else:
-        errors = checker.report_untagged(untagged) or errors
-    if errors:
-        sys.exit("@test.idempotent_id existence and uniqueness checks failed\n"
-                 "Run 'tox -v -euuidgen' to automatically fix tests with\n"
-                 "missing @test.idempotent_id decorators.")
-
-if __name__ == '__main__':
-    run()
diff --git a/tools/colorizer.py b/tools/colorizer.py
deleted file mode 100755
index e7152f2..0000000
--- a/tools/colorizer.py
+++ /dev/null
@@ -1,332 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2013, Nebula, Inc.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# Colorizer Code is borrowed from Twisted:
-# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
-#
-#    Permission is hereby granted, free of charge, to any person obtaining
-#    a copy of this software and associated documentation files (the
-#    "Software"), to deal in the Software without restriction, including
-#    without limitation the rights to use, copy, modify, merge, publish,
-#    distribute, sublicense, and/or sell copies of the Software, and to
-#    permit persons to whom the Software is furnished to do so, subject to
-#    the following conditions:
-#
-#    The above copyright notice and this permission notice shall be
-#    included in all copies or substantial portions of the Software.
-#
-#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-"""Display a subunit stream through a colorized unittest test runner."""
-
-import heapq
-import sys
-import unittest
-
-import subunit
-import testtools
-
-
-class _AnsiColorizer(object):
-    """
-    A colorizer is an object that loosely wraps around a stream, allowing
-    callers to write text to the stream in a particular color.
-
-    Colorizer classes must implement C{supported()} and C{write(text, color)}.
-    """
-    _colors = dict(black=30, red=31, green=32, yellow=33,
-                   blue=34, magenta=35, cyan=36, white=37)
-
-    def __init__(self, stream):
-        self.stream = stream
-
-    def supported(cls, stream=sys.stdout):
-        """
-        A class method that returns True if the current platform supports
-        coloring terminal output using this method. Returns False otherwise.
-        """
-        if not stream.isatty():
-            return False  # auto color only on TTYs
-        try:
-            import curses
-        except ImportError:
-            return False
-        else:
-            try:
-                try:
-                    return curses.tigetnum("colors") > 2
-                except curses.error:
-                    curses.setupterm()
-                    return curses.tigetnum("colors") > 2
-            except Exception:
-                # guess false in case of error
-                return False
-    supported = classmethod(supported)
-
-    def write(self, text, color):
-        """
-        Write the given text to the stream in the given color.
-
-        @param text: Text to be written to the stream.
-
-        @param color: A string label for a color. e.g. 'red', 'white'.
-        """
-        color = self._colors[color]
-        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
-
-
-class _Win32Colorizer(object):
-    """
-    See _AnsiColorizer docstring.
-    """
-    def __init__(self, stream):
-        import win32console
-        red, green, blue, bold = (win32console.FOREGROUND_RED,
-                                  win32console.FOREGROUND_GREEN,
-                                  win32console.FOREGROUND_BLUE,
-                                  win32console.FOREGROUND_INTENSITY)
-        self.stream = stream
-        self.screenBuffer = win32console.GetStdHandle(
-            win32console.STD_OUT_HANDLE)
-        self._colors = {'normal': red | green | blue,
-                        'red': red | bold,
-                        'green': green | bold,
-                        'blue': blue | bold,
-                        'yellow': red | green | bold,
-                        'magenta': red | blue | bold,
-                        'cyan': green | blue | bold,
-                        'white': red | green | blue | bold}
-
-    def supported(cls, stream=sys.stdout):
-        try:
-            import win32console
-            screenBuffer = win32console.GetStdHandle(
-                win32console.STD_OUT_HANDLE)
-        except ImportError:
-            return False
-        import pywintypes
-        try:
-            screenBuffer.SetConsoleTextAttribute(
-                win32console.FOREGROUND_RED |
-                win32console.FOREGROUND_GREEN |
-                win32console.FOREGROUND_BLUE)
-        except pywintypes.error:
-            return False
-        else:
-            return True
-    supported = classmethod(supported)
-
-    def write(self, text, color):
-        color = self._colors[color]
-        self.screenBuffer.SetConsoleTextAttribute(color)
-        self.stream.write(text)
-        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
-
-
-class _NullColorizer(object):
-    """
-    See _AnsiColorizer docstring.
-    """
-    def __init__(self, stream):
-        self.stream = stream
-
-    def supported(cls, stream=sys.stdout):
-        return True
-    supported = classmethod(supported)
-
-    def write(self, text, color):
-        self.stream.write(text)
-
-
-def get_elapsed_time_color(elapsed_time):
-    if elapsed_time > 1.0:
-        return 'red'
-    elif elapsed_time > 0.25:
-        return 'yellow'
-    else:
-        return 'green'
-
-
-class NovaTestResult(testtools.TestResult):
-    def __init__(self, stream, descriptions, verbosity):
-        super(NovaTestResult, self).__init__()
-        self.stream = stream
-        self.showAll = verbosity > 1
-        self.num_slow_tests = 10
-        self.slow_tests = []  # this is a fixed-sized heap
-        self.colorizer = None
-        # NOTE(vish): reset stdout for the terminal check
-        stdout = sys.stdout
-        sys.stdout = sys.__stdout__
-        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
-            if colorizer.supported():
-                self.colorizer = colorizer(self.stream)
-                break
-        sys.stdout = stdout
-        self.start_time = None
-        self.last_time = {}
-        self.results = {}
-        self.last_written = None
-
-    def _writeElapsedTime(self, elapsed):
-        color = get_elapsed_time_color(elapsed)
-        self.colorizer.write("  %.2f" % elapsed, color)
-
-    def _addResult(self, test, *args):
-        try:
-            name = test.id()
-        except AttributeError:
-            name = 'Unknown.unknown'
-        test_class, test_name = name.rsplit('.', 1)
-
-        elapsed = (self._now() - self.start_time).total_seconds()
-        item = (elapsed, test_class, test_name)
-        if len(self.slow_tests) >= self.num_slow_tests:
-            heapq.heappushpop(self.slow_tests, item)
-        else:
-            heapq.heappush(self.slow_tests, item)
-
-        self.results.setdefault(test_class, [])
-        self.results[test_class].append((test_name, elapsed) + args)
-        self.last_time[test_class] = self._now()
-        self.writeTests()
-
-    def _writeResult(self, test_name, elapsed, long_result, color,
-                     short_result, success):
-        if self.showAll:
-            self.stream.write('    %s' % str(test_name).ljust(66))
-            self.colorizer.write(long_result, color)
-            if success:
-                self._writeElapsedTime(elapsed)
-            self.stream.writeln()
-        else:
-            self.colorizer.write(short_result, color)
-
-    def addSuccess(self, test):
-        super(NovaTestResult, self).addSuccess(test)
-        self._addResult(test, 'OK', 'green', '.', True)
-
-    def addFailure(self, test, err):
-        if test.id() == 'process-returncode':
-            return
-        super(NovaTestResult, self).addFailure(test, err)
-        self._addResult(test, 'FAIL', 'red', 'F', False)
-
-    def addError(self, test, err):
-        super(NovaTestResult, self).addFailure(test, err)
-        self._addResult(test, 'ERROR', 'red', 'E', False)
-
-    def addSkip(self, test, reason=None, details=None):
-        super(NovaTestResult, self).addSkip(test, reason, details)
-        self._addResult(test, 'SKIP', 'blue', 'S', True)
-
-    def startTest(self, test):
-        self.start_time = self._now()
-        super(NovaTestResult, self).startTest(test)
-
-    def writeTestCase(self, cls):
-        if not self.results.get(cls):
-            return
-        if cls != self.last_written:
-            self.colorizer.write(cls, 'white')
-            self.stream.writeln()
-        for result in self.results[cls]:
-            self._writeResult(*result)
-        del self.results[cls]
-        self.stream.flush()
-        self.last_written = cls
-
-    def writeTests(self):
-        time = self.last_time.get(self.last_written, self._now())
-        if not self.last_written or (self._now() - time).total_seconds() > 2.0:
-            diff = 3.0
-            while diff > 2.0:
-                classes = self.results.keys()
-                oldest = min(classes, key=lambda x: self.last_time[x])
-                diff = (self._now() - self.last_time[oldest]).total_seconds()
-                self.writeTestCase(oldest)
-        else:
-            self.writeTestCase(self.last_written)
-
-    def done(self):
-        self.stopTestRun()
-
-    def stopTestRun(self):
-        for cls in list(self.results.iterkeys()):
-            self.writeTestCase(cls)
-        self.stream.writeln()
-        self.writeSlowTests()
-
-    def writeSlowTests(self):
-        # Pare out 'fast' tests
-        slow_tests = [item for item in self.slow_tests
-                      if get_elapsed_time_color(item[0]) != 'green']
-        if slow_tests:
-            slow_total_time = sum(item[0] for item in slow_tests)
-            slow = ("Slowest %i tests took %.2f secs:"
-                    % (len(slow_tests), slow_total_time))
-            self.colorizer.write(slow, 'yellow')
-            self.stream.writeln()
-            last_cls = None
-            # sort by name
-            for elapsed, cls, name in sorted(slow_tests,
-                                             key=lambda x: x[1] + x[2]):
-                if cls != last_cls:
-                    self.colorizer.write(cls, 'white')
-                    self.stream.writeln()
-                last_cls = cls
-                self.stream.write('    %s' % str(name).ljust(68))
-                self._writeElapsedTime(elapsed)
-                self.stream.writeln()
-
-    def printErrors(self):
-        if self.showAll:
-            self.stream.writeln()
-        self.printErrorList('ERROR', self.errors)
-        self.printErrorList('FAIL', self.failures)
-
-    def printErrorList(self, flavor, errors):
-        for test, err in errors:
-            self.colorizer.write("=" * 70, 'red')
-            self.stream.writeln()
-            self.colorizer.write(flavor, 'red')
-            self.stream.writeln(": %s" % test.id())
-            self.colorizer.write("-" * 70, 'red')
-            self.stream.writeln()
-            self.stream.writeln("%s" % err)
-
-
-test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
-
-if sys.version_info[0:2] <= (2, 6):
-    runner = unittest.TextTestRunner(verbosity=2)
-else:
-    runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)
-
-if runner.run(test).wasSuccessful():
-    exit_code = 0
-else:
-    exit_code = 1
-sys.exit(exit_code)
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
index 4862d01..49a42fe 100755
--- a/tools/find_stack_traces.py
+++ b/tools/find_stack_traces.py
@@ -18,7 +18,7 @@
 import gzip
 import pprint
 import re
-import StringIO
+import six
 import sys
 import urllib2
 
@@ -68,7 +68,7 @@
     req = urllib2.Request(url)
     req.add_header('Accept-Encoding', 'gzip')
     page = urllib2.urlopen(req)
-    buf = StringIO.StringIO(page.read())
+    buf = six.StringIO(page.read())
     f = gzip.GzipFile(fileobj=buf)
     content = f.read()
 
diff --git a/tools/skip_tracker.py b/tools/skip_tracker.py
index 50f33eb..a47e217 100755
--- a/tools/skip_tracker.py
+++ b/tools/skip_tracker.py
@@ -40,7 +40,8 @@
 
 
 def find_skips(start=TESTDIR):
-    """
+    """Find skipped tests
+
     Returns a list of tuples (method, bug) that represent
     test methods that have been decorated to skip because of
     a particular bug.
@@ -67,9 +68,7 @@
 
 
 def find_skips_in_file(path):
-    """
-    Return the skip tuples in a test file
-    """
+    """Return the skip tuples in a test file"""
     BUG_RE = re.compile(r'\s*@.*skip_because\(bug=[\'"](\d+)[\'"]')
     DEF_RE = re.compile(r'\s*def (\w+)\(')
     bug_found = False
diff --git a/tools/use_tempest_lib.sh b/tools/use_tempest_lib.sh
new file mode 100755
index 0000000..ca62c4a
--- /dev/null
+++ b/tools/use_tempest_lib.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+#
+# Use this script to switch to interfaces/files from tempest-lib.
+# Many files have been migrated to tempest-lib while tempest still
+# carries its own copies.
+# This script removes those copies from tempest and makes tempest use tempest-lib.
+# It records, in the generated commit message, the change-id of the tempest-lib
+# patch in which each file was migrated.
+# This should only be done for files which were migrated to lib with
+# "Migrated" in the commit message, as done by the tempest-lib/tools/migrate_from_tempest.sh script.
+# The "Migrated" keyword is used to fetch the migration commit history from lib.
+# To use:
+#  1. Create a new branch in the tempest repo so as not to destroy your
+#     current working branch
+#  2. Run the script from the repo dir and specify the file paths relative to
+#     the root tempest dir (code and unit tests only):
+#
+#   tools/use_tempest_lib.sh tempest/file1.py tempest/file2.py
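+#     For service client files, pass -s so the /json path component is
+#     dropped to match the tempest-lib layout, e.g. (path is illustrative):
+#   tools/use_tempest_lib.sh -s tempest/services/compute/json/example_client.py
+#     A different tempest-lib repo can be selected with
+#     -u/--tempest_lib_git_url <git url>.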
+
+
+function usage {
+    echo "Usage: $0 [OPTION] file1 file2 .."
+    echo "Use files from tempest-lib"
+    echo -e "Input files should be tempest files with path. \n  Example- tempest/file1.py tempest/file2.py .."
+    echo ""
+    echo "-s, --service_client Specify if files are service clients."
+    echo "-u, --tempest_lib_git_url Specify the repo to clone tempest-lib from."
+}
+
+function check_all_files_valid {
+    failed=0
+    for file in $files; do
+        # Get the latest change-id for each file
+        latest_commit_id=`git log -n1 -- $file | grep "^commit" | awk '{print $2}'`
+        cd $tmpdir
+        filename=`basename $file`
+        lib_path=`find ./ -name $filename`
+        if [ -z $lib_path ]; then
+            echo "ERROR: $filename does not exist in tempest-lib."
+            failed=$(( failed + 1))
+            cd - > /dev/null
+            continue
+        fi
+        # Get the CHANGE_ID of tempest-lib patch where file was migrated
+        migration_change_id=`git log  -n1 --grep "Migrated" -- $lib_path | grep "Change-Id: " | awk '{print $2}'`
+        MIGRATION_IDS=`echo -e "$MIGRATION_IDS\n * $filename: $migration_change_id"`
+        # Get tempest CHANGE_ID of file which was migrated to lib
+        migrated_change_id=`git log  -n1 --grep "Migrated" -- $lib_path | grep "* $filename"`
+        migrated_change_id=${migrated_change_id#*:}
+        cd - > /dev/null
+        # Get the commit-id of tempest which was migrated to tempest-lib
+        migrated_commit_id=`git log --grep "$migrated_change_id" -- $file | grep "^commit" | awk '{print $2}'`
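+        # Compare the current tempest copy with the revision that was migrated;
+        # any difference means the file changed after migration and lib is stale.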
+        DIFF=$(git diff $latest_commit_id $migrated_commit_id $file)
+        if [ "$DIFF" != "" ]; then
+            echo "ERROR: $filename in tempest has been updated after migration to tempest-lib. First sync the file to tempest-lib."
+            failed=$(( failed + 1))
+        fi
+    done
+    if [[ $failed -gt 0 ]]; then
+        echo "$failed files had issues"
+        exit $failed
+    fi
+}
+
+set -e
+
+service_client=0
+file_list=''
+
+while [ $# -gt 0 ]; do
+    case "$1" in
+        -h|--help) usage; exit;;
+        -u|--tempest_lib_git_url) tempest_lib_git_url="$2"; shift;;
+        -s|--service_client) service_client=1;;
+        *) files="$files $1";;
+    esac
+    shift
+done
+
+if [ -z "$files" ]; then
+    usage; exit
+fi
+
+TEMPEST_LIB_GIT_URL=${tempest_lib_git_url:-git://git.openstack.org/openstack/tempest-lib}
+
+tmpdir=$(mktemp -d -t use-tempest-lib.XXXX)
+
+# Clone tempest-lib
+git clone $TEMPEST_LIB_GIT_URL $tmpdir
+
+# Check that all provided files are present in lib and have
+# not been updated in tempest after migration to lib.
+check_all_files_valid
+
+for file in $files; do
+    rm -f $file
+    tempest_dir=`pwd`
+    tempest_dir="$tempest_dir/tempest/"
+    tempest_dirname=`dirname $file`
+    lib_dirname=`echo $tempest_dirname | sed s@tempest\/@tempest_lib/\@`
+    # Convert tempest dirname to import string
+    tempest_import="${tempest_dirname//\//.}"
+    tempest_import=${tempest_import:2:${#tempest_import}}
+    if [ $service_client -eq 1 ]; then
+        # Remove the /json path component because tempest-lib supports JSON only (no XML)
+        lib_dirname=`echo $lib_dirname | sed s@\/json@@`
+    fi
+    # Convert tempest-lib dirname to import string
+    tempest_lib_import="${lib_dirname//\//.}"
+    tempest_lib_import=${tempest_lib_import:2:${#tempest_lib_import}}
+    module_name=`basename $file .py`
+    tempest_import1="from $tempest_import.$module_name"
+    tempest_lib_import1="from $tempest_lib_import.$module_name"
+    tempest_import2="from $tempest_import import $module_name"
+    tempest_lib_import2="from $tempest_lib_import import $module_name"
+    set +e
+    grep -rl "$tempest_import1" $tempest_dir | xargs sed -i'' s/"$tempest_import1"/"$tempest_lib_import1"/g 2> /dev/null
+    grep -rl "$tempest_import2" $tempest_dir | xargs sed -i'' s/"$tempest_import2"/"$tempest_lib_import2"/g 2> /dev/null
+    set -e
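+    # Build a short, comma-separated module list for the commit subject below;
+    # stop appending once the list would reach ~27 characters.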
+    if [[ -z "$file_list" ]]; then
+        file_list="$module_name"
+    else
+        tmp_file_list="$file_list, $module_name"
+        char_size=`echo $tmp_file_list | wc -c`
+        if [ $char_size -lt 27 ]; then
+            file_list="$file_list, $module_name"
+        fi
+    fi
+done
+
+rm -rf $tmpdir
+echo "Completed. Run pep8 and fix error if any"
+
+git add -A tempest/
+# Generate a migration commit
+commit_message="Use $file_list from tempest-lib"
+pre_list=$"The files below have been migrated to tempest-lib\n"
+pre_list=`echo -e $pre_list`
+post_list=$"Now Tempest-lib provides those as stable interfaces. So Tempest should\nstart using those from lib and remove its own copy."
+post_list=`echo -e $post_list`
+
+git commit -m "$commit_message" -m "$pre_list" -m "$MIGRATION_IDS" -m "$post_list"
diff --git a/tox.ini b/tox.ini
index 09c8626..95f2cf1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
-envlist = pep8,py27,py34
-minversion = 1.6
+envlist = pep8,py34,py27
+minversion = 2.3.1
 skipsdist = True
 
 [tempestenv]
@@ -34,7 +34,7 @@
 sitepackages = {[tempestenv]sitepackages}
 # 'all' includes slow tests
 setenv = {[tempestenv]setenv}
-         OS_TEST_TIMEOUT=1200
+         OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
 deps = {[tempestenv]deps}
 commands =
   find . -type f -name "*.pyc" -delete
@@ -44,7 +44,7 @@
 sitepackages = True
 # 'all' includes slow tests
 setenv = {[tempestenv]setenv}
-         OS_TEST_TIMEOUT=1200
+         OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
 deps = {[tempestenv]deps}
 commands =
   find . -type f -name "*.pyc" -delete
@@ -55,7 +55,7 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
-# See the testrepostiory bug: https://bugs.launchpad.net/testrepository/+bug/1208610
+# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
   find . -type f -name "*.pyc" -delete
   bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty)) {posargs}'
@@ -65,7 +65,7 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
-# See the testrepostiory bug: https://bugs.launchpad.net/testrepository/+bug/1208610
+# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
   find . -type f -name "*.pyc" -delete
   bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty)) {posargs}'
@@ -114,11 +114,11 @@
 [testenv:pep8]
 commands =
    flake8 {posargs}
-   python tools/check_uuid.py
+   check-uuid
 
 [testenv:uuidgen]
 commands =
-   python tools/check_uuid.py --fix
+   check-uuid --fix
 
 [hacking]
 local-check-factory = tempest.hacking.checks.factory
@@ -129,6 +129,6 @@
 # E123 skipped because it is ignored by default in the default pep8
 # E129 skipped because it is too limiting when combined with other rules
 # Skipped because of new hacking 0.9: H405
-ignore = E125,E123,E129,H404,H405
+ignore = E125,E123,E129
 show-source = True
 exclude = .git,.venv,.tox,dist,doc,openstack,*egg