Merge "Remove unused RFCViolation"
diff --git a/HACKING.rst b/HACKING.rst
index cc1f161..8407734 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -103,10 +103,10 @@
 Service Tagging
 ---------------
 Service tagging is used to specify which services are exercised by a particular
-test method. You specify the services with the ``tempest.test.services``
+test method. You specify the services with the ``tempest.common.utils.services``
 decorator. For example:
 
-@services('compute', 'image')
+@utils.services('compute', 'image')
 
 Valid service tag names are the same as the list of directories in tempest.api
 that have tests.
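
For reference, a minimal sketch of how a test might apply the decorator with the
updated import path (the class and test names below are hypothetical, not part
of this change)::

    from tempest.common import utils
    from tempest import test


    class ExampleTaggedTest(test.BaseTestCase):
        """Hypothetical class, shown only to illustrate service tagging."""

        @utils.services('compute', 'image')
        def test_snapshot_server(self):
            # The tag declares that this test exercises both the compute and
            # the image service; valid tag names match the directories under
            # tempest/api that contain tests.
            pass
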
@@ -128,6 +128,13 @@
 Test class level resources should be defined in the `resource_setup` method of
 the test class, except for any credential obtained from the credentials
 provider, which should be set-up in the `setup_credentials` method.
+Cleanup is best scheduled using `addClassResourceCleanup`, which ensures that
+the cleanup code is always invoked, and that cleanups run in reverse order
+with respect to the creation order.
+
+In both cases - test level and class level cleanups - for resources that are
+deleted asynchronously, a wait for deletion should be scheduled before the
+delete itself, so that at cleanup time the delete runs first, then the wait.
 
 The test base class `BaseTestCase` defines Tempest framework for class level
 fixtures. `setUpClass` and `tearDownClass` are defined here and cannot be
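
To make the ordering concrete, here is a minimal sketch of the pattern applied
throughout this change, mirroring the flavor extra specs tests below (the class
and flavor details are illustrative only). The wait is registered first and the
delete second, so the stacked cleanups issue the delete first and then run the
wait for deletion right after it::

    from tempest.api.compute import base
    from tempest.lib.common.utils import data_utils


    class ExampleFlavorCleanupTest(base.BaseV2ComputeAdminTest):
        """Hypothetical class illustrating class level cleanup scheduling."""

        @classmethod
        def resource_setup(cls):
            super(ExampleFlavorCleanupTest, cls).resource_setup()
            cls.flavor = cls.admin_flavors_client.create_flavor(
                name=data_utils.rand_name('example-flavor'),
                ram=64, vcpus=1, disk=0)['flavor']
            # Registered first, executed last: wait until the flavor is gone.
            cls.addClassResourceCleanup(
                cls.admin_flavors_client.wait_for_resource_deletion,
                cls.flavor['id'])
            # Registered second, executed first: issue the delete itself.
            cls.addClassResourceCleanup(
                cls.admin_flavors_client.delete_flavor, cls.flavor['id'])
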
diff --git a/data/tempest-plugins-registry.header b/doc/source/data/tempest-plugins-registry.header
similarity index 100%
rename from data/tempest-plugins-registry.header
rename to doc/source/data/tempest-plugins-registry.header
diff --git a/doc/source/library.rst b/doc/source/library.rst
index a461a0f..074d642 100644
--- a/doc/source/library.rst
+++ b/doc/source/library.rst
@@ -69,3 +69,4 @@
    library/auth
    library/clients
    library/credential_providers
+   library/validation_resources
diff --git a/doc/source/library/credential_providers.rst b/doc/source/library/credential_providers.rst
index f4eb37d..d96c97a 100644
--- a/doc/source/library/credential_providers.rst
+++ b/doc/source/library/credential_providers.rst
@@ -130,19 +130,18 @@
   # role
   provider.clear_creds()
 
-API Reference
-=============
 
-------------------------------
+API Reference
+-------------
+
 The dynamic credentials module
-------------------------------
+''''''''''''''''''''''''''''''
 
 .. automodule:: tempest.lib.common.dynamic_creds
    :members:
 
---------------------------------------
 The pre-provisioned credentials module
---------------------------------------
+''''''''''''''''''''''''''''''''''''''
 
 .. automodule:: tempest.lib.common.preprov_creds
    :members:
diff --git a/doc/source/library/validation_resources.rst b/doc/source/library/validation_resources.rst
new file mode 100644
index 0000000..9b36476
--- /dev/null
+++ b/doc/source/library/validation_resources.rst
@@ -0,0 +1,11 @@
+.. _validation_resources:
+
+Validation Resources
+====================
+
+-------------------------------
+The validation_resources module
+-------------------------------
+
+.. automodule:: tempest.lib.common.validation_resources
+   :members:
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index aec55e9..5a2876e 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -59,10 +59,16 @@
  * setup_clients
  * resource_setup
 
-which is executed in that order. An example of a TestCase which defines all
+which is executed in that order. Cleanup of resources provisioned during
+resource_setup must be scheduled right after provisioning using the
+addClassResourceCleanup helper. The resource cleanups stacked this way
+are executed in reverse order during tearDownClass, before the cleanup of
+test credentials takes place. An example of a TestCase which defines all
 of these would be::
-
+
+  from tempest.common import waiters
   from tempest import config
+  from tempest.lib.common.utils import test_utils
   from tempest import test
 
   CONF = config.CONF
@@ -111,6 +117,13 @@
         """
         super(TestExampleCase, cls).resource_setup()
         cls.shared_server = cls.servers_client.create_server(...)
+        cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+                                    cls.servers_client,
+                                    cls.shared_server['id'])
+        cls.addClassResourceCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            cls.servers_client.delete_server,
+            cls.shared_server['id'])
 
 .. _credentials:
 
diff --git a/releasenotes/notes/add-reset-group-status-api-to-v3-groups-client-9aa048617c66756a.yaml b/releasenotes/notes/add-reset-group-status-api-to-v3-groups-client-9aa048617c66756a.yaml
new file mode 100644
index 0000000..a39c23b
--- /dev/null
+++ b/releasenotes/notes/add-reset-group-status-api-to-v3-groups-client-9aa048617c66756a.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add reset group status API to the v3 groups_client library. The minimum
+    microversion of this API is 3.20. This feature makes it possible to
+    reset the status of a group.
diff --git a/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml b/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml
new file mode 100644
index 0000000..7814f4e
--- /dev/null
+++ b/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Add the `validation_resources` module to tempest.lib. The module provides
+    a set of helpers that can be used to provision and clean up all the
+    resources required to perform ping / ssh tests against a virtual machine:
+    a keypair, a security group with targeted rules and a floating IP.
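
As a hedged sketch of how the compute tests updated in this change consume
these resources through the base class helpers (the test class and method
below are hypothetical; SSH credentials come from the validation options in
tempest.conf)::

    from tempest.api.compute import base
    from tempest.common.utils.linux import remote_client


    class ExampleValidationTest(base.BaseV2ComputeTest):
        """Hypothetical test illustrating per-test validation resources."""

        def test_guest_is_reachable(self):
            # Keypair, security group and floating IP scoped to this test.
            validation_resources = self.get_test_validation_resources(
                self.os_primary)
            server = self.create_test_server(
                validatable=True,
                validation_resources=validation_resources,
                wait_until='ACTIVE')
            # Delete the server at the end of this test, since the validation
            # resources it relies on are test-scoped.
            self.addCleanup(self.delete_server, server['id'])
            # SSH into the guest using the provisioned keypair / floating IP.
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server, validation_resources),
                self.image_ssh_user,
                self.image_ssh_password,
                validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.servers_client)
            linux_client.validate_authentication()
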
diff --git a/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
new file mode 100644
index 0000000..775a383
--- /dev/null
+++ b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Fix list_group_snapshots API in v3 group_snapshots_client: Bug#1715786.
+    The URL path for the list group snapshots with details API is changed
+    ``?detail=True`` to ``/detail``.
diff --git a/releasenotes/notes/make-account-client-as-stable-interface-d1b07c7e8f17bef6.yaml b/releasenotes/notes/make-account-client-as-stable-interface-d1b07c7e8f17bef6.yaml
new file mode 100644
index 0000000..9d5a1f5
--- /dev/null
+++ b/releasenotes/notes/make-account-client-as-stable-interface-d1b07c7e8f17bef6.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Define the object storage service clients listed below as libraries.
+    Add new service clients to the library interface so that
+    other projects can use these modules as stable libraries
+    without any maintenance changes.
+
+      * account_client
diff --git a/releasenotes/notes/remove-deprecated-skip-decorators-f8b42d812d20b537.yaml b/releasenotes/notes/remove-deprecated-skip-decorators-f8b42d812d20b537.yaml
new file mode 100644
index 0000000..920bc5d
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-skip-decorators-f8b42d812d20b537.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    Remove two deprecated skip decorators from the ``config`` module:
+    ``skip_unless_config`` and ``skip_if_config``.
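
For reference, a minimal sketch of the config-driven skip style already used by
tests in this change, which is one way to express the skips the removed
decorators used to cover (the class and test below are hypothetical)::

    import testtools

    from tempest.api.compute import base
    from tempest import config

    CONF = config.CONF


    class ExampleConfigSkipTest(base.BaseV2ComputeTest):
        """Hypothetical class showing skips driven by tempest.conf options."""

        @testtools.skipUnless(CONF.validation.run_validation,
                              'Instance validation tests are disabled.')
        def test_only_with_validation(self):
            # Runs only when validation.run_validation is enabled.
            pass
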
diff --git a/requirements.txt b/requirements.txt
index 36b9efa..911f0e5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,23 +3,23 @@
 # process, which may cause wedges in the gate later.
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 cliff>=2.8.0 # Apache-2.0
-jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
+jsonschema<3.0.0,>=2.6.0 # MIT
 testtools>=1.4.0 # MIT
 paramiko>=2.0.0 # LGPLv2.1+
-netaddr!=0.7.16,>=0.7.13 # BSD
+netaddr>=0.7.18 # BSD
 testrepository>=0.0.18 # Apache-2.0/BSD
-oslo.concurrency>=3.8.0 # Apache-2.0
-oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0
+oslo.concurrency>=3.20.0 # Apache-2.0
+oslo.config>=4.6.0 # Apache-2.0
 oslo.log>=3.30.0 # Apache-2.0
-oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0
-oslo.utils>=3.20.0 # Apache-2.0
+oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
+oslo.utils>=3.28.0 # Apache-2.0
 six>=1.9.0 # MIT
 fixtures>=3.0.0 # Apache-2.0/BSD
 PyYAML>=3.10 # MIT
 python-subunit>=0.0.18 # Apache-2.0/BSD
 stevedore>=1.20.0 # Apache-2.0
 PrettyTable<0.8,>=0.7.1 # BSD
-os-testr>=0.8.0 # Apache-2.0
+os-testr>=1.0.0 # Apache-2.0
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
-unittest2 # BSD
+unittest2>=1.1.0 # BSD
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index 6f23866..a9772c4 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -143,6 +143,8 @@
             test_utils.call_and_ignore_notfound_exc(
                 cls.networks_client.delete_network, network['id'])
 
+        super(AutoAllocateNetworkTest, cls).resource_cleanup()
+
     @decorators.idempotent_id('5eb7b8fa-9c23-47a2-9d7d-02ed5809dd34')
     def test_server_create_no_allocate(self):
         """Tests that no networking is allocated for the server."""
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 66bedd9..08b2d19 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -17,8 +17,10 @@
 
 from tempest.api.compute import base
 from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 CONF = config.CONF
@@ -35,12 +37,6 @@
         super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
 
-    @classmethod
-    def resource_setup(cls):
-        cls.set_validation_resources()
-
-        super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
-
     @decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
     @testtools.skipUnless(CONF.validation.run_validation,
                           'Instance validation tests are disabled.')
@@ -67,20 +63,30 @@
 
         admin_pass = self.image_ssh_password
 
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
         server_no_eph_disk = self.create_test_server(
             validatable=True,
+            validation_resources=validation_resources,
             wait_until='ACTIVE',
             adminPass=admin_pass,
             flavor=flavor_no_eph_disk_id)
 
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server_no_eph_disk['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.servers_client.delete_server,
+                        server_no_eph_disk['id'])
+
         # Get partition number of server without ephemeral disk.
         server_no_eph_disk = self.client.show_server(
             server_no_eph_disk['id'])['server']
         linux_client = remote_client.RemoteClient(
-            self.get_server_ip(server_no_eph_disk),
+            self.get_server_ip(server_no_eph_disk,
+                               validation_resources),
             self.ssh_user,
             admin_pass,
-            self.validation_resources['keypair']['private_key'],
+            validation_resources['keypair']['private_key'],
             server=server_no_eph_disk,
             servers_client=self.client)
         disks_num = len(linux_client.get_disks().split('\n'))
@@ -90,17 +96,25 @@
 
         server_with_eph_disk = self.create_test_server(
             validatable=True,
+            validation_resources=validation_resources,
             wait_until='ACTIVE',
             adminPass=admin_pass,
             flavor=flavor_with_eph_disk_id)
 
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server_with_eph_disk['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.servers_client.delete_server,
+                        server_with_eph_disk['id'])
+
         server_with_eph_disk = self.client.show_server(
             server_with_eph_disk['id'])['server']
         linux_client = remote_client.RemoteClient(
-            self.get_server_ip(server_with_eph_disk),
+            self.get_server_ip(server_with_eph_disk,
+                               validation_resources),
             self.ssh_user,
             admin_pass,
-            self.validation_resources['keypair']['private_key'],
+            validation_resources['keypair']['private_key'],
             server=server_with_eph_disk,
             servers_client=self.client)
         disks_num_eph = len(linux_client.get_disks().split('\n'))
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index d4e7bd5..4d27a22 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -53,12 +53,11 @@
             ephemeral=ephemeral,
             swap=swap,
             rxtx_factor=rxtx)['flavor']
-
-    @classmethod
-    def resource_cleanup(cls):
-        cls.admin_flavors_client.delete_flavor(cls.flavor['id'])
-        cls.admin_flavors_client.wait_for_resource_deletion(cls.flavor['id'])
-        super(FlavorsExtraSpecsTestJSON, cls).resource_cleanup()
+        cls.addClassResourceCleanup(
+            cls.admin_flavors_client.wait_for_resource_deletion,
+            cls.flavor['id'])
+        cls.addClassResourceCleanup(cls.admin_flavors_client.delete_flavor,
+                                    cls.flavor['id'])
 
     @decorators.idempotent_id('0b2f9d4b-1ca2-4b99-bb40-165d4bb94208')
     def test_flavor_set_get_update_show_unset_keys(self):
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index bc32346..5cde39e 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -55,12 +55,11 @@
             ephemeral=ephemeral,
             swap=swap,
             rxtx_factor=rxtx)['flavor']
-
-    @classmethod
-    def resource_cleanup(cls):
-        cls.admin_flavors_client.delete_flavor(cls.flavor['id'])
-        cls.admin_flavors_client.wait_for_resource_deletion(cls.flavor['id'])
-        super(FlavorsExtraSpecsNegativeTestJSON, cls).resource_cleanup()
+        cls.addClassResourceCleanup(
+            cls.admin_flavors_client.wait_for_resource_deletion,
+            cls.flavor['id'])
+        cls.addClassResourceCleanup(cls.admin_flavors_client.delete_flavor,
+                                    cls.flavor['id'])
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d')
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 5894e80..c2bdf7e 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -17,6 +17,7 @@
 from testtools import matchers
 
 from tempest.api.compute import base
+from tempest.common import identity
 from tempest.common import tempest_fixtures as fixtures
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -93,10 +94,11 @@
         # Verify that GET shows the updated quota set of project
         project_name = data_utils.rand_name('cpu_quota_project')
         project_desc = project_name + '-desc'
-        project = self.identity_utils.create_project(name=project_name,
-                                                     description=project_desc)
+        project = identity.identity_utils(self.os_admin).create_project(
+            name=project_name, description=project_desc)
         project_id = project['id']
-        self.addCleanup(self.identity_utils.delete_project, project_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+                        project_id)
 
         self.adm_client.update_quota_set(project_id, ram='5120')
         quota_set = self.adm_client.show_quota_set(project_id)['quota_set']
@@ -106,12 +108,12 @@
         user_name = data_utils.rand_name('cpu_quota_user')
         password = data_utils.rand_password()
         email = user_name + '@testmail.tm'
-        user = self.identity_utils.create_user(username=user_name,
-                                               password=password,
-                                               project=project,
-                                               email=email)
+        user = identity.identity_utils(self.os_admin).create_user(
+            username=user_name, password=password, project=project,
+            email=email)
         user_id = user['id']
-        self.addCleanup(self.identity_utils.delete_user, user_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_user,
+                        user_id)
 
         self.adm_client.update_quota_set(project_id,
                                          user_id=user_id,
@@ -125,10 +127,11 @@
         # Admin can delete the resource quota set for a project
         project_name = data_utils.rand_name('ram_quota_project')
         project_desc = project_name + '-desc'
-        project = self.identity_utils.create_project(name=project_name,
-                                                     description=project_desc)
+        project = identity.identity_utils(self.os_admin).create_project(
+            name=project_name, description=project_desc)
         project_id = project['id']
-        self.addCleanup(self.identity_utils.delete_project, project_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+                        project_id)
         quota_set_default = (self.adm_client.show_quota_set(project_id)
                              ['quota_set'])
         ram_default = quota_set_default['ram']
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 3656770..f720b84 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -61,7 +61,7 @@
         flavor_ref = self.create_flavor(ram=ram, vcpus=vcpus, disk=disk)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.client.resize_server,
-                          self.servers[0]['id'],
+                          self.s1_id,
                           flavor_ref['id'])
 
     @decorators.idempotent_id('7368a427-2f26-4ad9-9ba9-911a0ec2b0db')
@@ -83,7 +83,7 @@
         flavor_ref = self.create_flavor(ram=ram, vcpus=vcpus, disk=disk)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.client.resize_server,
-                          self.servers[0]['id'],
+                          self.s1_id,
                           flavor_ref['id'])
 
     @decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 3340d8c..683d3e9 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -116,42 +116,6 @@
         cls.ssh_user = CONF.validation.image_ssh_user
         cls.image_ssh_user = CONF.validation.image_ssh_user
         cls.image_ssh_password = CONF.validation.image_ssh_password
-        cls.servers = []
-        cls.images = []
-        cls.security_groups = []
-        cls.server_groups = []
-        cls.volumes = []
-
-    @classmethod
-    def resource_cleanup(cls):
-        cls.clear_resources('images', cls.images,
-                            cls.compute_images_client.delete_image)
-        cls.clear_servers()
-        cls.clear_resources('security groups', cls.security_groups,
-                            cls.security_groups_client.delete_security_group)
-        cls.clear_resources('server groups', cls.server_groups,
-                            cls.server_groups_client.delete_server_group)
-        cls.clear_volumes()
-        super(BaseV2ComputeTest, cls).resource_cleanup()
-
-    @classmethod
-    def clear_servers(cls):
-        LOG.debug('Clearing servers: %s', ','.join(
-            server['id'] for server in cls.servers))
-        for server in cls.servers:
-            try:
-                test_utils.call_and_ignore_notfound_exc(
-                    cls.servers_client.delete_server, server['id'])
-            except Exception:
-                LOG.exception('Deleting server %s failed', server['id'])
-
-        for server in cls.servers:
-            try:
-                waiters.wait_for_server_termination(cls.servers_client,
-                                                    server['id'])
-            except Exception:
-                LOG.exception('Waiting for deletion of server %s failed',
-                              server['id'])
 
     @classmethod
     def server_check_teardown(cls):
@@ -190,7 +154,7 @@
 
     @classmethod
     def create_test_server(cls, validatable=False, volume_backed=False,
-                           **kwargs):
+                           validation_resources=None, **kwargs):
         """Wrapper utility that returns a test server.
 
         This wrapper utility calls the common create test server and
@@ -200,6 +164,10 @@
 
         :param validatable: Whether the server will be pingable or sshable.
         :param volume_backed: Whether the instance is volume backed or not.
+        :param validation_resources: Dictionary of validation resources as
+            returned by `get_class_validation_resources`.
+        :param kwargs: Extra arguments are passed down to the
+            `compute.create_test_server` call.
         """
         if 'name' not in kwargs:
             kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
@@ -216,12 +184,20 @@
         body, servers = compute.create_test_server(
             cls.os_primary,
             validatable,
-            validation_resources=cls.validation_resources,
+            validation_resources=validation_resources,
             tenant_network=tenant_network,
             volume_backed=volume_backed,
             **kwargs)
 
-        cls.servers.extend(servers)
+        # For each server schedule wait and delete, so that at cleanup time
+        # all servers get deleted first and then we wait for them to be gone
+        for server in servers:
+            cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+                                        cls.servers_client, server['id'])
+        for server in servers:
+            cls.addClassResourceCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                cls.servers_client.delete_server, server['id'])
 
         return body
 
@@ -233,7 +209,10 @@
             description = data_utils.rand_name('description')
         body = cls.security_groups_client.create_security_group(
             name=name, description=description)['security_group']
-        cls.security_groups.append(body['id'])
+        cls.addClassResourceCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            cls.security_groups_client.delete_security_group,
+            body['id'])
 
         return body
 
@@ -245,7 +224,10 @@
             policy = ['affinity']
         body = cls.server_groups_client.create_server_group(
             name=name, policies=policy)['server_group']
-        cls.server_groups.append(body['id'])
+        cls.addClassResourceCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            cls.server_groups_client.delete_server_group,
+            body['id'])
         return body
 
     def wait_for(self, condition):
@@ -263,18 +245,6 @@
                 return
             time.sleep(self.build_interval)
 
-    @staticmethod
-    def _delete_volume(volumes_client, volume_id):
-        """Deletes the given volume and waits for it to be gone."""
-        try:
-            volumes_client.delete_volume(volume_id)
-            # TODO(mriedem): We should move the wait_for_resource_deletion
-            # into the delete_volume method as a convenience to the caller.
-            volumes_client.wait_for_resource_deletion(volume_id)
-        except lib_exc.NotFound:
-            LOG.warning("Unable to delete volume '%s' since it was not found. "
-                        "Maybe it was already deleted?", volume_id)
-
     @classmethod
     def prepare_instance_network(cls):
         if (CONF.validation.auth_method != 'disabled' and
@@ -293,7 +263,9 @@
         image = cls.compute_images_client.create_image(server_id, name=name,
                                                        **kwargs)
         image_id = data_utils.parse_image_id(image.response['location'])
-        cls.images.append(image_id)
+        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                    cls.compute_images_client.delete_image,
+                                    image_id)
 
         if wait_until is not None:
             try:
@@ -326,13 +298,33 @@
 
     @classmethod
     def rebuild_server(cls, server_id, validatable=False, **kwargs):
-        # Destroy an existing server and creates a new one
+        """Destroy an existing class level server and creates a new one
+
+        Some test classes use a test server that can be used by multiple
+        tests. This is done to optimise runtime and test load.
+        If something goes wrong with the test server, it can be rebuilt
+        using this helper.
+
+        This helper can also be used for the initial provisioning if no
+        server_id is specified.
+
+        :param server_id: UUID of the server to be rebuilt. If None is
+            specified, a new server is provisioned.
+        :param validatable: whether the server needs to be
+            validatable. When True, validation resources are acquired via
+            the `get_class_validation_resources` helper.
+        :param kwargs: extra parameters are passed through to the
+            `create_test_server` call.
+        :return: the UUID of the created server.
+        """
         if server_id:
             cls.delete_server(server_id)
 
         cls.password = data_utils.rand_password()
         server = cls.create_test_server(
             validatable,
+            validation_resources=cls.get_class_validation_resources(
+                cls.os_primary),
             wait_until='ACTIVE',
             adminPass=cls.password,
             **kwargs)
@@ -360,17 +352,33 @@
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
-        cls._delete_volume(cls.volumes_client, volume_id)
+        try:
+            cls.volumes_client.delete_volume(volume_id)
+            # TODO(mriedem): We should move the wait_for_resource_deletion
+            # into the delete_volume method as a convenience to the caller.
+            cls.volumes_client.wait_for_resource_deletion(volume_id)
+        except lib_exc.NotFound:
+            LOG.warning("Unable to delete volume '%s' since it was not found. "
+                        "Maybe it was already deleted?", volume_id)
 
     @classmethod
-    def get_server_ip(cls, server):
+    def get_server_ip(cls, server, validation_resources=None):
         """Get the server fixed or floating IP.
 
         Based on the configuration we're in, return a correct ip
         address for validating that a guest is up.
+
+        :param server: The server dict as returned by the API
+        :param validation_resources: The dict of validation resources
+            provisioned for the server.
         """
         if CONF.validation.connect_method == 'floating':
-            return cls.validation_resources['floating_ip']['ip']
+            if validation_resources:
+                return validation_resources['floating_ip']['ip']
+            else:
+                msg = ('When validation.connect_method equals floating, '
+                       'validation_resources cannot be None')
+                raise exceptions.InvalidParam(invalid_param=msg)
         elif CONF.validation.connect_method == 'fixed':
             addresses = server['addresses'][CONF.validation.network_for_ssh]
             for address in addresses:
@@ -401,29 +409,15 @@
         if image_ref is not None:
             kwargs['imageRef'] = image_ref
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
-        cls.volumes.append(volume)
+        cls.addClassResourceCleanup(
+            cls.volumes_client.wait_for_resource_deletion, volume['id'])
+        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                    cls.volumes_client.delete_volume,
+                                    volume['id'])
         waiters.wait_for_volume_resource_status(cls.volumes_client,
                                                 volume['id'], 'available')
         return volume
 
-    @classmethod
-    def clear_volumes(cls):
-        LOG.debug('Clearing volumes: %s', ','.join(
-            volume['id'] for volume in cls.volumes))
-        for volume in cls.volumes:
-            try:
-                test_utils.call_and_ignore_notfound_exc(
-                    cls.volumes_client.delete_volume, volume['id'])
-            except Exception:
-                LOG.exception('Deleting volume %s failed', volume['id'])
-
-        for volume in cls.volumes:
-            try:
-                cls.volumes_client.wait_for_resource_deletion(volume['id'])
-            except Exception:
-                LOG.exception('Waiting for deletion of volume %s failed',
-                              volume['id'])
-
     def attach_volume(self, server, volume, device=None, check_reserved=False):
         """Attaches volume to server and waits for 'in-use' volume status.
 
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 9ee0ac9..86e244b 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -45,7 +45,6 @@
     @classmethod
     def resource_setup(cls):
         super(FloatingIPsTestJSON, cls).resource_setup()
-        cls.floating_ip_id = None
 
         # Server creation
         server = cls.create_test_server(wait_until='ACTIVE')
@@ -53,16 +52,10 @@
         # Floating IP creation
         body = cls.client.create_floating_ip(
             pool=CONF.network.floating_network_name)['floating_ip']
+        cls.addClassResourceCleanup(cls.client.delete_floating_ip, body['id'])
         cls.floating_ip_id = body['id']
         cls.floating_ip = body['ip']
 
-    @classmethod
-    def resource_cleanup(cls):
-        # Deleting the floating IP which is created in this method
-        if cls.floating_ip_id:
-            cls.client.delete_floating_ip(cls.floating_ip_id)
-        super(FloatingIPsTestJSON, cls).resource_cleanup()
-
     @decorators.idempotent_id('f7bfb946-297e-41b8-9e8c-aba8e9bb5194')
     def test_allocate_floating_ip(self):
         # Positive test:Allocation of a new floating IP to a project
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 2314433..516c544 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -41,18 +41,12 @@
     def resource_setup(cls):
         super(FloatingIPDetailsTestJSON, cls).resource_setup()
         cls.floating_ip = []
-        cls.floating_ip_id = []
         for _ in range(3):
             body = cls.client.create_floating_ip(
                 pool=CONF.network.floating_network_name)['floating_ip']
+            cls.addClassResourceCleanup(cls.client.delete_floating_ip,
+                                        body['id'])
             cls.floating_ip.append(body)
-            cls.floating_ip_id.append(body['id'])
-
-    @classmethod
-    def resource_cleanup(cls):
-        for f_id in cls.floating_ip_id:
-            cls.client.delete_floating_ip(f_id)
-        super(FloatingIPDetailsTestJSON, cls).resource_cleanup()
 
     @decorators.idempotent_id('16db31c3-fb85-40c9-bbe2-8cf7b67ff99f')
     def test_list_floating_ips(self):
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 8d503dc..b497626 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -20,6 +20,7 @@
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
 
@@ -70,7 +71,9 @@
         body = cls.glance_client.create_image(**params)
         body = body['image'] if 'image' in body else body
         cls.image_id = body['id']
-        cls.images.append(cls.image_id)
+        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                    cls.glance_client.delete_image,
+                                    cls.image_id)
         image_file = six.BytesIO((b'*' * 1024))
         if CONF.image_feature_enabled.api_v1:
             cls.glance_client.update_image(cls.image_id, data=image_file)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 5987d39..e62e25e 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -74,7 +74,6 @@
 
         # Verify the image was deleted correctly
         self.client.delete_image(image['id'])
-        self.images.remove(image['id'])
         self.client.wait_for_resource_deletion(image['id'])
 
     @decorators.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index cf32ba3..7ecfa0a 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -107,7 +107,6 @@
 
         image_id = data_utils.parse_image_id(image.response['location'])
         self.client.delete_image(image_id)
-        self.images.remove(image_id)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
@@ -130,6 +129,5 @@
 
         # Do not wait, attempt to delete the image, ensure it's successful
         self.client.delete_image(image_id)
-        self.images.remove(image_id)
         self.assertRaises(lib_exc.NotFound,
                           self.client.show_image, image_id)
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index acc8b3e..d83d8df 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -23,6 +23,7 @@
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
 
@@ -74,7 +75,10 @@
             body = cls.glance_client.create_image(**params)
             body = body['image'] if 'image' in body else body
             image_id = body['id']
-            cls.images.append(image_id)
+            cls.addClassResourceCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                cls.compute_images_client.delete_image,
+                image_id)
             # Wait 1 second between creation and upload to ensure a delta
             # between created_at and updated_at.
             time.sleep(1)
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index eeb423e..62d5bea 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -52,8 +52,7 @@
             self.client.wait_for_resource_deletion(sg['id'])
         # Now check if all the created Security Groups are deleted
         fetched_list = self.client.list_security_groups()['security_groups']
-        deleted_sgs = \
-            [sg for sg in security_group_list if sg in fetched_list]
+        deleted_sgs = [sg for sg in security_group_list if sg in fetched_list]
         self.assertFalse(deleted_sgs,
                          "Failed to delete Security Group %s "
                          "list" % ', '.join(m_group['name']
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index e0a1d77..0248c65 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -103,7 +103,6 @@
                  ['interfaceAttachment'])
         iface = waiters.wait_for_interface_status(
             self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
-        self._check_interface(iface)
         return iface
 
     def _test_create_interface_by_network_id(self, server, ifs):
@@ -190,7 +189,6 @@
         server, ifs = self._create_server_get_interfaces()
         interface_count = len(ifs)
         self.assertGreater(interface_count, 0)
-        self._check_interface(ifs[0])
 
         try:
             iface = self._test_create_interface(server)
@@ -228,7 +226,6 @@
         server, ifs = self._create_server_get_interfaces()
         interface_count = len(ifs)
         self.assertGreater(interface_count, 0)
-        self._check_interface(ifs[0])
         network_id = ifs[0]['net_id']
         self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
         # Remove the fixed IP from server.
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index d8ce7ea..c660821 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -42,8 +42,9 @@
 
     @classmethod
     def resource_setup(cls):
-        cls.set_validation_resources()
         super(ServersTestJSON, cls).resource_setup()
+        validation_resources = cls.get_class_validation_resources(
+            cls.os_primary)
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
         cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
@@ -52,6 +53,7 @@
         disk_config = cls.disk_config
         server_initial = cls.create_test_server(
             validatable=True,
+            validation_resources=validation_resources,
             wait_until='ACTIVE',
             name=cls.name,
             metadata=cls.meta,
@@ -105,11 +107,13 @@
         # Verify that the number of vcpus reported by the instance matches
         # the amount stated by the flavor
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
         linux_client = remote_client.RemoteClient(
-            self.get_server_ip(self.server),
+            self.get_server_ip(self.server, validation_resources),
             self.ssh_user,
             self.password,
-            self.validation_resources['keypair']['private_key'],
+            validation_resources['keypair']['private_key'],
             server=self.server,
             servers_client=self.client)
         output = linux_client.exec_command('grep -c ^processor /proc/cpuinfo')
@@ -120,11 +124,13 @@
                           'Instance validation tests are disabled.')
     def test_host_name_is_same_as_server_name(self):
         # Verify the instance host name is the same as the server name
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
         linux_client = remote_client.RemoteClient(
-            self.get_server_ip(self.server),
+            self.get_server_ip(self.server, validation_resources),
             self.ssh_user,
             self.password,
-            self.validation_resources['keypair']['private_key'],
+            validation_resources['keypair']['private_key'],
             server=self.server,
             servers_client=self.client)
         hostname = linux_client.exec_command("hostname").rstrip()
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index dbf6713..a126fd6 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -66,11 +66,6 @@
                                   dhcp=True)
         super(DeviceTaggingTest, cls).setup_credentials()
 
-    @classmethod
-    def resource_setup(cls):
-        cls.set_validation_resources()
-        super(DeviceTaggingTest, cls).resource_setup()
-
     def verify_device_metadata(self, md_json):
         md_dict = json.loads(md_json)
         for d in md_dict['devices']:
@@ -139,9 +134,12 @@
         # Create server
         admin_pass = data_utils.rand_password()
         config_drive_enabled = CONF.compute_feature_enabled.config_drive
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
 
         server = self.create_test_server(
             validatable=True,
+            validation_resources=validation_resources,
             config_drive=config_drive_enabled,
             adminPass=admin_pass,
             name=data_utils.rand_name('device-tagging-server'),
@@ -208,10 +206,10 @@
         self.addCleanup(self.delete_server, server['id'])
 
         self.ssh_client = remote_client.RemoteClient(
-            self.get_server_ip(server),
+            self.get_server_ip(server, validation_resources),
             CONF.validation.image_ssh_user,
             admin_pass,
-            self.validation_resources['keypair']['private_key'],
+            validation_resources['keypair']['private_key'],
             server=server,
             servers_client=self.servers_client)
 
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index b5fc39c..4cfc665 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -44,8 +44,13 @@
                                            self.server_id, 'ACTIVE')
         except lib_exc.NotFound:
             # The server was deleted by previous test, create a new one
+            # Use class level validation resources to avoid them being
+            # deleted once a test is over
+            validation_resources = self.get_class_validation_resources(
+                self.os_primary)
             server = self.create_test_server(
                 validatable=True,
+                validation_resources=validation_resources,
                 wait_until='ACTIVE')
             self.__class__.server_id = server['id']
         except Exception:
@@ -69,8 +74,6 @@
 
     @classmethod
     def resource_setup(cls):
-        cls.set_validation_resources()
-
         super(ServerActionsTestJSON, cls).resource_setup()
         cls.server_id = cls.rebuild_server(None, validatable=True)
 
@@ -80,8 +83,11 @@
     def test_change_server_password(self):
         # Since this test messes with the password and makes the
         # server unreachable, it should create its own server
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
         newserver = self.create_test_server(
             validatable=True,
+            validation_resources=validation_resources,
             wait_until='ACTIVE')
         # The server's password should be set to the provided password
         new_password = 'Newpass1234'
@@ -92,7 +98,7 @@
             # Verify that the user can authenticate with the new password
             server = self.client.show_server(newserver['id'])['server']
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.ssh_user,
                 new_password,
                 server=server,
@@ -101,13 +107,15 @@
 
     def _test_reboot_server(self, reboot_type):
         if CONF.validation.run_validation:
+            validation_resources = self.get_class_validation_resources(
+                self.os_primary)
             # Get the time the server was last rebooted,
             server = self.client.show_server(self.server_id)['server']
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.ssh_user,
                 self.password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             boot_time = linux_client.get_boot_time()
@@ -122,10 +130,10 @@
         if CONF.validation.run_validation:
             # Log in and verify the boot time has changed
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.ssh_user,
                 self.password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             new_boot_time = linux_client.get_boot_time()
@@ -201,6 +209,8 @@
         self.assertEqual(original_addresses, server['addresses'])
 
         if CONF.validation.run_validation:
+            validation_resources = self.get_class_validation_resources(
+                self.os_primary)
             # Authentication is attempted in the following order of priority:
             # 1.The key passed in, if one was passed in.
             # 2.Any key we can find through an SSH agent (if allowed).
@@ -208,10 +218,10 @@
             #   ~/.ssh/ (if allowed).
             # 4.Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(rebuilt_server),
+                self.get_server_ip(rebuilt_server, validation_resources),
                 self.ssh_user,
                 password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=rebuilt_server,
                 servers_client=self.client)
             linux_client.validate_authentication()
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 90b9da4..2f0f5ee 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -20,6 +20,7 @@
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
@@ -34,11 +35,6 @@
         super(ServerPersonalityTestJSON, cls).setup_credentials()
 
     @classmethod
-    def resource_setup(cls):
-        cls.set_validation_resources()
-        super(ServerPersonalityTestJSON, cls).resource_setup()
-
-    @classmethod
     def skip_checks(cls):
         super(ServerPersonalityTestJSON, cls).skip_checks()
         if not CONF.compute_feature_enabled.personality:
@@ -57,16 +53,23 @@
         personality = [{'path': file_path,
                         'contents': base64.encode_as_text(file_contents)}]
         password = data_utils.rand_password()
-        created_server = self.create_test_server(personality=personality,
-                                                 adminPass=password,
-                                                 wait_until='ACTIVE',
-                                                 validatable=True)
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        created_server = self.create_test_server(
+            personality=personality, adminPass=password, wait_until='ACTIVE',
+            validatable=True,
+            validation_resources=validation_resources)
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, created_server['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.servers_client.delete_server,
+                        created_server['id'])
         server = self.client.show_server(created_server['id'])['server']
         if CONF.validation.run_validation:
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.ssh_user, password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             self.assertEqual(file_contents,
@@ -75,8 +78,16 @@
 
     @decorators.idempotent_id('128966d8-71fc-443c-8cab-08e24114ecc9')
     def test_rebuild_server_with_personality(self):
-        server = self.create_test_server(wait_until='ACTIVE', validatable=True)
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            wait_until='ACTIVE', validatable=True,
+            validation_resources=validation_resources)
         server_id = server['id']
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server_id)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.servers_client.delete_server, server_id)
         file_contents = 'Test server rebuild.'
         personality = [{'path': 'rebuild.txt',
                         'contents': base64.encode_as_text(file_contents)}]
@@ -126,16 +137,22 @@
                 'contents': base64.encode_as_text(file_contents + str(i)),
             })
         password = data_utils.rand_password()
-        created_server = self.create_test_server(personality=person,
-                                                 adminPass=password,
-                                                 wait_until='ACTIVE',
-                                                 validatable=True)
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        created_server = self.create_test_server(
+            personality=person, adminPass=password, wait_until='ACTIVE',
+            validatable=True, validation_resources=validation_resources)
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, created_server['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.servers_client.delete_server,
+                        created_server['id'])
         server = self.client.show_server(created_server['id'])['server']
         if CONF.validation.run_validation:
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.ssh_user, password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             for i in person:
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 7fd1dd1..c9ee671 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -19,6 +19,7 @@
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 CONF = config.CONF
@@ -31,10 +32,6 @@
         super(ServersTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
 
-    def tearDown(self):
-        self.clear_servers()
-        super(ServersTestJSON, self).tearDown()
-
     @decorators.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
     @testtools.skipUnless(CONF.compute_feature_enabled.
                           enable_instance_password,
@@ -43,6 +40,11 @@
         # If an admin password is provided on server creation, the server's
         # root password should be set to that password.
         server = self.create_test_server(adminPass='testpassword')
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, server['id'])
 
         # Verify the password is set correctly in the response
         self.assertEqual('testpassword', server['adminPass'])
@@ -57,9 +59,19 @@
         server = self.create_test_server(name=server_name,
                                          wait_until='ACTIVE')
         id1 = server['id']
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, id1)
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, id1)
         server = self.create_test_server(name=server_name,
                                          wait_until='ACTIVE')
         id2 = server['id']
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, id2)
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, id2)
         self.assertNotEqual(id1, id2, "Did not create a new server")
         server = self.client.show_server(id1)['server']
         name1 = server['name']
@@ -76,6 +88,11 @@
         self.addCleanup(self.keypairs_client.delete_keypair, key_name)
         self.keypairs_client.list_keypairs()
         server = self.create_test_server(key_name=key_name)
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
         server = self.client.show_server(server['id'])['server']
         self.assertEqual(key_name, server['key_name'])
@@ -98,6 +115,11 @@
     def test_update_server_name(self):
         # The server name should be changed to the provided value
         server = self.create_test_server(wait_until='ACTIVE')
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, server['id'])
         # Update instance name with non-ASCII characters
         prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
         self._update_server_name(server['id'], 'ACTIVE', prefix_name)
@@ -115,6 +137,11 @@
     def test_update_access_server_address(self):
         # The server's access addresses should reflect the provided values
         server = self.create_test_server(wait_until='ACTIVE')
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, server['id'])
 
         # Update the IPv4 and IPv6 access addresses
         self.client.update_server(server['id'],
@@ -131,6 +158,11 @@
     def test_create_server_with_ipv6_addr_only(self):
         # Create a server without an IPv4 address (only an IPv6 address).
         server = self.create_test_server(accessIPv6='2001:2001::3')
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.servers_client.delete_server, server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
         server = self.client.show_server(server['id'])['server']
         self.assertEqual('2001:2001::3', server['accessIPv6'])
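
The cleanup additions in the hunks above all follow one pattern: because addCleanup callbacks run in LIFO order, the termination wait is registered before the delete, so at teardown the server is deleted first and the test then waits for it to actually disappear. A minimal sketch of that pattern, written as a hypothetical standalone helper (the helper name and free-function form are illustrative, not part of the patch):

    from tempest.common import waiters
    from tempest.lib.common.utils import test_utils

    def schedule_server_cleanup(test, servers_client, server_id):
        # Hypothetical helper; mirrors the addCleanup pairs added above.
        # Registered first, runs last: wait until the server is gone.
        test.addCleanup(waiters.wait_for_server_termination,
                        servers_client, server_id)
        # Registered last, runs first: issue the delete, ignoring a 404
        # in case the test already removed the server itself.
        test.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        servers_client.delete_server, server_id)
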
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index e0fed58..9bef80f 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -40,35 +40,37 @@
 
     @classmethod
     def resource_setup(cls):
-        cls.set_validation_resources()
         super(AttachVolumeTestJSON, cls).resource_setup()
         cls.device = CONF.compute.volume_device_name
 
     def _create_server(self):
         # Start a server and wait for it to become ready
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
         server = self.create_test_server(
             validatable=True,
+            validation_resources=validation_resources,
             wait_until='ACTIVE',
             adminPass=self.image_ssh_password)
         self.addCleanup(self.delete_server, server['id'])
         # Record addresses so that we can ssh later
         server['addresses'] = self.servers_client.list_addresses(
             server['id'])['addresses']
-        return server
+        return server, validation_resources
 
     @decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
     def test_attach_detach_volume(self):
         # Stop and Start a server with an attached volume, ensuring that
         # the volume remains attached.
-        server = self._create_server()
+        server, validation_resources = self._create_server()
 
         # NOTE(andreaf) Create one remote client used throughout the test.
         if CONF.validation.run_validation:
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.image_ssh_user,
                 self.image_ssh_password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.servers_client)
             # NOTE(andreaf) We need to ensure the ssh key has been
@@ -111,7 +113,7 @@
     @decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
     def test_list_get_volume_attachments(self):
         # List volume attachment of the server
-        server = self._create_server()
+        server, _ = self._create_server()
         volume_1st = self.create_volume()
         attachment_1st = self.attach_volume(server, volume_1st,
                                             device=('/dev/%s' % self.device))
@@ -163,15 +165,15 @@
         if not CONF.compute_feature_enabled.shelve:
             raise cls.skipException('Shelve is not available.')
 
-    def _count_volumes(self, server):
+    def _count_volumes(self, server, validation_resources):
         # Count number of volumes on an instance
         volumes = 0
         if CONF.validation.run_validation:
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.image_ssh_user,
                 self.image_ssh_password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.servers_client)
 
@@ -179,7 +181,7 @@
             volumes = int(linux_client.exec_command(command).strip())
         return volumes
 
-    def _shelve_server(self, server):
+    def _shelve_server(self, server, validation_resources):
         # NOTE(andreaf) If we are going to shelve a server, we should
         # check first whether the server is ssh-able. Otherwise we
         # won't be able to distinguish failures introduced by shelve
@@ -188,10 +190,10 @@
         # avoid breaking the VM
         if CONF.validation.run_validation:
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server),
+                self.get_server_ip(server, validation_resources),
                 self.image_ssh_user,
                 self.image_ssh_password,
-                self.validation_resources['keypair']['private_key'],
+                validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.servers_client)
             linux_client.validate_authentication()
@@ -199,30 +201,34 @@
         # If validation went ok, or it was skipped, shelve the server
         compute.shelve_server(self.servers_client, server['id'])
 
-    def _unshelve_server_and_check_volumes(self, server, number_of_volumes):
+    def _unshelve_server_and_check_volumes(self, server,
+                                           validation_resources,
+                                           number_of_volumes):
         # Unshelve the instance and check that there are expected volumes
         self.servers_client.unshelve_server(server['id'])
         waiters.wait_for_server_status(self.servers_client,
                                        server['id'],
                                        'ACTIVE')
         if CONF.validation.run_validation:
-            counted_volumes = self._count_volumes(server)
+            counted_volumes = self._count_volumes(
+                server, validation_resources)
             self.assertEqual(number_of_volumes, counted_volumes)
 
     @decorators.idempotent_id('13a940b6-3474-4c3c-b03f-29b89112bfee')
     def test_attach_volume_shelved_or_offload_server(self):
         # Create server, count number of volumes on it, shelve
         # server and attach pre-created volume to shelved server
-        server = self._create_server()
+        server, validation_resources = self._create_server()
         volume = self.create_volume()
-        num_vol = self._count_volumes(server)
-        self._shelve_server(server)
+        num_vol = self._count_volumes(server, validation_resources)
+        self._shelve_server(server, validation_resources)
         attachment = self.attach_volume(server, volume,
                                         device=('/dev/%s' % self.device),
                                         check_reserved=True)
 
         # Unshelve the instance and check that attached volume exists
-        self._unshelve_server_and_check_volumes(server, num_vol + 1)
+        self._unshelve_server_and_check_volumes(
+            server, validation_resources, num_vol + 1)
 
         # Get volume attachment of the server
         volume_attachment = self.servers_client.show_volume_attachment(
@@ -238,10 +244,10 @@
     def test_detach_volume_shelved_or_offload_server(self):
         # Count number of volumes on instance, shelve
         # server and attach pre-created volume to shelved server
-        server = self._create_server()
+        server, validation_resources = self._create_server()
         volume = self.create_volume()
-        num_vol = self._count_volumes(server)
-        self._shelve_server(server)
+        num_vol = self._count_volumes(server, validation_resources)
+        self._shelve_server(server, validation_resources)
 
         # Attach and then detach the volume
         self.attach_volume(server, volume, device=('/dev/%s' % self.device),
@@ -252,4 +258,5 @@
 
         # Unshelve the instance and check that we have the expected number of
         # volume(s)
-        self._unshelve_server_and_check_volumes(server, num_vol)
+        self._unshelve_server_and_check_volumes(
+            server, validation_resources, num_vol)
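
The attach-volume changes move from class-level validation resources to per-test ones: _create_server now requests validation resources scoped to the test and returns them alongside the server, and every RemoteClient construction receives that dict explicitly instead of reading self.validation_resources. A rough sketch of the resulting shape, assuming a compute test instance that exposes get_server_ip, image_ssh_user/password and servers_client as in the hunks above (the make_ssh_client wrapper is illustrative):

    from tempest.common.utils.linux import remote_client

    def make_ssh_client(test, server, validation_resources):
        # 'test' stands in for a compute test instance; the keypair created
        # for this specific test drives SSH validation.
        return remote_client.RemoteClient(
            test.get_server_ip(server, validation_resources),
            test.image_ssh_user,
            test.image_ssh_password,
            validation_resources['keypair']['private_key'],
            server=server,
            servers_client=test.servers_client)
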
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 5c3cd26..6343ea8 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -161,16 +161,14 @@
                                 manager_project_id]
 
         # Get available project scopes
-        available_projects =\
-            self.client.list_auth_projects()['projects']
+        available_projects = self.client.list_auth_projects()['projects']
 
         # create list to save fetched project's id
         fetched_project_ids = [i['id'] for i in available_projects]
 
         # verifying the project ids in list
         missing_project_ids = \
-            [p for p in assigned_project_ids
-             if p not in fetched_project_ids]
+            [p for p in assigned_project_ids if p not in fetched_project_ids]
         self.assertEmpty(missing_project_ids,
                          "Failed to find project_id %s in fetched list" %
                          ', '.join(missing_project_ids))
diff --git a/tempest/api/identity/v3/test_catalog.py b/tempest/api/identity/v3/test_catalog.py
old mode 100755
new mode 100644
diff --git a/tempest/api/identity/v3/test_projects.py b/tempest/api/identity/v3/test_projects.py
index 0ae35ea..bbb4013 100644
--- a/tempest/api/identity/v3/test_projects.py
+++ b/tempest/api/identity/v3/test_projects.py
@@ -24,8 +24,7 @@
 
     @decorators.idempotent_id('86128d46-e170-4644-866a-cc487f699e1d')
     def test_list_projects_returns_only_authorized_projects(self):
-        alt_project_name =\
-            self.os_alt.credentials.project_name
+        alt_project_name = self.os_alt.credentials.project_name
         resp = self.non_admin_users_client.list_user_projects(
             self.os_primary.credentials.user_id)
 
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index f69d7c5..cf4236d 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 from tempest.api.network import base
+from tempest.common import identity
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -46,10 +47,11 @@
         # Add a project to conduct the test
         project = data_utils.rand_name('test_project_')
         description = data_utils.rand_name('desc_')
-        project = self.identity_utils.create_project(name=project,
-                                                     description=description)
+        project = identity.identity_utils(self.os_admin).create_project(
+            name=project, description=description)
         project_id = project['id']
-        self.addCleanup(self.identity_utils.delete_project, project_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+                        project_id)
 
         # Change quotas for project
         quota_set = self.admin_quotas_client.update_quotas(
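
Here and in the admin router and volume quota tests below, the tests stop using the self.identity_utils attribute and instead build the identity helper on demand from the admin clients manager. A compact sketch of that pattern (the make_temp_project wrapper is hypothetical; create_project and delete_project are the calls used in the hunks):

    from tempest.common import identity
    from tempest.lib.common.utils import data_utils

    def make_temp_project(test, admin_manager):
        # Hypothetical wrapper around the pattern used in the hunks above.
        utils = identity.identity_utils(admin_manager)
        project = utils.create_project(
            name=data_utils.rand_name('test_project_'),
            description=data_utils.rand_name('desc_'))
        # Schedule deletion so the throw-away project never leaks.
        test.addCleanup(utils.delete_project, project['id'])
        return project
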
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index c97efe9..8cdb41e 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.network import base
+from tempest.common import identity
 from tempest.common import utils
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -41,19 +42,6 @@
         self.addCleanup(self._cleanup_router, router)
         return router
 
-    def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        interface = self.routers_client.add_router_interface(
-            router_id, subnet_id=subnet_id)
-        self.addCleanup(self._remove_router_interface_with_subnet_id,
-                        router_id, subnet_id)
-        self.assertEqual(subnet_id, interface['subnet_id'])
-        return interface
-
-    def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
-        body = self.routers_client.remove_router_interface(router_id,
-                                                           subnet_id=subnet_id)
-        self.assertEqual(subnet_id, body['subnet_id'])
-
     @classmethod
     def skip_checks(cls):
         super(RoutersAdminTest, cls).skip_checks()
@@ -66,10 +54,11 @@
         # Test creating router from admin user setting project_id.
         project = data_utils.rand_name('test_tenant_')
         description = data_utils.rand_name('desc_')
-        project = self.identity_utils.create_project(name=project,
-                                                     description=description)
+        project = identity.identity_utils(self.os_admin).create_project(
+            name=project, description=description)
         project_id = project['id']
-        self.addCleanup(self.identity_utils.delete_project, project_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+                        project_id)
 
         name = data_utils.rand_name('router-')
         create_body = self.admin_routers_client.create_router(
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 6bec0d7..8308e34 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -96,6 +96,12 @@
         cls.metering_labels = []
         cls.metering_label_rules = []
         cls.ethertype = "IPv" + str(cls._ip_version)
+        if cls._ip_version == 4:
+            cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+            cls.mask_bits = CONF.network.project_network_mask_bits
+        elif cls._ip_version == 6:
+            cls.cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
+            cls.mask_bits = CONF.network.project_network_v6_mask_bits
 
     @classmethod
     def resource_cleanup(cls):
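
With cls.cidr and cls.mask_bits computed once in the network base class, the per-test CIDR derivation in the files that follow collapses into simple attribute access. A standalone netaddr illustration of the derivation those attributes feed (the concrete addresses are example values, not Tempest configuration defaults):

    import netaddr

    cidr = netaddr.IPNetwork('10.100.0.0/16')   # stands in for cls.cidr
    mask_bits = 24                              # stands in for cls.mask_bits

    # Last /24 block inside the project range, as used by
    # _create_subnet_with_last_subnet_block further down.
    subnet_cidr = list(cidr.subnet(mask_bits))[-1]
    gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
    print(subnet_cidr, gateway_ip)              # 10.100.255.0/24 10.100.255.1
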
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index a471bd6..3075047 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import netaddr
 import six
 
 from tempest.api.network import base
@@ -41,8 +40,6 @@
         api_extensions
     """
 
-    _project_network_cidr = CONF.network.project_network_cidr
-
     @classmethod
     def skip_checks(cls):
         super(AllowedAddressPairTestJSON, cls).skip_checks()
@@ -105,8 +102,7 @@
     @decorators.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
     def test_update_port_with_cidr_address_pair(self):
         # Update allowed address pair with cidr
-        cidr = str(netaddr.IPNetwork(self._project_network_cidr))
-        self._update_port_with_address(cidr)
+        self._update_port_with_address(str(self.cidr))
 
     @decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
     def test_update_port_with_multiple_ip_mac_address_pair(self):
@@ -135,4 +131,3 @@
 
 class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
     _ip_version = 6
-    _project_network_cidr = CONF.network.project_network_v6_cidr
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 88340c1..1c59556 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -34,8 +34,7 @@
     def resource_setup(cls):
         super(BaseNetworkTestResources, cls).resource_setup()
         cls.network = cls.create_network()
-        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
-                                                               cls._ip_version)
+        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network)
         cls._subnet_data = {6: {'gateway':
                                 str(cls._get_gateway_from_tempest_conf(6)),
                                 'allocation_pools':
@@ -64,20 +63,13 @@
                                 'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
 
     @classmethod
-    def _create_subnet_with_last_subnet_block(cls, network, ip_version):
+    def _create_subnet_with_last_subnet_block(cls, network):
         # Derive last subnet CIDR block from project CIDR and
         # create the subnet with that derived CIDR
-        if ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            mask_bits = CONF.network.project_network_mask_bits
-        elif ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
-            mask_bits = CONF.network.project_network_v6_mask_bits
-
-        subnet_cidr = list(cidr.subnet(mask_bits))[-1]
+        subnet_cidr = list(cls.cidr.subnet(cls.mask_bits))[-1]
         gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
         return cls.create_subnet(network, gateway=gateway_ip,
-                                 cidr=subnet_cidr, mask_bits=mask_bits)
+                                 cidr=subnet_cidr, mask_bits=cls.mask_bits)
 
     @classmethod
     def _get_gateway_from_tempest_conf(cls, ip_version):
@@ -487,14 +479,8 @@
     def test_bulk_create_delete_subnet(self):
         networks = [self.create_network(), self.create_network()]
         # Creates 2 subnets in one request
-        if self._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            mask_bits = CONF.network.project_network_mask_bits
-        else:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
-            mask_bits = CONF.network.project_network_v6_mask_bits
-
-        cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
+        cidrs = [subnet_cidr
+                 for subnet_cidr in self.cidr.subnet(self.mask_bits)]
 
         names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
         subnets_list = []
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5c36747..eb53fbb 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -84,25 +84,13 @@
         self.assertTrue(port1['admin_state_up'])
         self.assertTrue(port2['admin_state_up'])
 
-    @classmethod
-    def _get_ipaddress_from_tempest_conf(cls):
-        """Return subnet with mask bits for configured CIDR """
-        if cls._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            cidr.prefixlen = CONF.network.project_network_mask_bits
-
-        elif cls._ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
-            cidr.prefixlen = CONF.network.project_network_v6_mask_bits
-
-        return cidr
-
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
     def test_create_port_in_allowed_allocation_pools(self):
         network = self.create_network()
         net_id = network['id']
-        address = self._get_ipaddress_from_tempest_conf()
+        address = self.cidr
+        address.prefixlen = self.mask_bits
         if ((address.version == 4 and address.prefixlen >= 30) or
            (address.version == 6 and address.prefixlen >= 126)):
             msg = ("Subnet %s isn't large enough for the test" % address.cidr)
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 3883cc2..99ffaa8 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -59,13 +59,6 @@
             msg = "router extension not enabled."
             raise cls.skipException(msg)
 
-    @classmethod
-    def resource_setup(cls):
-        super(RoutersTest, cls).resource_setup()
-        cls.tenant_cidr = (CONF.network.project_network_cidr
-                           if cls._ip_version == 4 else
-                           CONF.network.project_network_v6_cidr)
-
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
     @testtools.skipUnless(CONF.network.public_network_id,
@@ -139,33 +132,6 @@
         self.assertEqual(show_port_body['port']['device_id'],
                          router['id'])
 
-    def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
-        show_body = self.admin_routers_client.show_router(router_id)
-        actual_ext_gw_info = show_body['router']['external_gateway_info']
-        if exp_ext_gw_info is None:
-            self.assertIsNone(actual_ext_gw_info)
-            return
-        # Verify only keys passed in exp_ext_gw_info
-        for k, v in exp_ext_gw_info.items():
-            self.assertEqual(v, actual_ext_gw_info[k])
-
-    def _verify_gateway_port(self, router_id):
-        list_body = self.admin_ports_client.list_ports(
-            network_id=CONF.network.public_network_id,
-            device_id=router_id)
-        self.assertEqual(len(list_body['ports']), 1)
-        gw_port = list_body['ports'][0]
-        fixed_ips = gw_port['fixed_ips']
-        self.assertNotEmpty(fixed_ips)
-        # Assert that all of the IPs from the router gateway port
-        # are allocated from a valid public subnet.
-        public_net_body = self.admin_networks_client.show_network(
-            CONF.network.public_network_id)
-        public_subnet_ids = public_net_body['network']['subnets']
-        for fixed_ip in fixed_ips:
-            subnet_id = fixed_ip['subnet_id']
-            self.assertIn(subnet_id, public_subnet_ids)
-
     @decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
     @utils.requires_ext(extension='ext-gw-mode', service='network')
     @testtools.skipUnless(CONF.network.public_network_id,
@@ -202,7 +168,7 @@
     def test_update_delete_extra_route(self):
         # Create different cidr for each subnet to avoid cidr duplicate
         # The cidr starts from project_cidr
-        next_cidr = netaddr.IPNetwork(self.tenant_cidr)
+        next_cidr = self.cidr
         # Prepare to build several routes
         test_routes = []
         routes_num = 4
@@ -278,7 +244,7 @@
         network02 = self.create_network(
             network_name=data_utils.rand_name('router-network02-'))
         subnet01 = self.create_subnet(network01)
-        sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+        sub02_cidr = self.cidr.next()
         subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
         router = self._create_router()
         interface01 = self._add_router_interface_with_subnet_id(router['id'],
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index 60b9de7..c9ce55c 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import netaddr
-
 from tempest.api.network import base
 from tempest.common import utils
 from tempest import config
@@ -40,9 +38,6 @@
         cls.router = cls.create_router()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
-        cls.tenant_cidr = (CONF.network.project_network_cidr
-                           if cls._ip_version == 4 else
-                           CONF.network.project_network_v6_cidr)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
@@ -57,7 +52,7 @@
     @decorators.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
     def test_router_add_gateway_net_not_external_returns_400(self):
         alt_network = self.create_network()
-        sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+        sub_cidr = self.cidr.next()
         self.create_subnet(alt_network, cidr=sub_cidr)
         self.assertRaises(lib_exc.BadRequest,
                           self.routers_client.update_router,
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 4c49b2a..24c9c24 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -43,7 +43,7 @@
     for cont in containers:
         try:
             params = {'limit': 9999, 'format': 'json'}
-            _, objlist = container_client.list_container_contents(cont, params)
+            _, objlist = container_client.list_container_objects(cont, params)
             # delete every object in the container
             for obj in objlist:
                 test_utils.call_and_ignore_notfound_exc(
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index 9abd59e..6599e43 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -96,7 +96,7 @@
         self.assertIn(container_name, [b['name'] for b in body])
 
         param = {'format': 'json'}
-        resp, contents_list = self.container_client.list_container_contents(
+        resp, contents_list = self.container_client.list_container_objects(
             container_name, param)
 
         self.assertHeaders(resp, 'Container', 'GET')
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index 4b66ebf..765bc6d 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -41,10 +41,11 @@
         tenant_name = self.os_roles_operator_alt.credentials.tenant_name
         username = self.os_roles_operator_alt.credentials.username
         cont_headers = {'X-Container-Read': tenant_name + ':' + username}
+        container_client = self.os_roles_operator.container_client
         resp_meta, _ = (
-            self.os_roles_operator.container_client.update_container_metadata(
-                self.container_name, metadata=cont_headers,
-                metadata_prefix=''))
+            container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
         # create object
         object_name = data_utils.rand_name(name='Object')
@@ -68,10 +69,11 @@
         tenant_name = self.os_roles_operator_alt.credentials.tenant_name
         username = self.os_roles_operator_alt.credentials.username
         cont_headers = {'X-Container-Write': tenant_name + ':' + username}
+        container_client = self.os_roles_operator.container_client
         resp_meta, _ = (
-            self.os_roles_operator.container_client.update_container_metadata(
-                self.container_name, metadata=cont_headers,
-                metadata_prefix=''))
+            container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
         # set alternative authentication data; cannot simply use the
         # other object client.
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index e064753..03a5879 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -133,9 +133,10 @@
         # attempt to read object using non-authorized user
         # update X-Container-Read metadata ACL
         cont_headers = {'X-Container-Read': 'badtenant:baduser'}
-        resp_meta, _ = self.container_client.update_container_metadata(
-            self.container_name, metadata=cont_headers,
-            metadata_prefix='')
+        resp_meta, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
         # create object
         object_name = data_utils.rand_name(name='Object')
@@ -157,9 +158,10 @@
         # attempt to write object using non-authorized user
         # update X-Container-Write metadata ACL
         cont_headers = {'X-Container-Write': 'badtenant:baduser'}
-        resp_meta, _ = self.container_client.update_container_metadata(
-            self.container_name, metadata=cont_headers,
-            metadata_prefix='')
+        resp_meta, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
         # Trying to write the object without rights
         self.object_client.auth_provider.set_alt_auth_data(
@@ -182,9 +184,10 @@
         cont_headers = {'X-Container-Read':
                         tenant_name + ':' + username,
                         'X-Container-Write': ''}
-        resp_meta, _ = self.container_client.update_container_metadata(
-            self.container_name, metadata=cont_headers,
-            metadata_prefix='')
+        resp_meta, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
         # Trying to write the object without write rights
         self.object_client.auth_provider.set_alt_auth_data(
@@ -207,9 +210,10 @@
         cont_headers = {'X-Container-Read':
                         tenant_name + ':' + username,
                         'X-Container-Write': ''}
-        resp_meta, _ = self.container_client.update_container_metadata(
-            self.container_name, metadata=cont_headers,
-            metadata_prefix='')
+        resp_meta, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
         # create object
         object_name = data_utils.rand_name(name='Object')
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index c87bed5..982c4a1 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -40,8 +40,8 @@
         self.container_name = self.create_container()
         metadata = {"quota-bytes": str(QUOTA_BYTES),
                     "quota-count": str(QUOTA_COUNT), }
-        self.container_client.update_container_metadata(
-            self.container_name, metadata)
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name, create_update_metadata=metadata)
 
     def tearDown(self):
         """Cleans the container of any object after each test."""
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 76fe8d4..c6f21ec 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -130,7 +130,7 @@
         container_name = self.create_container()
         object_name, _ = self.create_object(container_name)
 
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name)
         self.assertHeaders(resp, 'Container', 'GET')
         self.assertEqual([object_name], object_list)
@@ -140,7 +140,7 @@
         # get empty container contents list
         container_name = self.create_container()
 
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name)
         self.assertHeaders(resp, 'Container', 'GET')
         self.assertEmpty(object_list)
@@ -153,7 +153,7 @@
         self.create_object(container_name, object_name)
 
         params = {'delimiter': '/'}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -166,7 +166,7 @@
         object_name, _ = self.create_object(container_name)
 
         params = {'end_marker': object_name + 'zzzz'}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -179,7 +179,7 @@
         self.create_object(container_name)
 
         params = {'format': 'json'}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -198,7 +198,7 @@
         self.create_object(container_name)
 
         params = {'format': 'xml'}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -222,7 +222,7 @@
         object_name, _ = self.create_object(container_name)
 
         params = {'limit': data_utils.rand_int_id(1, 10000)}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -235,7 +235,7 @@
         object_name, _ = self.create_object(container_name)
 
         params = {'marker': 'AaaaObject1234567890'}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -250,7 +250,7 @@
         self.create_object(container_name, object_name)
 
         params = {'path': 'Swift'}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -264,7 +264,7 @@
 
         prefix_key = object_name[0:8]
         params = {'prefix': prefix_key}
-        resp, object_list = self.container_client.list_container_contents(
+        resp, object_list = self.container_client.list_container_objects(
             container_name,
             params=params)
         self.assertHeaders(resp, 'Container', 'GET')
@@ -277,9 +277,9 @@
         container_name = self.create_container()
 
         metadata = {'name': 'Pictures'}
-        self.container_client.update_container_metadata(
+        self.container_client.create_update_or_delete_container_metadata(
             container_name,
-            metadata=metadata)
+            create_update_metadata=metadata)
 
         resp, _ = self.container_client.list_container_metadata(
             container_name)
@@ -307,10 +307,11 @@
         self.containers.append(container_name)
 
         metadata_2 = {'test-container-meta2': 'Meta2'}
-        resp, _ = self.container_client.update_container_metadata(
-            container_name,
-            metadata=metadata_2,
-            remove_metadata=metadata_1)
+        resp, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                container_name,
+                create_update_metadata=metadata_2,
+                delete_metadata=metadata_1))
         self.assertHeaders(resp, 'Container', 'POST')
 
         resp, _ = self.container_client.list_container_metadata(
@@ -326,9 +327,10 @@
         container_name = self.create_container()
 
         metadata = {'test-container-meta1': 'Meta1'}
-        resp, _ = self.container_client.update_container_metadata(
-            container_name,
-            metadata=metadata)
+        resp, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                container_name,
+                create_update_metadata=metadata))
         self.assertHeaders(resp, 'Container', 'POST')
 
         resp, _ = self.container_client.list_container_metadata(
@@ -346,9 +348,10 @@
                                                metadata=metadata)
         self.containers.append(container_name)
 
-        resp, _ = self.container_client.delete_container_metadata(
-            container_name,
-            metadata=metadata)
+        resp, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                container_name,
+                delete_metadata=metadata))
         self.assertHeaders(resp, 'Container', 'POST')
 
         resp, _ = self.container_client.list_container_metadata(
@@ -361,9 +364,10 @@
         container_name = self.create_container()
 
         metadata = {'test-container-meta1': ''}
-        resp, _ = self.container_client.update_container_metadata(
-            container_name,
-            metadata=metadata)
+        resp, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                container_name,
+                create_update_metadata=metadata))
         self.assertHeaders(resp, 'Container', 'POST')
 
         resp, _ = self.container_client.list_container_metadata(
@@ -380,9 +384,10 @@
         self.containers.append(container_name)
 
         metadata = {'test-container-meta1': ''}
-        resp, _ = self.container_client.delete_container_metadata(
-            container_name,
-            metadata=metadata)
+        resp, _ = (
+            self.container_client.create_update_or_delete_container_metadata(
+                container_name,
+                delete_metadata=metadata))
         self.assertHeaders(resp, 'Container', 'POST')
 
         resp, _ = self.container_client.list_container_metadata(container_name)
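
The object-storage hunks replace the old update/delete metadata pair with the single create_update_or_delete_container_metadata call, which can add or update some keys and remove others in one POST. A minimal sketch using the keyword names that appear above (the wrapper function itself is illustrative):

    def rotate_container_meta(container_client, container_name):
        # Illustrative wrapper: one request sets the new key and drops
        # the old one, matching the consolidated client call above.
        resp, _ = (
            container_client.create_update_or_delete_container_metadata(
                container_name,
                create_update_metadata={'test-container-meta2': 'Meta2'},
                delete_metadata={'test-container-meta1': 'Meta1'}))
        return resp
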
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index 387b7b6..707c016 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -120,9 +120,10 @@
         # Attempts to update metadata using a nonexistent container name.
         metadata = {'animal': 'penguin'}
 
-        self.assertRaises(exceptions.NotFound,
-                          self.container_client.update_container_metadata,
-                          'nonexistent_container_name', metadata)
+        self.assertRaises(
+            exceptions.NotFound,
+            self.container_client.create_update_or_delete_container_metadata,
+            'nonexistent_container_name', create_update_metadata=metadata)
 
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('65387dbf-a0e2-4aac-9ddc-16eb3f1f69ba')
@@ -130,9 +131,10 @@
         # Attempts to delete metadata using a nonexistent container name.
         metadata = {'animal': 'penguin'}
 
-        self.assertRaises(exceptions.NotFound,
-                          self.container_client.delete_container_metadata,
-                          'nonexistent_container_name', metadata)
+        self.assertRaises(
+            exceptions.NotFound,
+            self.container_client.create_update_or_delete_container_metadata,
+            'nonexistent_container_name', delete_metadata=metadata)
 
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('14331d21-1e81-420a-beea-19cb5e5207f5')
@@ -141,7 +143,7 @@
         # that doesn't exist.
         params = {'limit': 9999, 'format': 'json'}
         self.assertRaises(exceptions.NotFound,
-                          self.container_client.list_container_contents,
+                          self.container_client.list_container_objects,
                           'nonexistent_container_name', params)
 
     @decorators.attr(type=["negative"])
@@ -155,7 +157,7 @@
         self.assertHeaders(resp, 'Container', 'DELETE')
         params = {'limit': 9999, 'format': 'json'}
         self.assertRaises(exceptions.NotFound,
-                          self.container_client.list_container_contents,
+                          self.container_client.list_container_objects,
                           container_name, params)
 
     @decorators.attr(type=["negative"])
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 92fa690..1243b83 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -34,10 +34,10 @@
         cls.object_name, cls.object_data = cls.create_object(
             cls.container_name)
 
-        cls.container_client.update_container_metadata(
+        cls.container_client.create_update_or_delete_container_metadata(
             cls.container_name,
-            metadata=headers_public_read_acl,
-            metadata_prefix="X-Container-")
+            create_update_metadata=headers_public_read_acl,
+            create_update_metadata_prefix="X-Container-")
 
     @classmethod
     def resource_cleanup(cls):
@@ -49,8 +49,8 @@
     def test_web_index(self):
         headers = {'web-index': self.object_name}
 
-        self.container_client.update_container_metadata(
-            self.container_name, metadata=headers)
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name, create_update_metadata=headers)
 
         # Maintain original headers, no auth added
         self.account_client.auth_provider.set_alt_auth_data(
@@ -68,8 +68,9 @@
         self.assertEqual(body, self.object_data)
 
         # clean up before exiting
-        self.container_client.update_container_metadata(self.container_name,
-                                                        {'web-index': ""})
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name,
+            create_update_metadata={'web-index': ""})
 
         _, body = self.container_client.list_container_metadata(
             self.container_name)
@@ -80,8 +81,8 @@
     def test_web_listing(self):
         headers = {'web-listings': 'true'}
 
-        self.container_client.update_container_metadata(
-            self.container_name, metadata=headers)
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name, create_update_metadata=headers)
 
         # test GET on http://account_url/container_name
         # we should retrieve a listing of objects
@@ -100,9 +101,9 @@
         self.assertIn(self.object_name, body.decode())
 
         # clean up before exiting
-        self.container_client.update_container_metadata(self.container_name,
-                                                        {'web-listings': ""})
-
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name,
+            create_update_metadata={'web-listings': ""})
         _, body = self.container_client.list_container_metadata(
             self.container_name)
         self.assertNotIn('x-container-meta-web-listings', body)
@@ -113,8 +114,8 @@
         headers = {'web-listings': 'true',
                    'web-listings-css': 'listings.css'}
 
-        self.container_client.update_container_metadata(
-            self.container_name, metadata=headers)
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name, create_update_metadata=headers)
 
         # Maintain original headers, no auth added
         self.account_client.auth_provider.set_alt_auth_data(
@@ -136,8 +137,8 @@
         headers = {'web-listings': 'true',
                    'web-error': self.object_name}
 
-        self.container_client.update_container_metadata(
-            self.container_name, metadata=headers)
+        self.container_client.create_update_or_delete_container_metadata(
+            self.container_name, create_update_metadata=headers)
 
         # Create object to return when requested object not found
         object_name_404 = "404" + self.object_name
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 7665b48..042d288 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -102,7 +102,7 @@
         while self.attempts > 0:
             object_lists = []
             for c_client, cont in zip(cont_client, self.containers):
-                resp, object_list = c_client.list_container_contents(
+                resp, object_list = c_client.list_container_objects(
                     cont, params=params)
                 object_lists.append(dict(
                     (obj['name'], obj) for obj in object_list))
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index d3cdb72..836a875 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -990,8 +990,11 @@
 
         # update container metadata to make it publicly readable
         cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
-        resp_meta, body = self.container_client.update_container_metadata(
-            self.container_name, metadata=cont_headers, metadata_prefix='')
+        resp_meta, body = (
+            self.container_client.create_update_or_delete_container_metadata(
+                self.container_name,
+                create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
 
         # create object
@@ -1025,9 +1028,10 @@
         # make container public-readable and access an object in it using
         # another user's credentials
         cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
-        resp_meta, body = self.container_client.update_container_metadata(
-            self.container_name, metadata=cont_headers,
-            metadata_prefix='')
+        resp_meta, body = (
+            self.container_client.create_update_or_delete_container_metadata(
+                self.container_name, create_update_metadata=cont_headers,
+                create_update_metadata_prefix=''))
         self.assertHeaders(resp_meta, 'Container', 'POST')
 
         # create object
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 65da63d..c66776e 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -172,6 +172,6 @@
         # Check only the format of common headers with custom matcher
         self.assertThat(resp, custom_matchers.AreAllWellFormatted())
 
-        resp, body = self.container_client.list_container_contents(
+        resp, body = self.container_client.list_container_objects(
             self.container_name)
         self.assertEqual(int(resp['x-container-object-count']), 0)
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index baea37b..3a91642 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -23,10 +23,7 @@
 CONF = config.CONF
 
 
-class GroupsTest(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+class BaseGroupsTest(base.BaseVolumeAdminTest):
 
     def _delete_group(self, grp_id, delete_volumes=True):
         self.groups_client.delete_group(grp_id, delete_volumes)
@@ -37,8 +34,7 @@
         self.groups_client.wait_for_resource_deletion(grp_id)
 
     def _delete_group_snapshot(self, group_snapshot_id, grp_id):
-        self.group_snapshots_client.delete_group_snapshot(
-            group_snapshot_id)
+        self.group_snapshots_client.delete_group_snapshot(group_snapshot_id)
         vols = self.volumes_client.list_volumes(detail=True)['volumes']
         snapshots = self.snapshots_client.list_snapshots(
             detail=True)['snapshots']
@@ -65,6 +61,12 @@
         self.assertEqual(grp_name, grp['name'])
         return grp
 
+
+class GroupsTest(BaseGroupsTest):
+    min_microversion = '3.14'
+    max_microversion = 'latest'
+    _api_version = 3
+
     @decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
     def test_group_create_show_list_delete(self):
         # Create volume type
@@ -126,8 +128,7 @@
         self._delete_group(grp1_id)
         # grp2 is empty so delete_volumes flag can be set to False
         self._delete_group(grp2_id, delete_volumes=False)
-        grps = self.groups_client.list_groups(
-            detail=True)['groups']
+        grps = self.groups_client.list_groups(detail=True)['groups']
         self.assertEmpty(grps)
 
     @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
@@ -151,6 +152,9 @@
             self.group_snapshots_client.create_group_snapshot(
                 group_id=grp['id'],
                 name=group_snapshot_name)['group_snapshot'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self._delete_group_snapshot,
+                        group_snapshot['id'], grp['id'])
         snapshots = self.snapshots_client.list_snapshots(
             detail=True)['snapshots']
         for snap in snapshots:
@@ -167,18 +171,20 @@
             group_snapshot['id'])['group_snapshot']
         self.assertEqual(group_snapshot_name, group_snapshot['name'])
 
-        # Get all group snapshots with detail
-        group_snapshots = (
-            self.group_snapshots_client.list_group_snapshots(
-                detail=True)['group_snapshots'])
+        # Get all group snapshots with details, check some detail-specific
+        # elements, and look for the created group snapshot
+        group_snapshots = (self.group_snapshots_client.list_group_snapshots(
+            detail=True)['group_snapshots'])
+        for grp_snapshot in group_snapshots:
+            self.assertIn('created_at', grp_snapshot)
+            self.assertIn('group_id', grp_snapshot)
         self.assertIn((group_snapshot['name'], group_snapshot['id']),
                       [(m['name'], m['id']) for m in group_snapshots])
 
         # Delete group snapshot
         self._delete_group_snapshot(group_snapshot['id'], grp['id'])
-        group_snapshots = (
-            self.group_snapshots_client.list_group_snapshots(
-                detail=True)['group_snapshots'])
+        group_snapshots = (self.group_snapshots_client.list_group_snapshots()
+                           ['group_snapshots'])
         self.assertEmpty(group_snapshots)
 
     @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
@@ -212,14 +218,12 @@
                 waiters.wait_for_volume_resource_status(
                     self.snapshots_client, snap['id'], 'available')
         waiters.wait_for_volume_resource_status(
-            self.group_snapshots_client,
-            group_snapshot['id'], 'available')
+            self.group_snapshots_client, group_snapshot['id'], 'available')
 
         # Create Group from Group snapshot
         grp_name2 = data_utils.rand_name('Group_from_snap')
         grp2 = self.groups_client.create_group_from_source(
-            group_snapshot_id=group_snapshot['id'],
-            name=grp_name2)['group']
+            group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
         self.addCleanup(self._delete_group, grp2['id'])
         self.assertEqual(grp_name2, grp2['name'])
         vols = self.volumes_client.list_volumes(detail=True)['volumes']
@@ -250,8 +254,7 @@
             source_group_id=grp['id'], name=grp_name2)['group']
         self.addCleanup(self._delete_group, grp2['id'])
         self.assertEqual(grp_name2, grp2['name'])
-        vols = self.volumes_client.list_volumes(
-            detail=True)['volumes']
+        vols = self.volumes_client.list_volumes(detail=True)['volumes']
         for vol in vols:
             if vol['group_id'] == grp2['id']:
                 waiters.wait_for_volume_resource_status(
@@ -298,10 +301,7 @@
         # Get volumes in the group
         vols = self.volumes_client.list_volumes(
             detail=True)['volumes']
-        grp_vols = []
-        for vol in vols:
-            if vol['group_id'] == grp['id']:
-                grp_vols.append(vol)
+        grp_vols = [v for v in vols if v['group_id'] == grp['id']]
         self.assertEqual(1, len(grp_vols))
 
         # Add a volume to the group
@@ -313,10 +313,33 @@
             self.groups_client, grp['id'], 'available')
 
         # Get volumes in the group
-        vols = self.volumes_client.list_volumes(
-            detail=True)['volumes']
-        grp_vols = []
-        for vol in vols:
-            if vol['group_id'] == grp['id']:
-                grp_vols.append(vol)
+        vols = self.volumes_client.list_volumes(detail=True)['volumes']
+        grp_vols = [v for v in vols if v['group_id'] == grp['id']]
         self.assertEqual(2, len(grp_vols))
+
+
+class GroupsV320Test(BaseGroupsTest):
+    _api_version = 3
+    min_microversion = '3.20'
+    max_microversion = 'latest'
+
+    @decorators.idempotent_id('b20c696b-0cbc-49a5-8b3a-b1fb9338f45c')
+    def test_reset_group_status(self):
+        # Create volume type
+        volume_type = self.create_volume_type()
+
+        # Create group type
+        group_type = self.create_group_type()
+
+        # Create group
+        group = self._create_group(group_type, volume_type)
+
+        # Reset group status
+        self.addCleanup(waiters.wait_for_volume_resource_status,
+                        self.groups_client, group['id'], 'available')
+        self.addCleanup(self.admin_groups_client.reset_group_status,
+                        group['id'], 'available')
+        for status in ['creating', 'available', 'error']:
+            self.admin_groups_client.reset_group_status(group['id'], status)
+            waiters.wait_for_volume_resource_status(
+                self.groups_client, group['id'], status)
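
The new GroupsV320Test relies on the same LIFO cleanup ordering as the compute changes: the waiter is registered before the reset call, so at teardown the group is first forced back to 'available' and only then waited on, regardless of which status the loop ended in. A sketch of that ordering (the schedule_group_reset wrapper is hypothetical):

    from tempest.common import waiters

    def schedule_group_reset(test, admin_groups_client, groups_client,
                             group_id):
        # Hypothetical helper mirroring the addCleanup pair added above.
        # Runs last: wait until the group reports 'available' again.
        test.addCleanup(waiters.wait_for_volume_resource_status,
                        groups_client, group_id, 'available')
        # Runs first: ask the admin API to force the status back.
        test.addCleanup(admin_groups_client.reset_group_status,
                        group_id, 'available')
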
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
old mode 100755
new mode 100644
diff --git a/tempest/api/volume/admin/test_volume_quota_classes.py b/tempest/api/volume/admin/test_volume_quota_classes.py
index f551575..75dca41 100644
--- a/tempest/api/volume/admin/test_volume_quota_classes.py
+++ b/tempest/api/volume/admin/test_volume_quota_classes.py
@@ -19,6 +19,7 @@
 from testtools import matchers
 
 from tempest.api.volume import base
+from tempest.common import identity
 from tempest.common import tempest_fixtures as fixtures
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -92,9 +93,10 @@
         # Verify a new project's default quotas.
         project_name = data_utils.rand_name('quota_class_tenant')
         description = data_utils.rand_name('desc_')
-        project_id = self.identity_utils.create_project(
+        project_id = identity.identity_utils(self.os_admin).create_project(
             name=project_name, description=description)['id']
-        self.addCleanup(self.identity_utils.delete_project, project_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+                        project_id)
         default_quotas = self.admin_quotas_client.show_default_quota_set(
             project_id)['quota_set']
         self.assertThat(default_quotas.items(),
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 754104e..d56f1de 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from tempest.api.volume import base
+from tempest.common import identity
 from tempest.common import tempest_fixtures as fixtures
 from tempest.common import waiters
 from tempest.lib.common.utils import data_utils
@@ -100,7 +101,7 @@
 
         volume = self.create_volume()
         self.addCleanup(self.delete_volume,
-                        self.admin_volume_client, volume['id'])
+                        self.volumes_client, volume['id'])
 
         new_quota_usage = self.admin_quotas_client.show_quota_set(
             self.demo_tenant_id, params={'usage': True})['quota_set']
@@ -117,10 +118,11 @@
         # Admin can delete the resource quota set for a project
         project_name = data_utils.rand_name('quota_tenant')
         description = data_utils.rand_name('desc_')
-        project = self.identity_utils.create_project(project_name,
-                                                     description=description)
+        project = identity.identity_utils(self.os_admin).create_project(
+            project_name, description=description)
         project_id = project['id']
-        self.addCleanup(self.identity_utils.delete_project, project_id)
+        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+                        project_id)
         quota_set_default = self.admin_quotas_client.show_default_quota_set(
             project_id)['quota_set']
         volume_default = quota_set_default['volumes']
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 4cad52a..ae29049 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -22,15 +22,6 @@
 class VolumeTypesNegativeTest(base.BaseVolumeAdminTest):
 
     @decorators.attr(type=['negative'])
-    @decorators.idempotent_id('b48c98f2-e662-4885-9b71-032256906314')
-    def test_create_with_nonexistent_volume_type(self):
-        # Should not be able to create volume with nonexistent volume_type.
-        params = {'name': data_utils.rand_uuid(),
-                  'volume_type': data_utils.rand_uuid()}
-        self.assertRaises(lib_exc.NotFound,
-                          self.volumes_client.create_volume, **params)
-
-    @decorators.attr(type=['negative'])
     @decorators.idempotent_id('878b4e57-faa2-4659-b0d1-ce740a06ae81')
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 7d745f2..f139283 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -35,7 +35,6 @@
 
         # Create a test shared instance and volume for attach/detach tests
         cls.volume = cls.create_volume()
-        cls.mountpoint = "/dev/vdc"
 
     def create_image(self):
         # Create image
@@ -176,7 +175,7 @@
                           self.volumes_client.attach_volume,
                           data_utils.rand_uuid(),
                           instance_uuid=server['id'],
-                          mountpoint=self.mountpoint)
+                          mountpoint="/dev/vdc")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index df0f5a5..86fe3f5 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -128,6 +128,8 @@
                    "this stage.")
             raise ValueError(msg)
 
+        LOG.debug("Provisioning test server with validation resources %s",
+                  validation_resources)
         if 'security_groups' in kwargs:
             kwargs['security_groups'].append(
                 {'name': validation_resources['security_group']['name']})
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index 469defe..6e496d3 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -13,8 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest import config
+from tempest.lib.common import cred_client
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 def get_tenant_by_name(client, tenant_name):
     tenants = client.list_tenants()['tenants']
@@ -30,3 +34,37 @@
         if user['name'] == username:
             return user
     raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
+
+
+def identity_utils(clients):
+    """A client that abstracts v2 and v3 identity operations.
+
+    This can be used for creating and tearing down projects in tests. It
+    should not be used for testing identity features.
+
+    :param clients: a client manager.
+    :return: a creds client as returned by `cred_client.get_creds_client`
+    """
+    if CONF.identity.auth_version == 'v2':
+        client = clients.identity_client
+        users_client = clients.users_client
+        project_client = clients.tenants_client
+        roles_client = clients.roles_client
+        domains_client = None
+    else:
+        client = clients.identity_v3_client
+        users_client = clients.users_v3_client
+        project_client = clients.projects_client
+        roles_client = clients.roles_v3_client
+        domains_client = clients.domains_client
+
+    try:
+        domain = client.auth_provider.credentials.project_domain_name
+    except AttributeError:
+        domain = CONF.auth.default_credentials_domain_name
+
+    return cred_client.get_creds_client(client, project_client,
+                                        users_client,
+                                        roles_client,
+                                        domains_client,
+                                        project_domain_name=domain)
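
A minimal usage sketch of the new `identity_utils` helper, mirroring the
pattern applied to the volume quota tests above (`self.os_admin` and
`addCleanup` are assumed to come from the Tempest base test classes)::

    from tempest.common import identity
    from tempest.lib.common.utils import data_utils

    # Inside a test method of an admin test class
    project_name = data_utils.rand_name('quota_tenant')
    project = identity.identity_utils(self.os_admin).create_project(
        project_name, description=data_utils.rand_name('desc_'))
    # Schedule the cleanup right after provisioning the project
    self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                    project['id'])
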
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
deleted file mode 100644
index ae9d584..0000000
--- a/tempest/common/validation_resources.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-from oslo_log import log as logging
-
-from tempest.lib.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-
-LOG = logging.getLogger(__name__)
-
-
-def _network_service(clients, use_neutron):
-    # Internal helper to select the right network clients
-    if use_neutron:
-        return clients.network
-    else:
-        return clients.compute
-
-
-def create_ssh_security_group(clients, add_rule=False, ethertype='IPv4',
-                              use_neutron=True):
-    """Create a security group for ping/ssh testing
-
-    Create a security group to be attached to a VM using the nova or neutron
-    clients. If rules are added, the group can be attached to a VM to enable
-    connectivity validation over ICMP and further testing over SSH.
-
-    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
-        or of a subclass of it. Resources are provisioned using clients from
-        `clients`.
-    :param add_rule: Whether security group rules are provisioned or not.
-        Defaults to `False`.
-    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
-    :param use_neutron: When True resources are provisioned via neutron, when
-        False resources are provisioned via nova.
-    :returns: A dictionary with the security group as returned by the API.
-
-    Examples::
-
-        from tempest.common import validation_resources as vr
-        from tempest.lib import auth
-        from tempest.lib.services import clients
-
-        creds = auth.get_credentials('http://mycloud/identity/v3',
-                                     username='me', project_name='me',
-                                     password='secret', domain_name='Default')
-        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
-        # Security group for IPv4 tests
-        sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
-        # Security group for IPv6 tests
-        sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
-                                           add_rule=True)
-    """
-    network_service = _network_service(clients, use_neutron)
-    security_groups_client = network_service.SecurityGroupsClient()
-    security_group_rules_client = network_service.SecurityGroupRulesClient()
-    # Security Group clients for nova and neutron behave the same
-    sg_name = data_utils.rand_name('securitygroup-')
-    sg_description = data_utils.rand_name('description-')
-    security_group = security_groups_client.create_security_group(
-        name=sg_name, description=sg_description)['security_group']
-    # Security Group Rules clients require different parameters depending on
-    # the network service in use
-    if add_rule:
-        if use_neutron:
-            security_group_rules_client.create_security_group_rule(
-                security_group_id=security_group['id'],
-                protocol='tcp',
-                ethertype=ethertype,
-                port_range_min=22,
-                port_range_max=22,
-                direction='ingress')
-            security_group_rules_client.create_security_group_rule(
-                security_group_id=security_group['id'],
-                protocol='icmp',
-                ethertype=ethertype,
-                direction='ingress')
-        else:
-            security_group_rules_client.create_security_group_rule(
-                parent_group_id=security_group['id'], ip_protocol='tcp',
-                from_port=22, to_port=22)
-            security_group_rules_client.create_security_group_rule(
-                parent_group_id=security_group['id'], ip_protocol='icmp',
-                from_port=-1, to_port=-1)
-    LOG.debug("SSH Validation resource security group with tcp and icmp "
-              "rules %s created", sg_name)
-    return security_group
-
-
-def create_validation_resources(clients, keypair=False, floating_ip=False,
-                                security_group=False,
-                                security_group_rules=False,
-                                ethertype='IPv4', use_neutron=True,
-                                floating_network_id=None,
-                                floating_network_name=None):
-    """Provision resources for VM ping/ssh testing
-
-    Create resources required to be able to ping / ssh a virtual machine:
-    keypair, security group, security group rules and a floating IP.
-    Which of those resources are required may depend on the cloud setup and on
-    the specific test and it can be controlled via the corresponding
-    arguments.
-
-    Provisioned resources are returned in a dictionary.
-
-    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
-        or of a subclass of it. Resources are provisioned using clients from
-        `clients`.
-    :param keypair: Whether to provision a keypair. Defaults to False.
-    :param floating_ip: Whether to provision a floating IP. Defaults to False.
-    :param security_group: Whether to provision a security group. Defaults to
-        False.
-    :param security_group_rules: Whether to provision security group rules.
-        Defaults to False.
-    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
-    :param use_neutron: When True resources are provisioned via neutron, when
-        False resources are provisioned via nova.
-    :param floating_network_id: The id of the network used to provision a
-        floating IP. Only used if a floating IP is requested and with neutron.
-    :param floating_network_name: The name of the floating IP pool used to
-        provision the floating IP. Only used if a floating IP is requested and
-        with nova-net.
-    :returns: A dictionary with the same keys as the input
-        `validation_resources` and the resources for values in the format
-         they are returned by the API.
-
-    Examples::
-
-        from tempest.common import validation_resources as vr
-        from tempest.lib import auth
-        from tempest.lib.services import clients
-
-        creds = auth.get_credentials('http://mycloud/identity/v3',
-                                     username='me', project_name='me',
-                                     password='secret', domain_name='Default')
-        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
-        # Request keypair and floating IP
-        resources = dict(keypair=True, security_group=False,
-                         security_group_rules=False, floating_ip=True)
-        resources = vr.create_validation_resources(
-            osclients, use_neutron=True,
-            floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
-            **resources)
-
-        # The floating IP to be attached to the VM
-        floating_ip = resources['floating_ip']['ip']
-    """
-    # Create and Return the validation resources required to validate a VM
-    validation_data = {}
-    if keypair:
-        keypair_name = data_utils.rand_name('keypair')
-        validation_data.update(
-            clients.compute.KeyPairsClient().create_keypair(
-                name=keypair_name))
-        LOG.debug("Validation resource key %s created", keypair_name)
-    if security_group:
-        validation_data['security_group'] = create_ssh_security_group(
-            clients, add_rule=security_group_rules,
-            use_neutron=use_neutron, ethertype=ethertype)
-    if floating_ip:
-        floating_ip_client = _network_service(
-            clients, use_neutron).FloatingIPsClient()
-        if use_neutron:
-            floatingip = floating_ip_client.create_floatingip(
-                floating_network_id=floating_network_id)
-            # validation_resources['floating_ip'] has historically looked
-            # like a compute API POST /os-floating-ips response, so we need
-            # to mangle it a bit for a Neutron response with different
-            # fields.
-            validation_data['floating_ip'] = floatingip['floatingip']
-            validation_data['floating_ip']['ip'] = (
-                floatingip['floatingip']['floating_ip_address'])
-        else:
-            # NOTE(mriedem): The os-floating-ips compute API was deprecated
-            # in the 2.36 microversion. Any tests for CRUD operations on
-            # floating IPs using the compute API should be capped at 2.35.
-            validation_data.update(floating_ip_client.create_floating_ip(
-                pool=floating_network_name))
-    return validation_data
-
-
-def clear_validation_resources(clients, keypair=None, floating_ip=None,
-                               security_group=None, use_neutron=True):
-    """Cleanup resources for VM ping/ssh testing
-
-    Cleanup a set of resources provisioned via `create_validation_resources`.
-    In case of errors during cleanup, the exception is logged and the cleanup
-    process is continued. The first exception that was raised is re-raised
-    after the cleanup is complete.
-
-    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
-        or of a subclass of it. Resources are provisioned using clients from
-        `clients`.
-    :param keypair: A dictionary with the keypair to be deleted. Defaults to
-        None.
-    :param floating_ip: A dictionary with the floating_ip to be deleted.
-        Defaults to None.
-    :param security_group: A dictionary with the security_group to be deleted.
-        Defaults to None.
-    :param use_neutron: When True resources are provisioned via neutron, when
-        False resources are provisioned via nova.
-    :returns: A dictionary with the same keys as the input
-        `validation_resources` and the resources for values in the format
-         they are returned by the API.
-
-    Examples::
-
-        from tempest.common import validation_resources as vr
-        from tempest.lib import auth
-        from tempest.lib.services import clients
-
-        creds = auth.get_credentials('http://mycloud/identity/v3',
-                                     username='me', project_name='me',
-                                     password='secret', domain_name='Default')
-        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
-        # Request keypair and floating IP
-        resources = dict(keypair=True, security_group=False,
-                         security_group_rules=False, floating_ip=True)
-        resources = vr.create_validation_resources(
-            osclients, validation_resources=resources, use_neutron=True,
-            floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C')
-
-        # Now cleanup the resources
-        try:
-            vr.clear_validation_resources(osclients, use_neutron=True,
-                                          **resources)
-        except Exception as e:
-            LOG.exception('Something went wrong during cleanup, ignoring')
-    """
-    has_exception = None
-    if keypair:
-        keypair_client = clients.compute.KeyPairsClient()
-        keypair_name = keypair['name']
-        try:
-            keypair_client.delete_keypair(keypair_name)
-        except lib_exc.NotFound:
-            LOG.warning(
-                "Keypair %s is not found when attempting to delete",
-                keypair_name
-            )
-        except Exception as exc:
-            LOG.exception('Exception raised while deleting key %s',
-                          keypair_name)
-            if not has_exception:
-                has_exception = exc
-    network_service = _network_service(clients, use_neutron)
-    if security_group:
-        security_group_client = network_service.SecurityGroupsClient()
-        sec_id = security_group['id']
-        try:
-            security_group_client.delete_security_group(sec_id)
-            security_group_client.wait_for_resource_deletion(sec_id)
-        except lib_exc.NotFound:
-            LOG.warning("Security group %s is not found when attempting "
-                        "to delete", sec_id)
-        except lib_exc.Conflict as exc:
-            LOG.exception('Conflict while deleting security '
-                          'group %s VM might not be deleted', sec_id)
-            if not has_exception:
-                has_exception = exc
-        except Exception as exc:
-            LOG.exception('Exception raised while deleting security '
-                          'group %s', sec_id)
-            if not has_exception:
-                has_exception = exc
-    if floating_ip:
-        floating_ip_client = network_service.FloatingIPsClient()
-        fip_id = floating_ip['id']
-        try:
-            if use_neutron:
-                floating_ip_client.delete_floatingip(fip_id)
-            else:
-                floating_ip_client.delete_floating_ip(fip_id)
-        except lib_exc.NotFound:
-            LOG.warning('Floating ip %s not found while attempting to '
-                        'delete', fip_id)
-        except Exception as exc:
-            LOG.exception('Exception raised while deleting ip %s', fip_id)
-            if not has_exception:
-                has_exception = exc
-    if has_exception:
-        raise has_exception
diff --git a/tempest/config.py b/tempest/config.py
index e78a07f..4d0839a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -15,15 +15,12 @@
 
 from __future__ import print_function
 
-import functools
 import os
 import tempfile
 
-import debtcollector.removals
 from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslo_log import log as logging
-import testtools
 
 from tempest.lib import exceptions
 from tempest.lib.services import clients
@@ -1284,79 +1281,6 @@
 CONF = TempestConfigProxy()
 
 
-@debtcollector.removals.remove(
-    message='use testtools.skipUnless instead', removal_version='Queens')
-def skip_unless_config(*args):
-    """Decorator to raise a skip if a config opt doesn't exist or is False
-
-    :param str group: The first arg, the option group to check
-    :param str name: The second arg, the option name to check
-    :param str msg: Optional third arg, the skip msg to use if a skip is raised
-    :raises testtools.TestCaseskipException: If the specified config option
-        doesn't exist or it exists and evaluates to False
-    """
-    def decorator(f):
-        group = args[0]
-        name = args[1]
-
-        @functools.wraps(f)
-        def wrapper(self, *func_args, **func_kwargs):
-            if not hasattr(CONF, group):
-                msg = "Config group %s doesn't exist" % group
-                raise testtools.TestCase.skipException(msg)
-
-            conf_group = getattr(CONF, group)
-            if not hasattr(conf_group, name):
-                msg = "Config option %s.%s doesn't exist" % (group,
-                                                             name)
-                raise testtools.TestCase.skipException(msg)
-
-            value = getattr(conf_group, name)
-            if not value:
-                if len(args) == 3:
-                    msg = args[2]
-                else:
-                    msg = "Config option %s.%s is false" % (group,
-                                                            name)
-                raise testtools.TestCase.skipException(msg)
-            return f(self, *func_args, **func_kwargs)
-        return wrapper
-    return decorator
-
-
-@debtcollector.removals.remove(
-    message='use testtools.skipIf instead', removal_version='Queens')
-def skip_if_config(*args):
-    """Raise a skipException if a config exists and is True
-
-    :param str group: The first arg, the option group to check
-    :param str name: The second arg, the option name to check
-    :param str msg: Optional third arg, the skip msg to use if a skip is raised
-    :raises testtools.TestCase.skipException: If the specified config option
-        exists and evaluates to True
-    """
-    def decorator(f):
-        group = args[0]
-        name = args[1]
-
-        @functools.wraps(f)
-        def wrapper(self, *func_args, **func_kwargs):
-            if hasattr(CONF, group):
-                conf_group = getattr(CONF, group)
-                if hasattr(conf_group, name):
-                    value = getattr(conf_group, name)
-                    if value:
-                        if len(args) == 3:
-                            msg = args[2]
-                        else:
-                            msg = "Config option %s.%s is false" % (group,
-                                                                    name)
-                        raise testtools.TestCase.skipException(msg)
-            return f(self, *func_args, **func_kwargs)
-        return wrapper
-    return decorator
-
-
 def service_client_config(service_client_name=None):
     """Return a dict with the parameters to init service clients
 
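
The removed `skip_unless_config` / `skip_if_config` decorators point to the
`testtools` equivalents named in their deprecation messages. A hedged sketch
of the replacement pattern (the config option checked here is only an
example)::

    import testtools

    from tempest.api.volume import base
    from tempest import config

    CONF = config.CONF


    class MyVolumeTest(base.BaseVolumeTest):

        @testtools.skipUnless(CONF.service_available.cinder,
                              'Cinder support is required')
        def test_something(self):
            pass
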
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 9a6c8f5..4f1a883 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -28,6 +28,43 @@
 
 
 class DynamicCredentialProvider(cred_provider.CredentialProvider):
+    """Creates credentials dynamically for tests
+
+    A credential provider that, based on an initial set of
+    admin credentials, creates new credentials on the fly for
+    tests to use and then discard.
+
+    :param str identity_version: identity API version to use `v2` or `v3`
+    :param str admin_role: name of the admin role added to admin users
+    :param str name: when specified, this is included in the names of
+                     the created dynamic resources
+    :param str credentials_domain: name of the domain where the users
+                                   are created. If not defined, the project
+                                   domain from admin_credentials is used
+    :param dict network_resources: network resources to be created for
+                                   the created credentials
+    :param Credentials admin_creds: initial admin credentials
+    :param bool identity_admin_domain_scope: Set to true if admin should be
+                                             scoped to the domain. By
+                                             default this is False and the
+                                             admin role is scoped to the
+                                             project.
+    :param str identity_admin_role: The role name to use for admin
+    :param list extra_roles: A list of strings for extra roles that should
+                             be assigned to all created users
+    :param bool neutron_available: Whether we are running in an environment
+                                   with neutron
+    :param bool create_networks: Whether dynamic project networks should be
+                                 created or not
+    :param project_network_cidr: The CIDR to use for created project
+                                 networks
+    :param project_network_mask_bits: The network mask bits to use for
+                                      created project networks
+    :param public_network_id: The id for the public network to use
+    :param identity_admin_endpoint_type: The endpoint type for identity
+                                         admin clients. Defaults to public.
+    :param identity_uri: Identity URI of the target cloud
+    """
 
     def __init__(self, identity_version, name=None, network_resources=None,
                  credentials_domain=None, admin_role=None, admin_creds=None,
@@ -37,43 +74,6 @@
                  project_network_cidr=None, project_network_mask_bits=None,
                  public_network_id=None, resource_prefix=None,
                  identity_admin_endpoint_type='public', identity_uri=None):
-        """Creates credentials dynamically for tests
-
-        A credential provider that, based on an initial set of
-        admin credentials, creates new credentials on the fly for
-        tests to use and then discard.
-
-        :param str identity_version: identity API version to use `v2` or `v3`
-        :param str admin_role: name of the admin role added to admin users
-        :param str name: names of dynamic resources include this parameter
-                         when specified
-        :param str credentials_domain: name of the domain where the users
-                                       are created. If not defined, the project
-                                       domain from admin_credentials is used
-        :param dict network_resources: network resources to be created for
-                                       the created credentials
-        :param Credentials admin_creds: initial admin credentials
-        :param bool identity_admin_domain_scope: Set to true if admin should be
-                                                 scoped to the domain. By
-                                                 default this is False and the
-                                                 admin role is scoped to the
-                                                 project.
-        :param str identity_admin_role: The role name to use for admin
-        :param list extra_roles: A list of strings for extra roles that should
-                                 be assigned to all created users
-        :param bool neutron_available: Whether we are running in an environemnt
-                                       with neutron
-        :param bool create_networks: Whether dynamic project networks should be
-                                     created or not
-        :param project_network_cidr: The CIDR to use for created project
-                                     networks
-        :param project_network_mask_bits: The network mask bits to use for
-                                          created project networks
-        :param public_network_id: The id for the public network to use
-        :param identity_admin_endpoint_type: The endpoint type for identity
-                                             admin clients. Defaults to public.
-        :param identity_uri: Identity URI of the target cloud
-        """
         super(DynamicCredentialProvider, self).__init__(
             identity_version=identity_version, identity_uri=identity_uri,
             admin_role=admin_role, name=name,
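
For context, a condensed usage sketch of `DynamicCredentialProvider`, whose
docstring is moved to class level above (`admin_creds` is assumed to be an
existing admin `Credentials` object; argument values are illustrative)::

    from tempest.lib.common import dynamic_creds

    provider = dynamic_creds.DynamicCredentialProvider(
        identity_version='v3',
        identity_uri='http://mycloud/identity/v3',
        admin_creds=admin_creds,
        credentials_domain='Default')
    # Provision a fresh project and user on the fly
    primary = provider.get_primary_creds()
    # ... run tests with the returned credentials ...
    # Discard everything that was provisioned
    provider.clear_creds()
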
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index cd3a10e..83db513 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -41,6 +41,35 @@
 
 
 class PreProvisionedCredentialProvider(cred_provider.CredentialProvider):
+    """Credentials provider using pre-provisioned accounts
+
+    This credentials provider loads the details of pre-provisioned
+    accounts from a YAML file, in the format specified by
+    ``etc/accounts.yaml.sample``. It locks accounts while in use, using the
+    external locking mechanism, allowing for multiple Python processes
+    to share a single account file and thus run tests in parallel.
+
+    The accounts_lock_dir must be generated using `lockutils.get_lock_path`
+    from the oslo.concurrency library. For instance::
+
+        accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
+                                         'test_accounts')
+
+    Role names for object storage are optional as long as the
+    `operator` and `reseller_admin` credential types are not used in the
+    accounts file.
+
+    :param identity_version: identity version of the credentials
+    :param admin_role: name of the admin role
+    :param test_accounts_file: path to the accounts YAML file
+    :param accounts_lock_dir: the directory for external locking
+    :param name: name of the hash file (optional)
+    :param credentials_domain: name of the domain credentials belong to
+                               (if no domain is configured)
+    :param object_storage_operator_role: name of the role
+    :param object_storage_reseller_admin_role: name of the role
+    :param identity_uri: Identity URI of the target cloud
+    """
 
     # Exclude from the hash fields specific to v2 or v3 identity API
     # i.e. only include user*, project*, tenant* and password
@@ -51,35 +80,6 @@
                  accounts_lock_dir, name=None, credentials_domain=None,
                  admin_role=None, object_storage_operator_role=None,
                  object_storage_reseller_admin_role=None, identity_uri=None):
-        """Credentials provider using pre-provisioned accounts
-
-        This credentials provider loads the details of pre-provisioned
-        accounts from a YAML file, in the format specified by
-        `etc/accounts.yaml.sample`. It locks accounts while in use, using the
-        external locking mechanism, allowing for multiple python processes
-        to share a single account file, and thus running tests in parallel.
-
-        The accounts_lock_dir must be generated using `lockutils.get_lock_path`
-        from the oslo.concurrency library. For instance:
-
-            accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
-                                             'test_accounts')
-
-        Role names for object storage are optional as long as the
-        `operator` and `reseller_admin` credential types are not used in the
-        accounts file.
-
-        :param identity_version: identity version of the credentials
-        :param admin_role: name of the admin role
-        :param test_accounts_file: path to the accounts YAML file
-        :param accounts_lock_dir: the directory for external locking
-        :param name: name of the hash file (optional)
-        :param credentials_domain: name of the domain credentials belong to
-                                   (if no domain is configured)
-        :param object_storage_operator_role: name of the role
-        :param object_storage_reseller_admin_role: name of the role
-        :param identity_uri: Identity URI of the target cloud
-        """
         super(PreProvisionedCredentialProvider, self).__init__(
             identity_version=identity_version, name=name,
             admin_role=admin_role, credentials_domain=credentials_domain,
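
Similarly, a hedged sketch for the pre-provisioned provider, whose docstring
is also moved to class level (the accounts file path and role name are
placeholders, and `CONF` is the usual oslo.config object)::

    import os

    from oslo_concurrency import lockutils

    from tempest.lib.common import preprov_creds

    lock_dir = os.path.join(lockutils.get_lock_path(CONF), 'test_accounts')
    provider = preprov_creds.PreProvisionedCredentialProvider(
        identity_version='v3',
        test_accounts_file='/etc/tempest/accounts.yaml',
        accounts_lock_dir=lock_dir,
        admin_role='admin')
    # Locks one of the pre-provisioned accounts for this process
    creds = provider.get_primary_creds()
    # Releases the account so other workers can reuse it
    provider.clear_creds()
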
diff --git a/tempest/lib/common/validation_resources.py b/tempest/lib/common/validation_resources.py
new file mode 100644
index 0000000..c35a01a
--- /dev/null
+++ b/tempest/lib/common/validation_resources.py
@@ -0,0 +1,457 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2017 IBM Corp.
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import fixtures
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
+
+LOG = logging.getLogger(__name__)
+
+
+def _network_service(clients, use_neutron):
+    # Internal helper to select the right network clients
+    if use_neutron:
+        return clients.network
+    else:
+        return clients.compute
+
+
+def create_ssh_security_group(clients, add_rule=False, ethertype='IPv4',
+                              use_neutron=True):
+    """Create a security group for ping/ssh testing
+
+    Create a security group to be attached to a VM using the nova or neutron
+    clients. If rules are added, the group can be attached to a VM to enable
+    connectivity validation over ICMP and further testing over SSH.
+
+    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+        or of a subclass of it. Resources are provisioned using clients from
+        `clients`.
+    :param add_rule: Whether security group rules are provisioned or not.
+        Defaults to `False`.
+    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
+    :param use_neutron: When True resources are provisioned via neutron, when
+        False resources are provisioned via nova.
+    :returns: A dictionary with the security group as returned by the API.
+
+    Examples::
+
+        from tempest.lib.common import validation_resources as vr
+        from tempest.lib import auth
+        from tempest.lib.services import clients
+
+        creds = auth.get_credentials('http://mycloud/identity/v3',
+                                     username='me', project_name='me',
+                                     password='secret', domain_name='Default')
+        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+        # Security group for IPv4 tests
+        sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
+        # Security group for IPv6 tests
+        sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
+                                           add_rule=True)
+    """
+    network_service = _network_service(clients, use_neutron)
+    security_groups_client = network_service.SecurityGroupsClient()
+    security_group_rules_client = network_service.SecurityGroupRulesClient()
+    # Security Group clients for nova and neutron behave the same
+    sg_name = data_utils.rand_name('securitygroup-')
+    sg_description = data_utils.rand_name('description-')
+    security_group = security_groups_client.create_security_group(
+        name=sg_name, description=sg_description)['security_group']
+    # Security Group Rules clients require different parameters depending on
+    # the network service in use
+    if add_rule:
+        try:
+            if use_neutron:
+                security_group_rules_client.create_security_group_rule(
+                    security_group_id=security_group['id'],
+                    protocol='tcp',
+                    ethertype=ethertype,
+                    port_range_min=22,
+                    port_range_max=22,
+                    direction='ingress')
+                security_group_rules_client.create_security_group_rule(
+                    security_group_id=security_group['id'],
+                    protocol='icmp',
+                    ethertype=ethertype,
+                    direction='ingress')
+            else:
+                security_group_rules_client.create_security_group_rule(
+                    parent_group_id=security_group['id'], ip_protocol='tcp',
+                    from_port=22, to_port=22)
+                security_group_rules_client.create_security_group_rule(
+                    parent_group_id=security_group['id'], ip_protocol='icmp',
+                    from_port=-1, to_port=-1)
+        except Exception as sgc_exc:
+            # If adding security group rules fails, we cleanup the SG before
+            # re-raising the failure up
+            with excutils.save_and_reraise_exception():
+                try:
+                    msg = ('Error while provisioning security group rules in '
+                           'security group %s. Trying to cleanup.')
+                    # The exceptions logging is already handled, so using
+                    # debug here just to provide more context
+                    LOG.debug(msg, sgc_exc)
+                    clear_validation_resources(
+                        clients, keypair=None, floating_ip=None,
+                        security_group=security_group,
+                        use_neutron=use_neutron)
+                except Exception as cleanup_exc:
+                    msg = ('Error during cleanup of a security group. '
+                           'The cleanup was triggered by an exception during '
+                           'the provisioning of security group rules.\n'
+                           'Provisioning exception: %s\n'
+                           'First cleanup exception: %s')
+                    LOG.exception(msg, sgc_exc, cleanup_exc)
+    LOG.debug("SSH Validation resource security group with tcp and icmp "
+              "rules %s created", sg_name)
+    return security_group
+
+
+def create_validation_resources(clients, keypair=False, floating_ip=False,
+                                security_group=False,
+                                security_group_rules=False,
+                                ethertype='IPv4', use_neutron=True,
+                                floating_network_id=None,
+                                floating_network_name=None):
+    """Provision resources for VM ping/ssh testing
+
+    Create resources required to be able to ping / ssh a virtual machine:
+    keypair, security group, security group rules and a floating IP.
+    Which of those resources are required may depend on the cloud setup and on
+    the specific test and it can be controlled via the corresponding
+    arguments.
+
+    Provisioned resources are returned in a dictionary.
+
+    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+        or of a subclass of it. Resources are provisioned using clients from
+        `clients`.
+    :param keypair: Whether to provision a keypair. Defaults to False.
+    :param floating_ip: Whether to provision a floating IP. Defaults to False.
+    :param security_group: Whether to provision a security group. Defaults to
+        False.
+    :param security_group_rules: Whether to provision security group rules.
+        Defaults to False.
+    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
+    :param use_neutron: When True resources are provisioned via neutron, when
+        False resources are provisioned via nova.
+    :param floating_network_id: The id of the network used to provision a
+        floating IP. Only used if a floating IP is requested and with neutron.
+    :param floating_network_name: The name of the floating IP pool used to
+        provision the floating IP. Only used if a floating IP is requested and
+        with nova-net.
+    :returns: A dictionary with the resources in the format they are returned
+        by the API. Valid keys are 'keypair', 'floating_ip' and
+        'security_group'.
+
+    Examples::
+
+        from tempest.lib.common import validation_resources as vr
+        from tempest.lib import auth
+        from tempest.lib.services import clients
+
+        creds = auth.get_credentials('http://mycloud/identity/v3',
+                                     username='me', project_name='me',
+                                     password='secret', domain_name='Default')
+        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+        # Request keypair and floating IP
+        resources = dict(keypair=True, security_group=False,
+                         security_group_rules=False, floating_ip=True)
+        resources = vr.create_validation_resources(
+            osclients, use_neutron=True,
+            floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
+            **resources)
+
+        # The floating IP to be attached to the VM
+        floating_ip = resources['floating_ip']['ip']
+    """
+    # Create and Return the validation resources required to validate a VM
+    msg = ('Requested validation resources keypair %s, floating IP %s, '
+           'security group %s')
+    LOG.debug(msg, keypair, floating_ip, security_group)
+    validation_data = {}
+    try:
+        if keypair:
+            keypair_name = data_utils.rand_name('keypair')
+            validation_data.update(
+                clients.compute.KeyPairsClient().create_keypair(
+                    name=keypair_name))
+            LOG.debug("Validation resource key %s created", keypair_name)
+        if security_group:
+            validation_data['security_group'] = create_ssh_security_group(
+                clients, add_rule=security_group_rules,
+                use_neutron=use_neutron, ethertype=ethertype)
+        if floating_ip:
+            floating_ip_client = _network_service(
+                clients, use_neutron).FloatingIPsClient()
+            if use_neutron:
+                floatingip = floating_ip_client.create_floatingip(
+                    floating_network_id=floating_network_id)
+                # validation_resources['floating_ip'] has historically looked
+                # like a compute API POST /os-floating-ips response, so we need
+                # to mangle it a bit for a Neutron response with different
+                # fields.
+                validation_data['floating_ip'] = floatingip['floatingip']
+                validation_data['floating_ip']['ip'] = (
+                    floatingip['floatingip']['floating_ip_address'])
+            else:
+                # NOTE(mriedem): The os-floating-ips compute API was deprecated
+                # in the 2.36 microversion. Any tests for CRUD operations on
+                # floating IPs using the compute API should be capped at 2.35.
+                validation_data.update(floating_ip_client.create_floating_ip(
+                    pool=floating_network_name))
+            LOG.debug("Validation resource floating IP %s created",
+                      validation_data['floating_ip'])
+    except Exception as prov_exc:
+        # If something goes wrong, cleanup as much as possible before we
+        # re-raise the exception
+        with excutils.save_and_reraise_exception():
+            if validation_data:
+                # Cleanup may fail as well
+                try:
+                    msg = ('Error while provisioning validation resources %s. '
+                           'Trying to cleanup what we provisioned so far: %s')
+                    # The exceptions logging is already handled, so using
+                    # debug here just to provide more context
+                    LOG.debug(msg, prov_exc, str(validation_data))
+                    clear_validation_resources(
+                        clients,
+                        keypair=validation_data.get('keypair', None),
+                        floating_ip=validation_data.get('floating_ip', None),
+                        security_group=validation_data.get('security_group',
+                                                           None),
+                        use_neutron=use_neutron)
+                except Exception as cleanup_exc:
+                    msg = ('Error during cleanup of validation resources. '
+                           'The cleanup was triggered by an exception during '
+                           'the provisioning step.\n'
+                           'Provisioning exception: %s\n'
+                           'First cleanup exception: %s')
+                    LOG.exception(msg, prov_exc, cleanup_exc)
+    return validation_data
+
+
+def clear_validation_resources(clients, keypair=None, floating_ip=None,
+                               security_group=None, use_neutron=True):
+    """Cleanup resources for VM ping/ssh testing
+
+    Cleanup a set of resources provisioned via `create_validation_resources`.
+    In case of errors during cleanup, the exception is logged and the cleanup
+    process is continued. The first exception that was raised is re-raised
+    after the cleanup is complete.
+
+    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+        or of a subclass of it. Resources are provisioned using clients from
+        `clients`.
+    :param keypair: A dictionary with the keypair to be deleted. Defaults to
+        None.
+    :param floating_ip: A dictionary with the floating_ip to be deleted.
+        Defaults to None.
+    :param security_group: A dictionary with the security_group to be deleted.
+        Defaults to None.
+    :param use_neutron: When True resources are provisioned via neutron, when
+        False resources are provisioned via nova.
+
+    Examples::
+
+        from tempest.lib.common import validation_resources as vr
+        from tempest.lib import auth
+        from tempest.lib.services import clients
+
+        creds = auth.get_credentials('http://mycloud/identity/v3',
+                                     username='me', project_name='me',
+                                     password='secret', domain_name='Default')
+        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+        # Request keypair and floating IP
+        resources = dict(keypair=True, security_group=False,
+                         security_group_rules=False, floating_ip=True)
+        resources = vr.create_validation_resources(
+            osclients, use_neutron=True,
+            floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
+            **resources)
+
+        # Now cleanup the resources
+        try:
+            vr.clear_validation_resources(osclients, use_neutron=True,
+                                          **resources)
+        except Exception as e:
+            LOG.exception('Something went wrong during cleanup, ignoring')
+    """
+    has_exception = None
+    if keypair:
+        keypair_client = clients.compute.KeyPairsClient()
+        keypair_name = keypair['name']
+        try:
+            keypair_client.delete_keypair(keypair_name)
+        except lib_exc.NotFound:
+            LOG.warning(
+                "Keypair %s is not found when attempting to delete",
+                keypair_name
+            )
+        except Exception as exc:
+            LOG.exception('Exception raised while deleting key %s',
+                          keypair_name)
+            if not has_exception:
+                has_exception = exc
+    network_service = _network_service(clients, use_neutron)
+    if security_group:
+        security_group_client = network_service.SecurityGroupsClient()
+        sec_id = security_group['id']
+        try:
+            security_group_client.delete_security_group(sec_id)
+            security_group_client.wait_for_resource_deletion(sec_id)
+        except lib_exc.NotFound:
+            LOG.warning("Security group %s is not found when attempting "
+                        "to delete", sec_id)
+        except lib_exc.Conflict as exc:
+            LOG.exception('Conflict while deleting security group %s, '
+                          'the VM might not be deleted', sec_id)
+            if not has_exception:
+                has_exception = exc
+        except Exception as exc:
+            LOG.exception('Exception raised while deleting security '
+                          'group %s', sec_id)
+            if not has_exception:
+                has_exception = exc
+    if floating_ip:
+        floating_ip_client = network_service.FloatingIPsClient()
+        fip_id = floating_ip['id']
+        try:
+            if use_neutron:
+                floating_ip_client.delete_floatingip(fip_id)
+            else:
+                floating_ip_client.delete_floating_ip(fip_id)
+        except lib_exc.NotFound:
+            LOG.warning('Floating ip %s not found while attempting to '
+                        'delete', fip_id)
+        except Exception as exc:
+            LOG.exception('Exception raised while deleting ip %s', fip_id)
+            if not has_exception:
+                has_exception = exc
+    if has_exception:
+        raise has_exception
+
+
+class ValidationResourcesFixture(fixtures.Fixture):
+    """Fixture to provision and cleanup validation resources"""
+
+    DICT_KEYS = ['keypair', 'security_group', 'floating_ip']
+
+    def __init__(self, clients, keypair=False, floating_ip=False,
+                 security_group=False, security_group_rules=False,
+                 ethertype='IPv4', use_neutron=True, floating_network_id=None,
+                 floating_network_name=None):
+        """Create a ValidationResourcesFixture
+
+        Create a ValidationResourcesFixture fixture, which provisions the
+        resources required to be able to ping / ssh a virtual machine upon
+        setUp and clears them out upon cleanup. Resources are a keypair, a
+        security group, security group rules and a floating IP, depending
+        on the params.
+
+        The fixture exposes a dictionary that includes provisioned resources.
+
+        :param clients: `tempest.lib.services.clients.ServiceClients` or of a
+            subclass of it. Resources are provisioned using clients from
+            `clients`.
+        :param keypair: Whether to provision a keypair. Defaults to False.
+        :param floating_ip: Whether to provision a floating IP.
+            Defaults to False.
+        :param security_group: Whether to provision a security group.
+            Defaults to False.
+        :param security_group_rules: Whether to provision security group rules.
+            Defaults to False.
+        :param ethertype: 'IPv4' or 'IPv6'. Honoured only if neutron is used.
+        :param use_neutron: When True resources are provisioned via neutron,
+            when False resources are provisioned via nova.
+        :param floating_network_id: The id of the network used to provision a
+            floating IP. Only used if a floating IP is requested and neutron
+            is used.
+        :param floating_network_name: The name of the floating IP pool used to
+            provision the floating IP. Only used if a floating IP is requested
+            and with nova-net.
+        Provisioned resources are exposed via the `resources` property, as a
+        dictionary with keys from `DICT_KEYS` and values in the format they
+        are returned by the API.
+
+        Examples::
+
+            from tempest.lib.common import validation_resources as vr
+            from tempest.lib import auth
+            from tempest.lib.services import clients
+            import testtools
+
+
+            class TestWithVR(testtools.TestCase):
+
+                def setUp(self):
+                    creds = auth.get_credentials(
+                        'http://mycloud/identity/v3',
+                         username='me', project_name='me',
+                         password='secret', domain_name='Default')
+
+                    osclients = clients.ServiceClients(
+                        creds, 'http://mycloud/identity/v3')
+                    # Request keypair and floating IP
+                    resources = dict(keypair=True, security_group=False,
+                                     security_group_rules=False,
+                                     floating_ip=True)
+                    network_id = '4240E68E-23DA-4C82-AC34-9FEFAA24521C'
+                    self.vr = self.useFixture(vr.ValidationResourcesFixture(
+                        osclients, use_neutron=True,
+                        floating_network_id=network_id,
+                        **resources))
+
+                def test_use_ip(self):
+                    # The floating IP to be attached to the VM
+                    floating_ip = self.vr.resources['floating_ip']['ip']
+        """
+        self._clients = clients
+        self._keypair = keypair
+        self._floating_ip = floating_ip
+        self._security_group = security_group
+        self._security_group_rules = security_group_rules
+        self._ethertype = ethertype
+        self._use_neutron = use_neutron
+        self._floating_network_id = floating_network_id
+        self._floating_network_name = floating_network_name
+        self._validation_resources = None
+
+    def _setUp(self):
+        msg = ('Requested setup of ValidationResources keypair %s, floating '
+               'IP %s, security group %s')
+        LOG.debug(msg, self._keypair, self._floating_ip, self._security_group)
+        self._validation_resources = create_validation_resources(
+            self._clients, keypair=self._keypair,
+            floating_ip=self._floating_ip,
+            security_group=self._security_group,
+            security_group_rules=self._security_group_rules,
+            ethertype=self._ethertype, use_neutron=self._use_neutron,
+            floating_network_id=self._floating_network_id,
+            floating_network_name=self._floating_network_name)
+        # If provisioning raises an exception we won't have anything to
+        # cleanup here, so we don't need a try-finally around provisioning
+        vr = self._validation_resources
+        self.addCleanup(clear_validation_resources, self._clients,
+                        keypair=vr.get('keypair', None),
+                        floating_ip=vr.get('floating_ip', None),
+                        security_group=vr.get('security_group', None),
+                        use_neutron=self._use_neutron)
+
+    @property
+    def resources(self):
+        return self._validation_resources
diff --git a/tempest/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
similarity index 75%
rename from tempest/services/object_storage/account_client.py
rename to tempest/lib/services/object_storage/account_client.py
index 5a1737e..67f01a6 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -50,41 +50,20 @@
         return resp, body
 
     def list_account_metadata(self):
-        """HEAD on the storage URL
-
-        Returns all account metadata headers
-        """
+        """List all account metadata."""
         resp, body = self.head('')
         self.expected_success(204, resp.status)
         return resp, body
 
     def list_account_containers(self, params=None):
-        """GET on the (base) storage URL
+        """List all containers for the account.
 
         Given valid X-Auth-Token, returns a list of all containers for the
         account.
 
-        Optional Arguments:
-        limit=[integer value N]
-            Limits the number of results to at most N values
-            DEFAULT:  10,000
-
-        marker=[string value X]
-            Given string value X, return object names greater in value
-            than the specified marker.
-            DEFAULT: No Marker
-
-        prefix=[string value Y]
-            Given string value Y, return object names starting with that prefix
-
-        reverse=[boolean value Z]
-            Reverse the result order based on the boolean value Z
-            DEFAULT: False
-
-        format=[string value, either 'json' or 'xml']
-            Specify either json or xml to return the respective serialized
-            response.
-            DEFAULT:  Python-List returned in response body
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://developer.openstack.org/api-ref/object-store/#show-account-details-and-list-containers
         """
         url = '?%s' % urllib.urlencode(params) if params else ''
 
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
old mode 100755
new mode 100644
diff --git a/tempest/lib/services/volume/v2/encryption_types_client.py b/tempest/lib/services/volume/v2/encryption_types_client.py
old mode 100755
new mode 100644
diff --git a/tempest/lib/services/volume/v3/group_snapshots_client.py b/tempest/lib/services/volume/v3/group_snapshots_client.py
index e644f02..d74e68a 100644
--- a/tempest/lib/services/volume/v3/group_snapshots_client.py
+++ b/tempest/lib/services/volume/v3/group_snapshots_client.py
@@ -60,7 +60,7 @@
         self.expected_success(200, resp.status)
         return rest_client.ResponseBody(resp, body)
 
-    def list_group_snapshots(self, **params):
+    def list_group_snapshots(self, detail=False, **params):
         """Information for all the tenant's group snapshots.
 
         For more information, please refer to the official API reference:
@@ -68,6 +68,8 @@
         https://developer.openstack.org/api-ref/block-storage/v3/#list-group-snapshots-with-details
         """
         url = "group_snapshots"
+        if detail:
+            url += "/detail"
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
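
The new ``detail`` flag only switches the request to the
``group_snapshots/detail`` URL before any query parameters are appended; a
self-contained sketch of the URL construction (helper name and parameter
values are illustrative)::

    from six.moves.urllib import parse as urllib

    def group_snapshots_url(detail=False, **params):
        # Same construction as list_group_snapshots above
        url = "group_snapshots"
        if detail:
            url += "/detail"
        if params:
            url += '?%s' % urllib.urlencode(params)
        return url

    print(group_snapshots_url(detail=True, all_tenants=True))
    # group_snapshots/detail?all_tenants=True
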
diff --git a/tempest/lib/services/volume/v3/groups_client.py b/tempest/lib/services/volume/v3/groups_client.py
index b463fdf..e2e477d 100644
--- a/tempest/lib/services/volume/v3/groups_client.py
+++ b/tempest/lib/services/volume/v3/groups_client.py
@@ -109,6 +109,17 @@
         self.expected_success(202, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def reset_group_status(self, group_id, status_to_set):
+        """Resets group status.
+
+        For more information, please refer to the official API reference:
+        https://developer.openstack.org/api-ref/block-storage/v3/#reset-group-status
+        """
+        post_body = json.dumps({'reset_status': {'status': status_to_set}})
+        resp, body = self.post('groups/%s/action' % group_id, post_body)
+        self.expected_success(202, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def is_resource_deleted(self, id):
         try:
             self.show_group(id)
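
``reset_group_status`` posts a ``reset_status`` action to the group; a
minimal sketch of the request body it builds (the status value shown is
illustrative)::

    import json

    def reset_group_status_body(status_to_set):
        # Body posted to groups/<group_id>/action
        return json.dumps({'reset_status': {'status': status_to_set}})

    print(reset_group_status_body('available'))
    # {"reset_status": {"status": "available"}}
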
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 2843222..52ec9a3 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -858,22 +858,6 @@
                         floating_ip['id'])
         return floating_ip
 
-    def _associate_floating_ip(self, floating_ip, server):
-        port_id, _ = self._get_server_port_id_and_ip4(server)
-        kwargs = dict(port_id=port_id)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertEqual(port_id, floating_ip['port_id'])
-        return floating_ip
-
-    def _disassociate_floating_ip(self, floating_ip):
-        """:param floating_ip: floating_ips_client.create_floatingip"""
-        kwargs = dict(port_id=None)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertIsNone(floating_ip['port_id'])
-        return floating_ip
-
     def check_floating_ip_status(self, floating_ip, status):
         """Verifies floatingip reaches the given status
 
@@ -1183,12 +1167,6 @@
                         router['id'])
         return router
 
-    def _update_router_admin_state(self, router, admin_state_up):
-        kwargs = dict(admin_state_up=admin_state_up)
-        router = self.routers_client.update_router(
-            router['id'], **kwargs)['router']
-        self.assertEqual(admin_state_up, router['admin_state_up'])
-
     def create_networks(self, networks_client=None,
                         routers_client=None, subnets_client=None,
                         tenant_id=None, dns_nameservers=None,
@@ -1355,7 +1333,7 @@
             present_obj = []
         if not_present_obj is None:
             not_present_obj = []
-        _, object_list = self.container_client.list_container_contents(
+        _, object_list = self.container_client.list_container_objects(
             container_name)
         if present_obj:
             for obj in present_obj:
@@ -1367,8 +1345,8 @@
     def change_container_acl(self, container_name, acl):
         metadata_param = {'metadata_prefix': 'x-container-',
                           'metadata': {'read': acl}}
-        self.container_client.update_container_metadata(container_name,
-                                                        **metadata_param)
+        self.container_client.create_update_or_delete_container_metadata(
+            container_name, create_update_metadata=metadata_param)
         resp, _ = self.container_client.list_container_metadata(container_name)
         self.assertEqual(resp['x-container-read'], acl)
 
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 2d38b06..0c3bf23 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -213,17 +213,20 @@
 
     def _disassociate_floating_ips(self):
         floating_ip, _ = self.floating_ip_tuple
-        self._disassociate_floating_ip(floating_ip)
-        self.floating_ip_tuple = Floating_IP_tuple(
-            floating_ip, None)
+        floating_ip = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], port_id=None)['floatingip']
+        self.assertIsNone(floating_ip['port_id'])
+        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, None)
 
     def _reassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
         # create a new server for the floating ip
         server = self._create_server(self.network)
-        self._associate_floating_ip(floating_ip, server)
-        self.floating_ip_tuple = Floating_IP_tuple(
-            floating_ip, server)
+        port_id, _ = self._get_server_port_id_and_ip4(server)
+        floating_ip = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], port_id=port_id)['floatingip']
+        self.assertEqual(port_id, floating_ip['port_id'])
+        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
 
     def _create_new_network(self, create_gateway=False):
         self.new_net = self._create_network()
@@ -355,6 +358,12 @@
             self.check_remote_connectivity(ssh_source, remote_ip,
                                            should_connect)
 
+    def _update_router_admin_state(self, router, admin_state_up):
+        kwargs = dict(admin_state_up=admin_state_up)
+        router = self.routers_client.update_router(
+            router['id'], **kwargs)['router']
+        self.assertEqual(admin_state_up, router['admin_state_up'])
+
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f323b3ba-82f8-4db7-8ea6-6a895869ec49')
     @utils.services('compute', 'network')
diff --git a/tempest/services/object_storage/__init__.py b/tempest/services/object_storage/__init__.py
index a2f0992..771ed8f 100644
--- a/tempest/services/object_storage/__init__.py
+++ b/tempest/services/object_storage/__init__.py
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+from tempest.lib.services.object_storage.account_client import AccountClient
 from tempest.lib.services.object_storage.bulk_middleware_client import \
     BulkMiddlewareClient
 from tempest.lib.services.object_storage.capabilities_client import \
     CapabilitiesClient
-from tempest.services.object_storage.account_client import AccountClient
 from tempest.services.object_storage.container_client import ContainerClient
 from tempest.services.object_storage.object_client import ObjectClient
 
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index afedd36..a253599 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -15,6 +15,7 @@
 
 from xml.etree import ElementTree as etree
 
+import debtcollector.moves
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 
@@ -54,83 +55,57 @@
         self.expected_success(204, resp.status)
         return resp, body
 
-    def update_container_metadata(
+    def create_update_or_delete_container_metadata(
             self, container_name,
-            metadata=None,
-            remove_metadata=None,
-            metadata_prefix='X-Container-Meta-',
-            remove_metadata_prefix='X-Remove-Container-Meta-'):
-        """Updates arbitrary metadata on container."""
+            create_update_metadata=None,
+            delete_metadata=None,
+            create_update_metadata_prefix='X-Container-Meta-',
+            delete_metadata_prefix='X-Remove-Container-Meta-'):
+        """Creates, Updates or deletes an containter metadata entry.
+
+        Container Metadata can be created, updated or deleted based on
+        metadata header or value. For detailed info, please refer to the
+        official API reference:
+        https://developer.openstack.org/api-ref/object-store/#create-update-or-delete-container-metadata
+        """
         url = str(container_name)
         headers = {}
+        if create_update_metadata:
+            for key in create_update_metadata:
+                metadata_header_name = create_update_metadata_prefix + key
+                headers[metadata_header_name] = create_update_metadata[key]
+        if delete_metadata:
+            for key in delete_metadata:
+                headers[delete_metadata_prefix + key] = delete_metadata[key]
 
-        if metadata is not None:
-            for key in metadata:
-                headers[metadata_prefix + key] = metadata[key]
-        if remove_metadata is not None:
-            for key in remove_metadata:
-                headers[remove_metadata_prefix + key] = remove_metadata[key]
-
-        resp, body = self.post(url, body=None, headers=headers)
+        resp, body = self.post(url, headers=headers, body=None)
         self.expected_success(204, resp.status)
         return resp, body
 
-    def delete_container_metadata(self, container_name, metadata,
-                                  metadata_prefix='X-Remove-Container-Meta-'):
-        """Deletes arbitrary metadata on container."""
-        url = str(container_name)
-        headers = {}
-
-        if metadata is not None:
-            for item in metadata:
-                headers[metadata_prefix + item] = metadata[item]
-
-        resp, body = self.post(url, body=None, headers=headers)
-        self.expected_success(204, resp.status)
-        return resp, body
+    update_container_metadata = debtcollector.moves.moved_function(
+        create_update_or_delete_container_metadata,
+        'update_container_metadata', __name__,
+        version='Queens', removal_version='Rocky')
 
     def list_container_metadata(self, container_name):
-        """Retrieves container metadata headers"""
+        """List all container metadata."""
         url = str(container_name)
         resp, body = self.head(url)
         self.expected_success(204, resp.status)
         return resp, body
 
-    def list_container_contents(self, container, params=None):
+    def list_container_objects(self, container_name, params=None):
         """List the objects in a container, given the container name
 
-           Returns the container object listing as a plain text list, or as
-           xml or json if that option is specified via the 'format' argument.
+        Returns the container object listing as a plain text list, or as
+        xml or json if that option is specified via the 'format' argument.
 
-           Optional Arguments:
-           limit = integer
-               For an integer value n, limits the number of results to at most
-               n values.
-
-           marker = 'string'
-               Given a string value x, return object names greater in value
-               than the specified marker.
-
-           prefix = 'string'
-               For a string value x, causes the results to be limited to names
-               beginning with the substring x.
-
-           format = 'json' or 'xml'
-               Specify either json or xml to return the respective serialized
-               response.
-               If json, returns a list of json objects
-               if xml, returns a string of xml
-
-           path = 'string'
-               For a string value x, return the object names nested in the
-               pseudo path (assuming preconditions are met - see below).
-
-           delimiter = 'character'
-               For a character c, return all the object names nested in the
-               container (without the need for the directory marker objects).
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://developer.openstack.org/api-ref/object-storage/?expanded=show-container-details-and-list-objects-detail
         """
 
-        url = str(container)
+        url = str(container_name)
         if params:
             url += '?'
             url += '&%s' % urllib.urlencode(params)
@@ -148,3 +123,7 @@
 
         self.expected_success([200, 204], resp.status)
         return resp, body
+
+    list_container_contents = debtcollector.moves.moved_function(
+        list_container_objects, 'list_container_contents', __name__,
+        version='Queens', removal_version='Rocky')
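
``debtcollector.moves.moved_function`` keeps the old names callable while
emitting a ``DeprecationWarning`` that points at the new name and the
planned removal release; a minimal, self-contained sketch of the same
pattern with purely illustrative function names::

    import warnings

    import debtcollector.moves

    def list_things(name, params=None):
        """New name of the helper."""
        return name, params

    # Old callers keep working but are warned about the rename.
    list_stuff = debtcollector.moves.moved_function(
        list_things, 'list_stuff', __name__,
        version='Queens', removal_version='Rocky')

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        list_stuff('my-container')
    print(caught[0].category.__name__)  # DeprecationWarning
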
diff --git a/tempest/test.py b/tempest/test.py
index a4cc2cc..9da85d5 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,10 +26,9 @@
 from tempest import clients
 from tempest.common import credentials_factory as credentials
 from tempest.common import utils
-import tempest.common.validation_resources as vresources
 from tempest import config
-from tempest.lib.common import cred_client
 from tempest.lib.common import fixed_network
+from tempest.lib.common import validation_resources as vr
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
@@ -98,16 +97,24 @@
     - resource_cleanup
     """
 
-    setUpClassCalled = False
-
     # NOTE(andreaf) credentials holds a list of the credentials to be allocated
     # at class setup time. Credential types can be 'primary', 'alt', 'admin' or
     # a list of roles - the first element of the list being a label, and the
     # rest the actual roles
     credentials = []
+
+    # Track if setUpClass was invoked
+    __setupclass_called = False
+
+    # Network resources to be provisioned for the requested test credentials.
+    # Only used with the dynamic credentials provider.
+    _network_resources = {}
+
+    # Stack of resource cleanups
+    _class_cleanups = []
+
     # Resources required to validate a server using ssh
-    validation_resources = {}
-    network_resources = {}
+    _validation_resources = {}
 
     # NOTE(sdague): log_format is defined inline here instead of using the oslo
     # default because going through the config path recouples config to the
@@ -122,23 +129,39 @@
     TIMEOUT_SCALING_FACTOR = 1
 
     @classmethod
+    def _reset_class(cls):
+        cls.__setup_credentials_called = False
+        cls.__resource_cleanup_called = False
+        cls.__skip_checks_called = False
+        # Stack of callables to be invoked in reverse order
+        cls._class_cleanups = []
+        # Stack of (name, callable) to be invoked in reverse order at teardown
+        cls._teardowns = []
+
+    @classmethod
     def setUpClass(cls):
+        cls.__setupclass_called = True
+        # Reset state
+        cls._reset_class()
         # It should never be overridden by descendants
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
-        cls.setUpClassCalled = True
-        # Stack of (name, callable) to be invoked in reverse order at teardown
-        cls.teardowns = []
         # All the configuration checks that may generate a skip
         cls.skip_checks()
+        if not cls.__skip_checks_called:
+            raise RuntimeError("skip_checks for %s did not call the super's "
+                               "skip_checks" % cls.__name__)
         try:
             # Allocation of all required credentials and client managers
-            cls.teardowns.append(('credentials', cls.clear_credentials))
+            cls._teardowns.append(('credentials', cls.clear_credentials))
             cls.setup_credentials()
+            if not cls.__setup_credentials_called:
+                raise RuntimeError("setup_credentials for %s did not call the "
+                                   "super's setup_credentials" % cls.__name__)
             # Shortcuts to clients
             cls.setup_clients()
             # Additional class-wide test resources
-            cls.teardowns.append(('resources', cls.resource_cleanup))
+            cls._teardowns.append(('resources', cls.resource_cleanup))
             cls.resource_setup()
         except Exception:
             etype, value, trace = sys.exc_info()
@@ -165,18 +188,29 @@
         # If there was no exception during setup we shall re-raise the first
         # exception in teardown
         re_raise = (etype is None)
-        while cls.teardowns:
-            name, teardown = cls.teardowns.pop()
+        while cls._teardowns:
+            name, teardown = cls._teardowns.pop()
             # Catch any exception in tearDown so we can re-raise the original
             # exception at the end
             try:
                 teardown()
+                if name == 'resources':
+                    if not cls.__resource_cleanup_called:
+                        raise RuntimeError(
+                            "resource_cleanup for %s did not call the "
+                            "super's resource_cleanup" % cls.__name__)
             except Exception as te:
                 sys_exec_info = sys.exc_info()
                 tetype = sys_exec_info[0]
-                # TODO(andreaf): Till we have the ability to cleanup only
-                # resources that were successfully setup in resource_cleanup,
-                # log AttributeError as info instead of exception.
+                # TODO(andreaf): Resource cleanup is often implemented by
+                # storing an array of resources at class level, and cleaning
+                # them up during `resource_cleanup`.
+                # In case of failure during setup, some resource arrays might
+                # not be defined at all, in which case the cleanup code might
+                # trigger an AttributeError. In such cases we log
+                # AttributeError as info instead of exception. Once all
+                # cleanups are migrated to addClassResourceCleanup we can
+                # remove this.
                 if tetype is AttributeError and name == 'resources':
                     LOG.info("tearDownClass of %s failed: %s", name, te)
                 else:
@@ -212,13 +246,37 @@
         """Class level skip checks.
 
         Subclasses verify in here all conditions that might prevent the
-        execution of the entire test class.
-        Checks implemented here may not make use API calls, and should rely on
-        configuration alone.
-        In general skip checks that require an API call are discouraged.
-        If one is really needed it may be implemented either in the
-        resource_setup or at test level.
+        execution of the entire test class. Skipping here prevents any other
+        class fixture from being executed, i.e. no credentials or other
+        resource allocation will happen.
+
+        Tests defined in the test class will no longer appear in test results.
+        The `setUpClass` for the entire test class will be marked as SKIPPED
+        instead.
+
+        At this stage no test credentials are available, so skip checks
+        should rely on configuration alone. This is deliberate since skips
+        based on the result of an API call are discouraged.
+
+        The following checks are implemented in `test.py` already:
+        - check that alt credentials are available when requested by the test
+        - check that admin credentials are available when requested by the test
+        - check that the identity version specified by the test is marked as
+          enabled in the configuration
+
+        Overriders of skip_checks must always invoke skip_checks on `super`
+        first.
+
+        Example::
+
+            @classmethod
+            def skip_checks(cls):
+                super(Example, cls).skip_checks()
+                if not CONF.service_available.my_service:
+                    skip_msg = ("%s skipped as my_service is not available")
+                    raise cls.skipException(skip_msg % cls.__name__)
         """
+        cls.__skip_checks_called = True
         identity_version = cls.get_identity_version()
         # setting force_tenant_isolation to True also needs admin credentials.
         if ('admin' in cls.credentials or
@@ -243,13 +301,67 @@
     def setup_credentials(cls):
         """Allocate credentials and create the client managers from them.
 
-        For every element of credentials param function creates tenant/user,
-        Then it creates client manager for that credential.
+        `setup_credentials` looks for the content of the `credentials`
+        attribute in the test class. If the value is a non-empty collection,
+        a credentials provider is setup, and credentials are provisioned or
+        allocated based on the content of the collection. Every set of
+        credentials is associated with a `cls.client_manager` object.
+        The client manager is accessible by tests via class attribute
+        `os_[type]`:
 
-        Network related tests must override this function with
-        set_network_resources() method, otherwise it will create
-        network resources(network resources are created in a later step).
+        Valid values in `credentials` are:
+        - 'primary':
+            A normal user is provisioned.
+            It can be used only once. Multiple entries will be ignored.
+            Clients are available at os_primary.
+        - 'alt':
+            A normal user other than 'primary' is provisioned.
+            It can be used only once. Multiple entries will be ignored.
+            Clients are available at os_alt.
+        - 'admin':
+            An admin user is provisioned.
+            It can be used only once. Multiple entries will be ignored.
+            Clients are available at os_admin.
+        - A list in the format ['any_label', 'role1', ... , 'roleN']:
+            A client with roles <list>[1:] is provisioned.
+            It can be used multiple times, with unique labels.
+            Clients are available at os_roles_<list>[0].
+
+        By default network resources are allocated (in case of dynamic
+        credentials). Tests that do not need network or that require a
+        custom network setup must specify which network resources shall
+        be provisioned using the `set_network_resources()` method (note
+        that it must be invoked before `setup_credentials` is invoked on
+        super).
+
+        Example::
+
+            class TestWithCredentials(test.BaseTestCase):
+
+                credentials = ['primary', 'admin',
+                               ['special', 'special_role1']]
+
+                @classmethod
+                def setup_credentials(cls):
+                    # set_network_resources must be called first
+                    cls.set_network_resources(network=True)
+                    super(TestWithCredentials, cls).setup_credentials()
+
+                @classmethod
+                def setup_clients(cls):
+                    cls.servers = cls.os_primary.compute.ServersClient()
+                    cls.admin_servers = cls.os_admin.compute.ServersClient()
+                    # certain API calls may require a user with a specific
+                    # role assigned. In this example `special_role1` is
+                    # assigned to the user in `cls.os_roles_special`.
+                    cls.special_servers = (
+                        cls.os_roles_special.compute.ServersClient())
+
+                def test_special_servers(self):
+                    # Do something with servers
+                    pass
         """
+        cls.__setup_credentials_called = True
         for credentials_type in cls.credentials:
             # This may raise an exception in case credentials are not available
             # In that case we want to let the exception through and the test
@@ -291,54 +403,184 @@
 
     @classmethod
     def setup_clients(cls):
-        """Create links to the clients into the test object."""
-        # TODO(andreaf) There is a fair amount of code that could me moved from
-        # base / test classes in here. Ideally tests should be able to only
-        # specify which client is `client` and nothing else.
+        """Create aliases to the clients in the client managers.
+
+        `setup_clients` is invoked after the credential provisioning step.
+        Client manager objects are available to tests already. The purpose
+        of this helper is to setup shortcuts to specific clients that are
+        useful for the tests implemented in the test class.
+
+        Its purpose is mostly code readability; however, it should be used
+        carefully to avoid doing exactly the opposite, i.e. making the code
+        unreadable and hard to debug. If aliases are defined in a super class
+        it won't be obvious what they refer to, so it's good practice to
+        define in the class all the aliases it uses. Aliases are meant to be
+        shortcuts to be used in tests, not shortcuts to avoid passing clients
+        to helper methods. If a helper method starts relying on a client
+        alias and a subclass overrides that alias, it becomes rather
+        difficult to understand what the helper method actually does.
+
+        Example::
+
+            class TestDoneItRight(test.BaseTestCase):
+
+                credentials = ['primary', 'alt']
+
+                @classmethod
+                def setup_clients(cls):
+                    super(TestDoneItRight, cls).setup_clients()
+                    cls.servers = cls.os_primary.ServersClient()
+                    cls.servers_alt = cls.os_alt.ServersClient()
+
+                def _a_good_helper(self, clients):
+                    # Some complex logic we're going to use many times
+                    servers = clients.ServersClient()
+                    vm = servers.create_server(...)
+
+                    def delete_server():
+                        test_utils.call_and_ignore_notfound_exc(
+                            servers.delete_server, vm['id'])
+
+                    self.addCleanup(delete_server)
+                    return vm
+
+                def test_with_servers(self):
+                    vm = self._a_good_helper(self.os_primary)
+                    vm_alt = self._a_good_helper(self.os_alt)
+                    self.servers.show_server(vm['id'])
+                    self.servers_alt.show_server(vm_alt['id'])
+        """
         pass
 
     @classmethod
     def resource_setup(cls):
-        """Class level resource setup for test cases."""
-        if (CONF.validation.ip_version_for_ssh not in (4, 6) and
-            CONF.service_available.neutron):
-            msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
-            raise lib_exc.InvalidConfiguration(
-                msg % CONF.validation.ip_version_for_ssh)
-        if hasattr(cls, "os_primary"):
-            vr = cls.validation_resources
-            cls.validation_resources = vresources.create_validation_resources(
-                cls.os_primary,
-                use_neutron=CONF.service_available.neutron,
-                ethertype='IPv' + str(CONF.validation.ip_version_for_ssh),
-                floating_network_id=CONF.network.public_network_id,
-                floating_network_name=CONF.network.floating_network_name,
-                **vr)
-        else:
-            LOG.warning("Client manager not found, validation resources not"
-                        " created")
+        """Class level resource setup for test cases.
+
+        `resource_setup` is invoked once all credentials (and related network
+        resources) have been provisioned and after client aliases - if any -
+        have been defined.
+
+        The use case for `resource_setup` is test optimization: provisioning
+        of project-specific "expensive" resources that are not dirtied by tests
+        and can thus safely be re-used by multiple tests.
+
+        System wide resources shared by all tests could instead be provisioned
+        only once, before the test run.
+
+        Resources provisioned here must be cleaned up during
+        `resource_cleanup`. This is best achieved by scheduling a cleanup via
+        `addClassResourceCleanup`.
+
+        Some test resources have an asynchronous delete process. It's best
+        practice for them to schedule a wait for delete via
+        `addClassResourceCleanup` to avoid having resources still in the
+        process of being deleted when we reach the credentials cleanup step.
+
+        Example::
+
+            @classmethod
+            def resource_setup(cls):
+                super(MyTest, cls).resource_setup()
+                servers = cls.os_primary.compute.ServersClient()
+                # Schedule delete and wait so that we can first delete the
+                # two servers and then wait for both to delete
+                # Create server 1
+                cls.shared_server = servers.create_server()
+                # Create server 2. If something goes wrong we schedule cleanup
+                # of server 1 anyways.
+                try:
+                    cls.shared_server2 = servers.create_server()
+                    # Wait server 2
+                    cls.addClassResourceCleanup(
+                        waiters.wait_for_server_termination,
+                        servers, cls.shared_server2['id'],
+                        ignore_error=False)
+                finally:
+                    # Wait server 1
+                    cls.addClassResourceCleanup(
+                        waiters.wait_for_server_termination,
+                        servers, cls.shared_server['id'],
+                        ignore_error=False)
+                        # Delete server 1
+                    # Delete server 1
+                        test_utils.call_and_ignore_notfound_exc,
+                        servers.delete_server,
+                        cls.shared_server['id'])
+                    # Delete server 2 (if it was created)
+                    if hasattr(cls, 'shared_server2'):
+                        cls.addClassResourceCleanup(
+                            test_utils.call_and_ignore_notfound_exc,
+                            servers.delete_server,
+                            cls.shared_server2['id'])
+        """
+        pass
 
     @classmethod
     def resource_cleanup(cls):
         """Class level resource cleanup for test cases.
 
-        Resource cleanup must be able to handle the case of partially setup
-        resources, in case a failure during `resource_setup` should happen.
+        Resource cleanup processes the stack of cleanups produced by
+        `addClassResourceCleanup` and then cleans up validation resources
+        if any were provisioned.
+
+        All cleanups are processed even if some of them fail. Exceptions
+        are accumulated and re-raised as a `MultipleExceptions` at the end.
+
+        In most cases test cases won't need to override `resource_cleanup`,
+        but if they do they must invoke `resource_cleanup` on super.
+
+        Example::
+
+            class TestWithReallyComplexCleanup(test.BaseTestCase):
+
+                @classmethod
+                def resource_setup(cls):
+                    # provision resource A
+                    cls.addClassResourceCleanup(delete_resource, A)
+                    # provision resource B
+                    cls.addClassResourceCleanup(delete_resource, B)
+
+                @classmethod
+                def resource_cleanup(cls):
+                    # It's possible to override resource_cleanup but in most
+                    # cases it shouldn't be required. Nothing that may fail
+                    # should be executed before the call to super since it
+                    # might cause resource leak in case of error.
+                    super(TestWithReallyComplexCleanup, cls).resource_cleanup()
+                    # At this point test credentials are still available but
+                    # anything from the cleanup stack has been already deleted.
         """
-        if cls.validation_resources:
-            if hasattr(cls, "os_primary"):
-                vr = cls.validation_resources
-                vresources.clear_validation_resources(
-                    cls.os_primary,
-                    use_neutron=CONF.service_available.neutron, **vr)
-                cls.validation_resources = {}
-            else:
-                LOG.warning("Client manager not found, validation resources "
-                            "not deleted")
+        cls.__resource_cleanup_called = True
+        cleanup_errors = []
+        while cls._class_cleanups:
+            try:
+                fn, args, kwargs = cls._class_cleanups.pop()
+                fn(*args, **kwargs)
+            except Exception:
+                cleanup_errors.append(sys.exc_info())
+        if cleanup_errors:
+            raise testtools.MultipleExceptions(*cleanup_errors)
+
+    @classmethod
+    def addClassResourceCleanup(cls, fn, *arguments, **keywordArguments):
+        """Add a cleanup function to be called during resource_cleanup.
+
+        Functions added with addClassResourceCleanup will be called in reverse
+        order of adding at the beginning of resource_cleanup, before any
+        credential, networking or validation resources cleanup is processed.
+
+        If a function added with addClassResourceCleanup raises an exception,
+        the error will be recorded as a test error, and the next cleanup will
+        then be run.
+
+        Cleanup functions are always called during the test class tearDown
+        fixture, even if an exception occurred during setUp or tearDown.
+        """
+        cls._class_cleanups.append((fn, arguments, keywordArguments))
 
     def setUp(self):
         super(BaseTestCase, self).setUp()
-        if not self.setUpClassCalled:
+        if not self.__setupclass_called:
             raise RuntimeError("setUpClass does not calls the super's"
                                "setUpClass in the "
                                + self.__class__.__name__)
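
``addClassResourceCleanup`` above only stacks ``(fn, args, kwargs)`` tuples;
``resource_cleanup`` pops them, so the last cleanup registered runs first and
failures are collected instead of aborting the loop. A stripped-down,
stand-alone sketch of that mechanism (all names are illustrative)::

    import sys

    import testtools

    _class_cleanups = []

    def add_class_resource_cleanup(fn, *args, **kwargs):
        _class_cleanups.append((fn, args, kwargs))

    def resource_cleanup():
        errors = []
        while _class_cleanups:
            fn, args, kwargs = _class_cleanups.pop()  # LIFO order
            try:
                fn(*args, **kwargs)
            except Exception:
                errors.append(sys.exc_info())
        if errors:
            raise testtools.MultipleExceptions(*errors)

    def delete(name):
        print('deleting %s' % name)

    add_class_resource_cleanup(delete, 'server-1')  # added first, runs last
    add_class_resource_cleanup(delete, 'server-2')
    resource_cleanup()  # deletes server-2, then server-1
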
@@ -369,37 +611,6 @@
     def credentials_provider(self):
         return self._get_credentials_provider()
 
-    @property
-    def identity_utils(self):
-        """A client that abstracts v2 and v3 identity operations.
-
-        This can be used for creating and tearing down projects in tests. It
-        should not be used for testing identity features.
-        """
-        if CONF.identity.auth_version == 'v2':
-            client = self.os_admin.identity_client
-            users_client = self.os_admin.users_client
-            project_client = self.os_admin.tenants_client
-            roles_client = self.os_admin.roles_client
-            domains_client = None
-        else:
-            client = self.os_admin.identity_v3_client
-            users_client = self.os_admin.users_v3_client
-            project_client = self.os_admin.projects_client
-            roles_client = self.os_admin.roles_v3_client
-            domains_client = self.os_admin.domains_client
-
-        try:
-            domain = client.auth_provider.credentials.project_domain_name
-        except AttributeError:
-            domain = 'Default'
-
-        return cred_client.get_creds_client(client, project_client,
-                                            users_client,
-                                            roles_client,
-                                            domains_client,
-                                            project_domain_name=domain)
-
     @classmethod
     def get_identity_version(cls):
         """Returns the identity version used by the test class"""
@@ -421,7 +632,7 @@
                                              False)
 
             cls._creds_provider = credentials.get_credentials_provider(
-                name=cls.__name__, network_resources=cls.network_resources,
+                name=cls.__name__, network_resources=cls._network_resources,
                 force_tenant_isolation=force_tenant_isolation)
         return cls._creds_provider
 
@@ -476,62 +687,131 @@
         if hasattr(cls, '_creds_provider'):
             cls._creds_provider.clear_creds()
 
+    @staticmethod
+    def _validation_resources_params_from_conf():
+        return dict(
+            keypair=(CONF.validation.auth_method.lower() == "keypair"),
+            floating_ip=(CONF.validation.connect_method.lower() == "floating"),
+            security_group=CONF.validation.security_group,
+            security_group_rules=CONF.validation.security_group_rules,
+            use_neutron=CONF.service_available.neutron,
+            ethertype='IPv' + str(CONF.validation.ip_version_for_ssh),
+            floating_network_id=CONF.network.public_network_id,
+            floating_network_name=CONF.network.floating_network_name)
+
     @classmethod
-    def set_validation_resources(cls, keypair=None, floating_ip=None,
-                                 security_group=None,
-                                 security_group_rules=None):
-        """Specify which ssh server validation resources should be created.
+    def get_class_validation_resources(cls, os_clients):
+        """Provision validation resources according to configuration
 
-        Each of the argument must be set to either None, True or False, with
-        None - use default from config (security groups and security group
-               rules get created when set to None)
-        False - Do not create the validation resource
-        True - create the validation resource
+        This is a wrapper around `create_validation_resources` from
+        `tempest.lib.common.validation_resources` that passes parameters
+        from Tempest configuration. Only one instance of class level
+        validation resources is managed by the helper, so if resources
+        were already provisioned before, the existing ones are returned.
 
-        @param keypair
-        @param security_group
-        @param security_group_rules
-        @param floating_ip
+        Resources are returned as a dictionary. They are also scheduled for
+        automatic cleanup during class teardown using
+        `addClassResourceCleanup`.
+
+        If `CONF.validation.run_validation` is False no resource will be
+        provisioned at all.
+
+        @param os_clients: Clients to be used to provision the resources.
         """
         if not CONF.validation.run_validation:
             return
 
-        if keypair is None:
-            keypair = (CONF.validation.auth_method.lower() == "keypair")
+        if os_clients in cls._validation_resources:
+            return cls._validation_resources[os_clients]
 
-        if floating_ip is None:
-            floating_ip = (CONF.validation.connect_method.lower() ==
-                           "floating")
+        if (CONF.validation.ip_version_for_ssh not in (4, 6) and
+                CONF.service_available.neutron):
+            msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
+            raise lib_exc.InvalidConfiguration(
+                msg % CONF.validation.ip_version_for_ssh)
 
-        if security_group is None:
-            security_group = CONF.validation.security_group
+        resources = vr.create_validation_resources(
+            os_clients,
+            **cls._validation_resources_params_from_conf())
 
-        if security_group_rules is None:
-            security_group_rules = CONF.validation.security_group_rules
+        cls.addClassResourceCleanup(
+            vr.clear_validation_resources, os_clients,
+            use_neutron=CONF.service_available.neutron,
+            **resources)
+        cls._validation_resources[os_clients] = resources
+        return resources
 
-        if not cls.validation_resources:
-            cls.validation_resources = {
-                'keypair': keypair,
-                'security_group': security_group,
-                'security_group_rules': security_group_rules,
-                'floating_ip': floating_ip}
+    def get_test_validation_resources(self, os_clients):
+        """Returns a dict of validation resources according to configuration
+
+        Initialise a validation resources fixture based on configuration,
+        start the fixture and return the validation resources.
+
+        If `CONF.validation.run_validation` is False no resource will be
+        provisioned at all.
+
+        @param os_clients: Clients to be used to provision the resources.
+        """
+
+        params = {}
+        # The test will try to use the returned resources, so we always
+        # set up the fixture. If validation is disabled though, we don't
+        # need to provision anything, which is the default behavior of
+        # the fixture when no parameters are passed.
+        if CONF.validation.run_validation:
+            params = self._validation_resources_params_from_conf()
+
+        validation = self.useFixture(
+            vr.ValidationResourcesFixture(os_clients, **params))
+        return validation.resources
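
Together the two helpers cover class-scoped and test-scoped validation
resources; a sketch of how a test class might consume them (class and
attribute names below are illustrative only, not part of this change)::

    class MyServerTest(test.BaseTestCase):

        credentials = ['primary']

        @classmethod
        def resource_setup(cls):
            super(MyServerTest, cls).resource_setup()
            # Shared by all tests in the class; cleanup is scheduled
            # automatically via addClassResourceCleanup.
            cls.validation = cls.get_class_validation_resources(
                cls.os_primary)

        def test_can_ssh(self):
            # Provisioned per test and cleaned up by the fixture.
            per_test = self.get_test_validation_resources(self.os_primary)
            keypair = per_test['keypair']
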
 
     @classmethod
     def set_network_resources(cls, network=False, router=False, subnet=False,
                               dhcp=False):
         """Specify which network resources should be created
 
+        The dynamic credentials provider by default provisions network
+        resources for each user/project that is provisioned. This behavior
+        can be altered using this method, which allows tests to define which
+        specific network resources are to be provisioned - none if no
+        parameter is specified.
+
+        This method is designed so that only the network resources set on the
+        leaf class are honoured.
+
+        Credentials are provisioned as part of the class setup fixture,
+        during the `setup_credentials` step. For this to be effective this
+        helper must be invoked before super's `setup_credentials` is executed.
+
         @param network
         @param router
         @param subnet
         @param dhcp
+
+        Example::
+
+            @classmethod
+            def setup_credentials(cls):
+                # Do not setup network resources for this test
+                cls.set_network_resources()
+                super(MyTest, cls).setup_credentials()
         """
-        # network resources should be set only once from callers
+        # If this is invoked after the credentials are set up, it has no
+        # effect. To avoid this situation, fail the test in case this was
+        # invoked too late in the test lifecycle.
+        if cls.__setup_credentials_called:
+            raise RuntimeError(
+                "set_network_resources invoked after setup_credentials on the "
+                "super class has already been invoked. For "
+                "set_network_resources to have effect please invoke it before "
+                "the call to super().setup_credentials")
+
+        # Network resources should be set only once from callers
         # in order to ensure that even if it's called multiple times in
         # a chain of overloaded methods, the attribute is set only
-        # in the leaf class
-        if not cls.network_resources:
-            cls.network_resources = {
+        # in the leaf class.
+        if not cls._network_resources:
+            cls._network_resources = {
                 'network': network,
                 'router': router,
                 'subnet': subnet,
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index 6345728..5024100 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -37,14 +37,16 @@
         fake_image = mock.Mock(response={'location': image_id})
         compute_images_client.create_image.return_value = fake_image
         # call the utility method
-        image = compute_base.BaseV2ComputeTest.create_image_from_server(
-            mock.sentinel.server_id, name='fake-snapshot-name')
+        cleanup_path = 'tempest.test.BaseTestCase.addClassResourceCleanup'
+        with mock.patch(cleanup_path) as mock_cleanup:
+            image = compute_base.BaseV2ComputeTest.create_image_from_server(
+                mock.sentinel.server_id, name='fake-snapshot-name')
         self.assertEqual(fake_image, image)
         # make our assertions
         compute_images_client.create_image.assert_called_once_with(
             mock.sentinel.server_id, name='fake-snapshot-name')
-        self.assertEqual(1, len(compute_base.BaseV2ComputeTest.images))
-        self.assertEqual(image_id, compute_base.BaseV2ComputeTest.images[0])
+        mock_cleanup.assert_called_once()
+        self.assertIn(image_id, mock_cleanup.call_args[0])
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
                          compute_images_client=mock.DEFAULT,
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index 6aa7a42..ebcf5d1 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -40,6 +40,7 @@
 from tempest.tests import fake_config
 from tempest.tests.lib import fake_http
 from tempest.tests.lib import fake_identity
+from tempest.tests.lib.services import registry_fixture
 
 
 class TestDynamicCredentialProvider(base.TestCase):
@@ -62,6 +63,7 @@
     def setUp(self):
         super(TestDynamicCredentialProvider, self).setUp()
         self.useFixture(fake_config.ConfigFixture())
+        self.useFixture(registry_fixture.RegistryFixture())
         self.patchobject(config, 'TempestConfigPrivate',
                          fake_config.FakePrivate)
         self.patchobject(self.token_client_class, 'raw_request',
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index 5402e47..9b10159 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -32,6 +32,7 @@
 from tempest.tests import base
 from tempest.tests import fake_config
 from tempest.tests.lib import fake_identity
+from tempest.tests.lib.services import registry_fixture
 
 
 class TestPreProvisionedCredentials(base.TestCase):
@@ -92,9 +93,8 @@
             return_value=self.test_accounts))
         self.useFixture(fixtures.MockPatch(
             'os.path.isfile', return_value=True))
-        # NOTE(andreaf) Ensure config is loaded so service clients are
-        # registered in the registry before tests
-        config.service_client_config()
+        # Make sure we leave the registry clean
+        self.useFixture(registry_fixture.RegistryFixture())
 
     def tearDown(self):
         super(TestPreProvisionedCredentials, self).tearDown()
diff --git a/tempest/tests/lib/common/test_validation_resources.py b/tempest/tests/lib/common/test_validation_resources.py
new file mode 100644
index 0000000..d5139f4
--- /dev/null
+++ b/tempest/tests/lib/common/test_validation_resources.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2017 IBM Corp.
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import fixtures
+import mock
+import testtools
+
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services import clients
+from tempest.tests import base
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib.services import registry_fixture
+
+FAKE_SECURITY_GROUP = {'security_group': {'id': 'sg_id'}}
+FAKE_KEYPAIR = {'keypair': {'name': 'keypair_name'}}
+FAKE_FIP_NOVA_NET = {'floating_ip': {'ip': '1.2.3.4', 'id': '1234'}}
+FAKE_FIP_NEUTRON = {'floatingip': {'floating_ip_address': '1.2.3.4',
+                                   'id': '1234'}}
+
+SERVICES = 'tempest.lib.services'
+SG_CLIENT = (SERVICES + '.%s.security_groups_client.SecurityGroupsClient.%s')
+SGR_CLIENT = (SERVICES + '.%s.security_group_rules_client.'
+              'SecurityGroupRulesClient.create_security_group_rule')
+KP_CLIENT = (SERVICES + '.compute.keypairs_client.KeyPairsClient.%s')
+FIP_CLIENT = (SERVICES + '.%s.floating_ips_client.FloatingIPsClient.%s')
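+
+# The constants above are just dotted-path templates for fixtures.MockPatch;
+# the tuple picks the service package (compute or network) and the method to
+# patch. A small illustrative expansion:
+#
+#     print(SG_CLIENT % ('network', 'create_security_group'))
+#     # -> tempest.lib.services.network.security_groups_client.
+#     #    SecurityGroupsClient.create_security_group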
+
+
+class TestValidationResources(base.TestCase):
+
+    def setUp(self):
+        super(TestValidationResources, self).setUp()
+        self.useFixture(registry_fixture.RegistryFixture())
+        self.mock_sg_compute = self.useFixture(fixtures.MockPatch(
+            SG_CLIENT % ('compute', 'create_security_group'), autospec=True,
+            return_value=FAKE_SECURITY_GROUP))
+        self.mock_sg_network = self.useFixture(fixtures.MockPatch(
+            SG_CLIENT % ('network', 'create_security_group'), autospec=True,
+            return_value=FAKE_SECURITY_GROUP))
+        self.mock_sgr_compute = self.useFixture(fixtures.MockPatch(
+            SGR_CLIENT % 'compute', autospec=True))
+        self.mock_sgr_network = self.useFixture(fixtures.MockPatch(
+            SGR_CLIENT % 'network', autospec=True))
+        self.mock_kp = self.useFixture(fixtures.MockPatch(
+            KP_CLIENT % 'create_keypair', autospec=True,
+            return_value=FAKE_KEYPAIR))
+        self.mock_fip_compute = self.useFixture(fixtures.MockPatch(
+            FIP_CLIENT % ('compute', 'create_floating_ip'), autospec=True,
+            return_value=FAKE_FIP_NOVA_NET))
+        self.mock_fip_network = self.useFixture(fixtures.MockPatch(
+            FIP_CLIENT % ('network', 'create_floatingip'), autospec=True,
+            return_value=FAKE_FIP_NEUTRON))
+        self.os = clients.ServiceClients(
+            fake_credentials.FakeKeystoneV3Credentials(), 'fake_uri')
+
+    def test_create_ssh_security_group_nova_net(self):
+        expected_sg_id = FAKE_SECURITY_GROUP['security_group']['id']
+        sg = vr.create_ssh_security_group(self.os, add_rule=True,
+                                          use_neutron=False)
+        self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+        # Neutron clients have not been used
+        self.assertEqual(self.mock_sg_network.mock.call_count, 0)
+        self.assertEqual(self.mock_sgr_network.mock.call_count, 0)
+        # Nova-net clients assertions
+        self.assertGreater(self.mock_sg_compute.mock.call_count, 0)
+        self.assertGreater(self.mock_sgr_compute.mock.call_count, 0)
+        for call in self.mock_sgr_compute.mock.call_args_list[1:]:
+            self.assertIn(expected_sg_id, call[1].values())
+
+    def test_create_ssh_security_group_neutron(self):
+        expected_sg_id = FAKE_SECURITY_GROUP['security_group']['id']
+        expected_ethertype = 'fake_ethertype'
+        sg = vr.create_ssh_security_group(self.os, add_rule=True,
+                                          use_neutron=True,
+                                          ethertype=expected_ethertype)
+        self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+        # Nova-net clients have not been used
+        self.assertEqual(self.mock_sg_compute.mock.call_count, 0)
+        self.assertEqual(self.mock_sgr_compute.mock.call_count, 0)
+        # Neutron clients assertions
+        self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+        self.assertGreater(self.mock_sgr_network.mock.call_count, 0)
+        # Check SG ID and ethertype are passed down to rules
+        for call in self.mock_sgr_network.mock.call_args_list[1:]:
+            self.assertIn(expected_sg_id, call[1].values())
+            self.assertIn(expected_ethertype, call[1].values())
+
+    def test_create_ssh_security_no_rules(self):
+        sg = vr.create_ssh_security_group(self.os, add_rule=False)
+        self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+        # SG Rules clients have not been used
+        self.assertEqual(self.mock_sgr_compute.mock.call_count, 0)
+        self.assertEqual(self.mock_sgr_network.mock.call_count, 0)
+
+    @mock.patch.object(vr, 'create_ssh_security_group',
+                       return_value=FAKE_SECURITY_GROUP['security_group'])
+    def test_create_validation_resources_nova_net(self, mock_create_sg):
+        expected_floating_network_id = 'my_fni'
+        expected_floating_network_name = 'my_fnn'
+        resources = vr.create_validation_resources(
+            self.os, keypair=True, floating_ip=True, security_group=True,
+            security_group_rules=True, ethertype='IPv6', use_neutron=False,
+            floating_network_id=expected_floating_network_id,
+            floating_network_name=expected_floating_network_name)
+        # Keypair calls
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        # Floating IP calls
+        self.assertGreater(self.mock_fip_compute.mock.call_count, 0)
+        for call in self.mock_fip_compute.mock.call_args_list[1:]:
+            self.assertIn(expected_floating_network_name, call[1].values())
+            self.assertNotIn(expected_floating_network_id, call[1].values())
+        self.assertEqual(self.mock_fip_network.mock.call_count, 0)
+        # SG calls
+        mock_create_sg.assert_called_once()
+        # Resources
+        for resource in ['keypair', 'floating_ip', 'security_group']:
+            self.assertIn(resource, resources)
+        self.assertEqual(FAKE_KEYPAIR['keypair'], resources['keypair'])
+        self.assertEqual(FAKE_SECURITY_GROUP['security_group'],
+                         resources['security_group'])
+        self.assertEqual(FAKE_FIP_NOVA_NET['floating_ip'],
+                         resources['floating_ip'])
+
+    @mock.patch.object(vr, 'create_ssh_security_group',
+                       return_value=FAKE_SECURITY_GROUP['security_group'])
+    def test_create_validation_resources_neutron(self, mock_create_sg):
+        expected_floating_network_id = 'my_fni'
+        expected_floating_network_name = 'my_fnn'
+        resources = vr.create_validation_resources(
+            self.os, keypair=True, floating_ip=True, security_group=True,
+            security_group_rules=True, ethertype='IPv6', use_neutron=True,
+            floating_network_id=expected_floating_network_id,
+            floating_network_name=expected_floating_network_name)
+        # Keypair calls
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        # Floating IP calls
+        self.assertEqual(self.mock_fip_compute.mock.call_count, 0)
+        self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+        for call in self.mock_fip_network.mock.call_args_list[1:]:
+            self.assertIn(expected_floating_network_id, call[1].values())
+            self.assertNotIn(expected_floating_network_name, call[1].values())
+        # SG calls
+        mock_create_sg.assert_called_once()
+        # Resources
+        for resource in ['keypair', 'floating_ip', 'security_group']:
+            self.assertIn(resource, resources)
+        self.assertEqual(FAKE_KEYPAIR['keypair'], resources['keypair'])
+        self.assertEqual(FAKE_SECURITY_GROUP['security_group'],
+                         resources['security_group'])
+        self.assertIn('ip', resources['floating_ip'])
+        self.assertEqual(resources['floating_ip']['ip'],
+                         FAKE_FIP_NEUTRON['floatingip']['floating_ip_address'])
+        self.assertEqual(resources['floating_ip']['id'],
+                         FAKE_FIP_NEUTRON['floatingip']['id'])
+
+
+class TestClearValidationResourcesFixture(base.TestCase):
+
+    def setUp(self):
+        super(TestClearValidationResourcesFixture, self).setUp()
+        self.useFixture(registry_fixture.RegistryFixture())
+        self.mock_sg_compute = self.useFixture(fixtures.MockPatch(
+            SG_CLIENT % ('compute', 'delete_security_group'), autospec=True))
+        self.mock_sg_network = self.useFixture(fixtures.MockPatch(
+            SG_CLIENT % ('network', 'delete_security_group'), autospec=True))
+        self.mock_sg_wait_compute = self.useFixture(fixtures.MockPatch(
+            SG_CLIENT % ('compute', 'wait_for_resource_deletion'),
+            autospec=True))
+        self.mock_sg_wait_network = self.useFixture(fixtures.MockPatch(
+            SG_CLIENT % ('network', 'wait_for_resource_deletion'),
+            autospec=True))
+        self.mock_kp = self.useFixture(fixtures.MockPatch(
+            KP_CLIENT % 'delete_keypair', autospec=True))
+        self.mock_fip_compute = self.useFixture(fixtures.MockPatch(
+            FIP_CLIENT % ('compute', 'delete_floating_ip'), autospec=True))
+        self.mock_fip_network = self.useFixture(fixtures.MockPatch(
+            FIP_CLIENT % ('network', 'delete_floatingip'), autospec=True))
+        self.os = clients.ServiceClients(
+            fake_credentials.FakeKeystoneV3Credentials(), 'fake_uri')
+
+    def test_clear_validation_resources_nova_net(self):
+        vr.clear_validation_resources(
+            self.os,
+            floating_ip=FAKE_FIP_NOVA_NET['floating_ip'],
+            security_group=FAKE_SECURITY_GROUP['security_group'],
+            keypair=FAKE_KEYPAIR['keypair'],
+            use_neutron=False)
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        for call in self.mock_kp.mock.call_args_list[1:]:
+            self.assertIn(FAKE_KEYPAIR['keypair']['name'], call[1].values())
+        self.assertGreater(self.mock_sg_compute.mock.call_count, 0)
+        for call in self.mock_sg_compute.mock.call_args_list[1:]:
+            self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+                          call[1].values())
+        self.assertGreater(self.mock_sg_wait_compute.mock.call_count, 0)
+        for call in self.mock_sg_wait_compute.mock.call_args_list[1:]:
+            self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+                          call[1].values())
+        self.assertEqual(self.mock_sg_network.mock.call_count, 0)
+        self.assertEqual(self.mock_sg_wait_network.mock.call_count, 0)
+        self.assertGreater(self.mock_fip_compute.mock.call_count, 0)
+        for call in self.mock_fip_compute.mock.call_args_list[1:]:
+            self.assertIn(FAKE_FIP_NOVA_NET['floating_ip']['id'],
+                          call[1].values())
+        self.assertEqual(self.mock_fip_network.mock.call_count, 0)
+
+    def test_clear_validation_resources_neutron(self):
+        vr.clear_validation_resources(
+            self.os,
+            floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+            security_group=FAKE_SECURITY_GROUP['security_group'],
+            keypair=FAKE_KEYPAIR['keypair'],
+            use_neutron=True)
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        for call in self.mock_kp.mock.call_args_list[1:]:
+            self.assertIn(FAKE_KEYPAIR['keypair']['name'], call[1].values())
+        self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+        for call in self.mock_sg_network.mock.call_args_list[1:]:
+            self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+                          call[1].values())
+        self.assertGreater(self.mock_sg_wait_network.mock.call_count, 0)
+        for call in self.mock_sg_wait_network.mock.call_args_list[1:]:
+            self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+                          call[1].values())
+        self.assertEqual(self.mock_sg_compute.mock.call_count, 0)
+        self.assertEqual(self.mock_sg_wait_compute.mock.call_count, 0)
+        self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+        for call in self.mock_fip_network.mock.call_args_list[1:]:
+            self.assertIn(FAKE_FIP_NEUTRON['floatingip']['id'],
+                          call[1].values())
+        self.assertEqual(self.mock_fip_compute.mock.call_count, 0)
+
+    def test_clear_validation_resources_exceptions(self):
+        # Test that even with exceptions all cleanups are invoked and that only
+        # the first exception is reported.
+        # NOTE(andreaf) There's no way of knowing which exception is going to
+        # be raised first unless we enforce which resource is cleared first,
+        # which is not really interesting, but also not harmful. The keypair
+        # is cleared first here.
+        self.mock_kp.mock.side_effect = Exception('keypair exception')
+        self.mock_sg_network.mock.side_effect = Exception('sg exception')
+        self.mock_fip_network.mock.side_effect = Exception('fip exception')
+        with testtools.ExpectedException(Exception, value_re='keypair'):
+            vr.clear_validation_resources(
+                self.os,
+                floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+                security_group=FAKE_SECURITY_GROUP['security_group'],
+                keypair=FAKE_KEYPAIR['keypair'],
+                use_neutron=True)
+        # Delete calls are still made on all clients despite the exceptions
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+        self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+    def test_clear_validation_resources_wait_not_found_wait(self):
+        # Test that a NotFound on wait does not raise an exception
+        self.mock_sg_wait_network.mock.side_effect = lib_exc.NotFound('yay')
+        vr.clear_validation_resources(
+            self.os,
+            floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+            security_group=FAKE_SECURITY_GROUP['security_group'],
+            keypair=FAKE_KEYPAIR['keypair'],
+            use_neutron=True)
+        # Client calls are still made, including the wait call
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+        self.assertGreater(self.mock_sg_wait_network.mock.call_count, 0)
+        self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+    def test_clear_validation_resources_wait_not_found_delete(self):
+        # Test that a NotFound on delete does not raise an exception
+        self.mock_kp.mock.side_effect = lib_exc.NotFound('yay')
+        self.mock_sg_network.mock.side_effect = lib_exc.NotFound('yay')
+        self.mock_fip_network.mock.side_effect = lib_exc.NotFound('yay')
+        vr.clear_validation_resources(
+            self.os,
+            floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+            security_group=FAKE_SECURITY_GROUP['security_group'],
+            keypair=FAKE_KEYPAIR['keypair'],
+            use_neutron=True)
+        # Client calls are still made, but not the wait call
+        self.assertGreater(self.mock_kp.mock.call_count, 0)
+        self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+        self.assertEqual(self.mock_sg_wait_network.mock.call_count, 0)
+        self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+
+class TestValidationResourcesFixture(base.TestCase):
+
+    @mock.patch.object(vr, 'create_validation_resources', autospec=True)
+    def test_use_fixture(self, mock_vr):
+        exp_vr = dict(keypair='keypair',
+                      floating_ip='floating_ip',
+                      security_group='security_group')
+        mock_vr.return_value = exp_vr
+        exp_clients = 'clients'
+        exp_parameters = dict(keypair=True, floating_ip=True,
+                              security_group=True, security_group_rules=True,
+                              ethertype='v6', use_neutron=True,
+                              floating_network_id='fnid',
+                              floating_network_name='fnname')
+        # Mock the cleanup first
+        self.useFixture(fixtures.MockPatchObject(
+            vr, 'clear_validation_resources', autospec=True))
+        # Then use the vr fixture, so that the mock is still in place when
+        # the fixture is cleaned up
+        vr_fixture = self.useFixture(vr.ValidationResourcesFixture(
+            exp_clients, **exp_parameters))
+        # Assert the validation resources have been provisioned
+        mock_vr.assert_called_once_with(exp_clients, **exp_parameters)
+        # Assert the validation resources have been set up in the fixture
+        self.assertEqual(exp_vr, vr_fixture.resources)
+
+    @mock.patch.object(vr, 'clear_validation_resources', autospec=True)
+    @mock.patch.object(vr, 'create_validation_resources', autospec=True)
+    def test_use_fixture_context(self, mock_vr, mock_clear):
+        exp_vr = dict(keypair='keypair',
+                      floating_ip='floating_ip',
+                      security_group='security_group')
+        mock_vr.return_value = exp_vr
+        exp_clients = 'clients'
+        exp_parameters = dict(keypair=True, floating_ip=True,
+                              security_group=True, security_group_rules=True,
+                              ethertype='v6', use_neutron=True,
+                              floating_network_id='fnid',
+                              floating_network_name='fnname')
+        with vr.ValidationResourcesFixture(exp_clients,
+                                           **exp_parameters) as vr_fixture:
+            # Assert the validation resources have been provisioned
+            mock_vr.assert_called_once_with(exp_clients, **exp_parameters)
+            # Assert the validation resources have been set up in the fixture
+            self.assertEqual(exp_vr, vr_fixture.resources)
+        # After the context manager exits, clear is invoked
+        exp_vr['use_neutron'] = exp_parameters['use_neutron']
+        mock_clear.assert_called_once_with(exp_clients, **exp_vr)
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
new file mode 100644
index 0000000..8484209
--- /dev/null
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -0,0 +1,65 @@
+# Copyright 2017 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import fixtures
+
+from tempest.lib.services import clients
+
+
+class RegistryFixture(fixtures.Fixture):
+    """A fixture to setup a test client registry
+
+    The clients registry is a singleton. In Tempest it's filled with
+    content from configuration. When testing Tempest lib classes without
+    configuration it's handy to have the registry set up to be able to access
+    service client factories.
+
+    This fixture sets up the registry using a fake plugin, which includes all
+    services specified at __init__ time. Any other plugin in the registry
+    is removed at setUp time. The fake plugin is removed from the registry
+    on cleanup.
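+
+    A minimal usage sketch, mirroring how the fixture is used by the unit
+    tests in this tree (``MyLibTest`` is a hypothetical test class)::
+
+        class MyLibTest(base.TestCase):
+
+            def setUp(self):
+                super(MyLibTest, self).setUp()
+                # All registered service clients now come from the fake plugin
+                self.useFixture(RegistryFixture())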
+    """
+
+    PLUGIN_NAME = 'fake_plugin_for_test'
+
+    def __init__(self):
+        """Initialise the registry fixture"""
+        self.services = set(['compute', 'identity.v2', 'identity.v3',
+                             'image.v1', 'image.v2', 'network', 'volume.v1',
+                             'volume.v2', 'volume.v3'])
+
+    def _setUp(self):
+        # Cleanup the registry
+        registry = clients.ClientsRegistry()
+        registry._service_clients = {}
+        # Prepare the clients for registration
+        all_clients = []
+        service_clients = clients.tempest_modules()
+        for sc in self.services:
+            sc_module = service_clients[sc]
+            sc_unversioned = sc.split('.')[0]
+            sc_name = sc.replace('.', '_')
+            # Pass the bare minimum params to satisfy the clients interface
+            service_client_data = dict(
+                name=sc_name, service_version=sc, service=sc_unversioned,
+                module_path=sc_module.__name__,
+                client_names=sc_module.__all__)
+            all_clients.append(service_client_data)
+        registry.register_service_client(self.PLUGIN_NAME, all_clients)
+
+        def _cleanup():
+            del registry._service_clients[self.PLUGIN_NAME]
+
+        self.addCleanup(_cleanup)
diff --git a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
index 5ac5c08..840617c 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
@@ -93,7 +93,8 @@
             bytes_body,
             group_snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
 
-    def _test_list_group_snapshots(self, bytes_body=False, detail=False):
+    def _test_list_group_snapshots(self, detail=False, bytes_body=False,
+                                   mock_args='group_snapshots', **params):
         resp_body = []
         if detail:
             resp_body = self.FAKE_LIST_GROUP_SNAPSHOTS
@@ -111,8 +112,10 @@
             self.client.list_group_snapshots,
             'tempest.lib.common.rest_client.RestClient.get',
             resp_body,
-            bytes_body,
-            detail=detail)
+            to_utf=bytes_body,
+            mock_args=[mock_args],
+            detail=detail,
+            **params)
 
     def test_create_group_snapshot_with_str_body(self):
         self._test_create_group_snapshot()
@@ -132,6 +135,25 @@
     def test_list_group_snapshots_with_bytes_body(self):
         self._test_list_group_snapshots(bytes_body=True)
 
+    def test_list_group_snapshots_with_detail_with_str_body(self):
+        mock_args = "group_snapshots/detail"
+        self._test_list_group_snapshots(detail=True, mock_args=mock_args)
+
+    def test_list_group_snapshots_with_detail_with_bytes_body(self):
+        mock_args = "group_snapshots/detail"
+        self._test_list_group_snapshots(detail=True, bytes_body=True,
+                                        mock_args=mock_args)
+
+    def test_list_group_snapshots_with_params(self):
+        # Run the test separately for each param, to avoid assertion errors
+        # resulting from randomized parameter order.
+        mock_args = 'group_snapshots?sort_key=name'
+        self._test_list_group_snapshots(mock_args=mock_args, sort_key='name')
+
+        mock_args = 'group_snapshots/detail?limit=10'
+        self._test_list_group_snapshots(detail=True, bytes_body=True,
+                                        mock_args=mock_args, limit=10)
+
     def test_delete_group_snapshot(self):
         self.check_service_client_function(
             self.client.delete_group_snapshot,
diff --git a/tempest/tests/lib/services/volume/v3/test_groups_client.py b/tempest/tests/lib/services/volume/v3/test_groups_client.py
index 0884e5a..918e958 100644
--- a/tempest/tests/lib/services/volume/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_groups_client.py
@@ -184,3 +184,12 @@
             group_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
             status=202,
             **self.FAKE_UPDATE_GROUP['group'])
+
+    def test_reset_group_status(self):
+        self.check_service_client_function(
+            self.client.reset_group_status,
+            'tempest.lib.common.rest_client.RestClient.post',
+            {},
+            status=202,
+            group_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
+            status_to_set='error')
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index bf04280..6018441 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -176,96 +176,3 @@
                           self._test_requires_ext_helper,
                           extension='enabled_ext',
                           service='bad_service')
-
-
-class TestConfigDecorators(BaseDecoratorsTest):
-    def setUp(self):
-        super(TestConfigDecorators, self).setUp()
-        cfg.CONF.set_default('nova', True, 'service_available')
-        cfg.CONF.set_default('glance', False, 'service_available')
-
-    def _assert_skip_message(self, func, skip_msg):
-        try:
-            func()
-            self.fail()
-        except testtools.TestCase.skipException as skip_exc:
-            self.assertEqual(skip_exc.args[0], skip_msg)
-
-    def _test_skip_unless_config(self, expected_to_skip=True, *decorator_args):
-
-        class TestFoo(test.BaseTestCase):
-            @config.skip_unless_config(*decorator_args)
-            def test_bar(self):
-                return 0
-
-        t = TestFoo('test_bar')
-        if expected_to_skip:
-            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
-            if (len(decorator_args) >= 3):
-                # decorator_args[2]: skip message specified
-                self._assert_skip_message(t.test_bar, decorator_args[2])
-        else:
-            try:
-                self.assertEqual(t.test_bar(), 0)
-            except testtools.TestCase.skipException:
-                # We caught a skipException but we didn't expect to skip
-                # this test so raise a hard test failure instead.
-                raise testtools.TestCase.failureException(
-                    "Not supposed to skip")
-
-    def _test_skip_if_config(self, expected_to_skip=True,
-                             *decorator_args):
-
-        class TestFoo(test.BaseTestCase):
-            @config.skip_if_config(*decorator_args)
-            def test_bar(self):
-                return 0
-
-        t = TestFoo('test_bar')
-        if expected_to_skip:
-            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
-            if (len(decorator_args) >= 3):
-                # decorator_args[2]: skip message specified
-                self._assert_skip_message(t.test_bar, decorator_args[2])
-        else:
-            try:
-                self.assertEqual(t.test_bar(), 0)
-            except testtools.TestCase.skipException:
-                # We caught a skipException but we didn't expect to skip
-                # this test so raise a hard test failure instead.
-                raise testtools.TestCase.failureException(
-                    "Not supposed to skip")
-
-    def test_skip_unless_no_group(self):
-        self._test_skip_unless_config(True, 'fake_group', 'an_option')
-
-    def test_skip_unless_no_option(self):
-        self._test_skip_unless_config(True, 'service_available',
-                                      'not_an_option')
-
-    def test_skip_unless_false_option(self):
-        self._test_skip_unless_config(True, 'service_available', 'glance')
-
-    def test_skip_unless_false_option_msg(self):
-        self._test_skip_unless_config(True, 'service_available', 'glance',
-                                      'skip message')
-
-    def test_skip_unless_true_option(self):
-        self._test_skip_unless_config(False,
-                                      'service_available', 'nova')
-
-    def test_skip_if_no_group(self):
-        self._test_skip_if_config(False, 'fake_group', 'an_option')
-
-    def test_skip_if_no_option(self):
-        self._test_skip_if_config(False, 'service_available', 'not_an_option')
-
-    def test_skip_if_false_option(self):
-        self._test_skip_if_config(False, 'service_available', 'glance')
-
-    def test_skip_if_true_option(self):
-        self._test_skip_if_config(True, 'service_available', 'nova')
-
-    def test_skip_if_true_option_msg(self):
-        self._test_skip_if_config(True, 'service_available', 'nova',
-                                  'skip message')
diff --git a/tempest/tests/test_tempest_plugin.py b/tempest/tests/test_tempest_plugin.py
index 13e2499..ddadef5 100644
--- a/tempest/tests/test_tempest_plugin.py
+++ b/tempest/tests/test_tempest_plugin.py
@@ -17,9 +17,16 @@
 from tempest.test_discover import plugins
 from tempest.tests import base
 from tempest.tests import fake_tempest_plugin as fake_plugin
+from tempest.tests.lib.services import registry_fixture
 
 
 class TestPluginDiscovery(base.TestCase):
+
+    def setUp(self):
+        super(TestPluginDiscovery, self).setUp()
+        # Make sure we leave the registry clean
+        self.useFixture(registry_fixture.RegistryFixture())
+
     def test_load_tests_with_one_plugin(self):
         # we can't mock stevedore since it's a singleton and already executed
         # during test discovery. So basically this test covers the plugin loop
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
new file mode 100644
index 0000000..fc50736
--- /dev/null
+++ b/tempest/tests/test_test.py
@@ -0,0 +1,626 @@
+# Copyright 2017 IBM Corp
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import sys
+
+import mock
+from oslo_config import cfg
+import testtools
+
+from tempest import clients
+from tempest import config
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib.services import registry_fixture
+
+
+if sys.version_info >= (2, 7):
+    import unittest
+else:
+    import unittest2 as unittest
+
+
+class LoggingTestResult(testtools.TestResult):
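+    """Minimal TestResult that records errors for later inspection.
+
+    addError stores (test, err, details) tuples in the list passed at
+    construction time, so the unit tests below can assert on the exceptions
+    raised while running a test suite.
+    """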
+
+    def __init__(self, log, *args, **kwargs):
+        super(LoggingTestResult, self).__init__(*args, **kwargs)
+        self.log = log
+
+    def addError(self, test, err=None, details=None):
+        self.log.append((test, err, details))
+
+
+class TestValidationResources(base.TestCase):
+
+    validation_resources_module = 'tempest.lib.common.validation_resources'
+
+    def setUp(self):
+        super(TestValidationResources, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.useFixture(registry_fixture.RegistryFixture())
+        self.patchobject(config, 'TempestConfigPrivate',
+                         fake_config.FakePrivate)
+
+        class TestTestClass(test.BaseTestCase):
+            pass
+
+        self.test_test_class = TestTestClass
+
+    def test_validation_resources_no_validation(self):
+        cfg.CONF.set_default('run_validation', False, 'validation')
+        creds = fake_credentials.FakeKeystoneV3Credentials()
+        osclients = clients.Manager(creds)
+        vr = self.test_test_class.get_class_validation_resources(osclients)
+        self.assertIsNone(vr)
+
+    def test_validation_resources_exists(self):
+        cfg.CONF.set_default('run_validation', True, 'validation')
+        creds = fake_credentials.FakeKeystoneV3Credentials()
+        osclients = clients.Manager(creds)
+        expected_vr = 'expected_validation_resources'
+        self.test_test_class._validation_resources[osclients] = expected_vr
+        obtained_vr = self.test_test_class.get_class_validation_resources(
+            osclients)
+        self.assertEqual(expected_vr, obtained_vr)
+
+    @mock.patch(validation_resources_module + '.create_validation_resources',
+                autospec=True)
+    def test_validation_resources_new(self, mock_create_vr):
+        cfg.CONF.set_default('run_validation', True, 'validation')
+        cfg.CONF.set_default('neutron', True, 'service_available')
+        creds = fake_credentials.FakeKeystoneV3Credentials()
+        osclients = clients.Manager(creds)
+        expected_vr = {'expected_validation_resources': None}
+        mock_create_vr.return_value = expected_vr
+        with mock.patch.object(
+                self.test_test_class,
+                'addClassResourceCleanup') as mock_add_class_cleanup:
+            obtained_vr = self.test_test_class.get_class_validation_resources(
+                osclients)
+            self.assertEqual(1, mock_add_class_cleanup.call_count)
+            self.assertEqual(mock.call(vr.clear_validation_resources,
+                                       osclients,
+                                       use_neutron=True,
+                                       **expected_vr),
+                             mock_add_class_cleanup.call_args)
+        self.assertEqual(mock_create_vr.call_count, 1)
+        self.assertIn(osclients, mock_create_vr.call_args_list[0][0])
+        self.assertEqual(expected_vr, obtained_vr)
+        self.assertIn(osclients, self.test_test_class._validation_resources)
+        self.assertEqual(expected_vr,
+                         self.test_test_class._validation_resources[osclients])
+
+    def test_validation_resources_invalid_config(self):
+        invalid_version = 999
+        cfg.CONF.set_default('run_validation', True, 'validation')
+        cfg.CONF.set_default('ip_version_for_ssh', invalid_version,
+                             'validation')
+        cfg.CONF.set_default('neutron', True, 'service_available')
+        creds = fake_credentials.FakeKeystoneV3Credentials()
+        osclients = clients.Manager(creds)
+        with testtools.ExpectedException(
+                lib_exc.InvalidConfiguration,
+                value_re='^.*\n.*' + str(invalid_version)):
+            self.test_test_class.get_class_validation_resources(osclients)
+
+    @mock.patch(validation_resources_module + '.create_validation_resources',
+                autospec=True)
+    def test_validation_resources_invalid_config_nova_net(self,
+                                                          mock_create_vr):
+        invalid_version = 999
+        cfg.CONF.set_default('run_validation', True, 'validation')
+        cfg.CONF.set_default('ip_version_for_ssh', invalid_version,
+                             'validation')
+        cfg.CONF.set_default('neutron', False, 'service_available')
+        creds = fake_credentials.FakeKeystoneV3Credentials()
+        osclients = clients.Manager(creds)
+        expected_vr = {'expected_validation_resources': None}
+        mock_create_vr.return_value = expected_vr
+        obtained_vr = self.test_test_class.get_class_validation_resources(
+            osclients)
+        self.assertEqual(mock_create_vr.call_count, 1)
+        self.assertIn(osclients, mock_create_vr.call_args_list[0][0])
+        self.assertEqual(expected_vr, obtained_vr)
+        self.assertIn(osclients, self.test_test_class._validation_resources)
+        self.assertEqual(expected_vr,
+                         self.test_test_class._validation_resources[osclients])
+
+    @mock.patch(validation_resources_module + '.create_validation_resources',
+                autospec=True)
+    @mock.patch(validation_resources_module + '.clear_validation_resources',
+                autospec=True)
+    def test_validation_resources_fixture(self, mock_clean_vr, mock_create_vr):
+
+        class TestWithRun(self.test_test_class):
+
+            def runTest(self):
+                pass
+
+        cfg.CONF.set_default('run_validation', True, 'validation')
+        test_case = TestWithRun()
+        creds = fake_credentials.FakeKeystoneV3Credentials()
+        osclients = clients.Manager(creds)
+        test_case.get_test_validation_resources(osclients)
+        self.assertEqual(1, mock_create_vr.call_count)
+        self.assertEqual(0, mock_clean_vr.call_count)
+
+
+class TestSetNetworkResources(base.TestCase):
+
+    def setUp(self):
+        super(TestSetNetworkResources, self).setUp()
+
+        class ParentTest(test.BaseTestCase):
+
+            @classmethod
+            def setup_credentials(cls):
+                cls.set_network_resources(dhcp=True)
+                super(ParentTest, cls).setup_credentials()
+
+            def runTest(self):
+                pass
+
+        self.parent_class = ParentTest
+
+    def test_set_network_resources_child_only(self):
+
+        class ChildTest(self.parent_class):
+
+            @classmethod
+            def setup_credentials(cls):
+                cls.set_network_resources(router=True)
+                super(ChildTest, cls).setup_credentials()
+
+        child_test = ChildTest()
+        child_test.setUpClass()
+        # Assert that the parent's network resources are not set
+        self.assertFalse(child_test._network_resources['dhcp'])
+        # Assert that the child network resources are set
+        self.assertTrue(child_test._network_resources['router'])
+
+    def test_set_network_resources_right_order(self):
+
+        class ChildTest(self.parent_class):
+
+            @classmethod
+            def setup_credentials(cls):
+                super(ChildTest, cls).setup_credentials()
+                cls.set_network_resources(router=True)
+
+        child_test = ChildTest()
+        with testtools.ExpectedException(RuntimeError,
+                                         value_re='set_network_resources'):
+            child_test.setUpClass()
+
+    def test_set_network_resources_children(self):
+
+        class ChildTest(self.parent_class):
+
+            @classmethod
+            def setup_credentials(cls):
+                cls.set_network_resources(router=True)
+                super(ChildTest, cls).setup_credentials()
+
+        class GrandChildTest(ChildTest):
+            pass
+
+        # Invoke setUpClass on both and check that the setup_credentials
+        # call-check mechanism does not report any false negatives.
+        child_test = ChildTest()
+        child_test.setUpClass()
+        grandchild_test = GrandChildTest()
+        grandchild_test.setUpClass()
+
+
+class TestTempestBaseTestClass(base.TestCase):
+
+    def setUp(self):
+        super(TestTempestBaseTestClass, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.patchobject(config, 'TempestConfigPrivate',
+                         fake_config.FakePrivate)
+
+        class ParentTest(test.BaseTestCase):
+
+            def runTest(self):
+                pass
+
+        self.parent_test = ParentTest
+
+    def test_resource_cleanup(self):
+        cfg.CONF.set_default('neutron', False, 'service_available')
+        exp_args = (1, 2,)
+        exp_kwargs = {'a': 1, 'b': 2}
+        mock1 = mock.Mock()
+        mock2 = mock.Mock()
+        exp_functions = [mock1, mock2]
+
+        class TestWithCleanups(self.parent_test):
+
+            @classmethod
+            def resource_setup(cls):
+                for fn in exp_functions:
+                    cls.addClassResourceCleanup(fn, *exp_args,
+                                                **exp_kwargs)
+
+        test_cleanups = TestWithCleanups()
+        suite = unittest.TestSuite((test_cleanups,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # No exception raised - error log is empty
+        self.assertFalse(log)
+        # All stacked resource cleanups invoked
+        mock1.assert_called_once_with(*exp_args, **exp_kwargs)
+        mock2.assert_called_once_with(*exp_args, **exp_kwargs)
+        # Cleanup stack is empty
+        self.assertEqual(0, len(test_cleanups._class_cleanups))
+
+    def test_resource_cleanup_failures(self):
+        cfg.CONF.set_default('neutron', False, 'service_available')
+        exp_args = (1, 2,)
+        exp_kwargs = {'a': 1, 'b': 2}
+        mock1 = mock.Mock()
+        mock1.side_effect = Exception('mock1 resource cleanup failure')
+        mock2 = mock.Mock()
+        mock3 = mock.Mock()
+        mock3.side_effect = Exception('mock3 resource cleanup failure')
+        exp_functions = [mock1, mock2, mock3]
+
+        class TestWithFailingCleanups(self.parent_test):
+
+            @classmethod
+            def resource_setup(cls):
+                for fn in exp_functions:
+                    cls.addClassResourceCleanup(fn, *exp_args,
+                                                **exp_kwargs)
+
+        test_cleanups = TestWithFailingCleanups()
+        suite = unittest.TestSuite((test_cleanups,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # A single MultipleExceptions error is captured
+        self.assertEqual(1, len(log))
+        # log[0] is (test, err, details); err is an exc_info tuple of
+        # (type, exception, traceback); the exception is a MultipleExceptions
+        found_exc = log[0][1][1]
+        self.assertTrue(isinstance(found_exc, testtools.MultipleExceptions))
+        self.assertEqual(2, len(found_exc.args))
+        # Each arg is exc_info - match messages and order
+        self.assertIn('mock3 resource', str(found_exc.args[0][1]))
+        self.assertIn('mock1 resource', str(found_exc.args[1][1]))
+        # All stacked resource cleanups invoked
+        mock1.assert_called_once_with(*exp_args, **exp_kwargs)
+        mock2.assert_called_once_with(*exp_args, **exp_kwargs)
+        # Cleanup stack is empty
+        self.assertEqual(0, len(test_cleanups._class_cleanups))
+
+    def test_super_resource_cleanup_not_invoked(self):
+
+        class BadResourceCleanup(self.parent_test):
+
+            @classmethod
+            def resource_cleanup(cls):
+                pass
+
+        bad_class = BadResourceCleanup()
+        suite = unittest.TestSuite((bad_class,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # One exception captured
+        self.assertEqual(1, len(log))
+        # log[0] is (test, err, details); err is an exc_info tuple of
+        # (type, exception, traceback); the exception is a RuntimeError
+        found_exc = log[0][1][1]
+        self.assertTrue(isinstance(found_exc, RuntimeError))
+        self.assertIn(BadResourceCleanup.__name__, str(found_exc))
+
+    def test_super_skip_checks_not_invoked(self):
+
+        class BadSkipChecks(self.parent_test):
+
+            @classmethod
+            def skip_checks(cls):
+                pass
+
+        bad_class = BadSkipChecks()
+        with testtools.ExpectedException(
+                RuntimeError,
+                value_re='^.* ' + BadSkipChecks.__name__):
+            bad_class.setUpClass()
+
+    def test_super_setup_credentials_not_invoked(self):
+
+        class BadSetupCredentials(self.parent_test):
+
+            @classmethod
+            def skip_checks(cls):
+                pass
+
+        bad_class = BadSetupCredentials()
+        with testtools.ExpectedException(
+                RuntimeError,
+                value_re='^.* ' + BadSetupCredentials.__name__):
+            bad_class.setUpClass()
+
+    def test_grandparent_skip_checks_not_invoked(self):
+
+        class BadSkipChecks(self.parent_test):
+
+            @classmethod
+            def skip_checks(cls):
+                pass
+
+        class SonOfBadSkipChecks(BadSkipChecks):
+            pass
+
+        bad_class = SonOfBadSkipChecks()
+        with testtools.ExpectedException(
+                RuntimeError,
+                value_re='^.* ' + SonOfBadSkipChecks.__name__):
+            bad_class.setUpClass()
+
+    @mock.patch('tempest.common.credentials_factory.is_admin_available',
+                autospec=True, return_value=True)
+    def test_skip_checks_admin(self, mock_iaa):
+        identity_version = 'identity_version'
+
+        class NeedAdmin(self.parent_test):
+            credentials = ['admin']
+
+            @classmethod
+            def get_identity_version(cls):
+                return identity_version
+
+        NeedAdmin().skip_checks()
+        mock_iaa.assert_called_once_with('identity_version')
+
+    @mock.patch('tempest.common.credentials_factory.is_admin_available',
+                autospec=True, return_value=False)
+    def test_skip_checks_admin_not_available(self, mock_iaa):
+        identity_version = 'identity_version'
+
+        class NeedAdmin(self.parent_test):
+            credentials = ['admin']
+
+            @classmethod
+            def get_identity_version(cls):
+                return identity_version
+
+        with testtools.ExpectedException(testtools.testcase.TestSkipped):
+            NeedAdmin().skip_checks()
+        mock_iaa.assert_called_once_with('identity_version')
+
+    def test_skip_checks_identity_v2_not_available(self):
+        cfg.CONF.set_default('api_v2', False, 'identity-feature-enabled')
+
+        class NeedV2(self.parent_test):
+            identity_version = 'v2'
+
+        with testtools.ExpectedException(testtools.testcase.TestSkipped):
+            NeedV2().skip_checks()
+
+    def test_skip_checks_identity_v3_not_available(self):
+        cfg.CONF.set_default('api_v3', False, 'identity-feature-enabled')
+
+        class NeedV3(self.parent_test):
+            identity_version = 'v3'
+
+        with testtools.ExpectedException(testtools.testcase.TestSkipped):
+            NeedV3().skip_checks()
+
+    def test_setup_credentials_all(self):
+        expected_creds = ['string', ['list', 'role1', 'role2']]
+
+        class AllCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                AllCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            all_creds = AllCredentials()
+            all_creds.setup_credentials()
+        self.assertTrue(hasattr(all_creds, 'os_string'))
+        self.assertEqual(expected_clients, all_creds.os_string)
+        self.assertTrue(hasattr(all_creds, 'os_roles_list'))
+        self.assertEqual(expected_clients, all_creds.os_roles_list)
+        self.assertEqual(2, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0],
+            mock_get_client_manager.mock_calls[0][2]['credential_type'])
+        self.assertEqual(
+            expected_creds[1][1:],
+            mock_get_client_manager.mock_calls[1][2]['roles'])
+
+    def test_setup_class_overwritten(self):
+
+        class OverridesSetup(self.parent_test):
+
+            @classmethod
+            def setUpClass(cls):  # noqa
+                pass
+
+        overrides_setup = OverridesSetup()
+        suite = unittest.TestSuite((overrides_setup,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # Record 0: the test (an error holder). The error is generated during
+        # the test run.
+        self.assertIn('runTest', str(log[0][0]))
+        # Record 0: the traceback
+        self.assertRegex(
+            str(log[0][2]['traceback']).replace('\n', ' '),
+            RuntimeError.__name__ + ': .* ' + OverridesSetup.__name__)
+
+
+class TestTempestBaseTestClassFixtures(base.TestCase):
+
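+    # Class-level fixture hooks, listed in the order in which BaseTestCase
+    # invokes them; the tests below assert which of these were run for a
+    # given failure scenario.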
+    SETUP_FIXTURES = [test.BaseTestCase.setUpClass.__name__,
+                      test.BaseTestCase.skip_checks.__name__,
+                      test.BaseTestCase.setup_credentials.__name__,
+                      test.BaseTestCase.setup_clients.__name__,
+                      test.BaseTestCase.resource_setup.__name__]
+    TEARDOWN_FIXTURES = [test.BaseTestCase.tearDownClass.__name__,
+                         test.BaseTestCase.resource_cleanup.__name__,
+                         test.BaseTestCase.clear_credentials.__name__]
+
+    def setUp(self):
+        super(TestTempestBaseTestClassFixtures, self).setUp()
+        self.mocks = {}
+        for fix in self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES:
+            self.mocks[fix] = mock.Mock()
+
+        def tracker_builder(name):
+
+            def tracker(cls):
+                # Track that the fixture was invoked
+                cls.fixtures_invoked.append(name)
+                # Run the fixture
+                getattr(super(TestWithClassFixtures, cls), name)()
+                # Run a mock we can use for side effects
+                self.mocks[name]()
+
+            return tracker
+
+        class TestWithClassFixtures(test.BaseTestCase):
+
+            credentials = []
+            fixtures_invoked = []
+
+            def runTest(_self):
+                pass
+
+        # Decorate all test class fixtures with tracker_builder
+        for method_name in self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES:
+            setattr(TestWithClassFixtures, method_name,
+                    classmethod(tracker_builder(method_name)))
+
+        self.test = TestWithClassFixtures()
+
+    def test_no_error_flow(self):
+        # If all setup fixtures are executed, all cleanup fixtures are
+        # executed too
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        self.assertEqual(self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES,
+                         self.test.fixtures_invoked)
+
+    def test_skip_only(self):
+        # If a skip condition is hit in the test, no credentials or resources
+        # are provisioned or cleaned up
+        self.mocks['skip_checks'].side_effect = (
+            testtools.testcase.TestSkipped())
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # If we trigger a skip condition, teardown is not invoked at all
+        self.assertEqual(self.SETUP_FIXTURES[:2],
+                         self.test.fixtures_invoked)
+
+    def test_skip_credentials_fails(self):
+        expected_exc = 'sc exploded'
+        self.mocks['setup_credentials'].side_effect = Exception(expected_exc)
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # If setup_credentials explodes, tearDownClass and clear_credentials
+        # are still invoked, and the exception is re-raised
+        self.assertEqual((self.SETUP_FIXTURES[:3] +
+                          [self.TEARDOWN_FIXTURES[i] for i in (0, 2)]),
+                         self.test.fixtures_invoked)
+        found_exc = log[0][1][1]
+        self.assertIn(expected_exc, str(found_exc))
+
+    def test_skip_credentials_fails_clear_fails(self):
+        # If cleanup fails on failure, we log the exception and do not
+        # re-raise it. Note that since the exception happens outside of
+        # the Tempest test setUp, logging is not captured on the Tempest
+        # test side; it is captured by the unit test instead.
+        expected_exc = 'sc exploded'
+        clear_exc = 'clear exploded'
+        self.mocks['setup_credentials'].side_effect = Exception(expected_exc)
+        self.mocks['clear_credentials'].side_effect = Exception(clear_exc)
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # If setup_credentials explodes, tearDownClass and clear_credentials
+        # are still invoked, and the exception is re-raised
+        self.assertEqual((self.SETUP_FIXTURES[:3] +
+                          [self.TEARDOWN_FIXTURES[i] for i in (0, 2)]),
+                         self.test.fixtures_invoked)
+        found_exc = log[0][1][1]
+        self.assertIn(expected_exc, str(found_exc))
+        # Since log capture depends on OS_LOG_CAPTURE, we can only assert if
+        # logging was captured
+        if os.environ.get('OS_LOG_CAPTURE'):
+            self.assertIn(clear_exc, self.log_fixture.logger.output)
+
+    def test_skip_credentials_clients_resources_credentials_clear_fails(self):
+        # If cleanup fails with no previous failure, we re-raise the exception.
+        expected_exc = 'clear exploded'
+        self.mocks['clear_credentials'].side_effect = Exception(expected_exc)
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # All setup fixtures succeed, so all teardown fixtures are invoked;
+        # the clear_credentials failure is re-raised
+        self.assertEqual(self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES,
+                         self.test.fixtures_invoked)
+        found_exc = log[0][1][1]
+        self.assertIn(expected_exc, str(found_exc))
+
+    def test_skip_credentials_clients_fails(self):
+        expected_exc = 'clients exploded'
+        self.mocks['setup_clients'].side_effect = Exception(expected_exc)
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # If setup_clients explodes, tearDownClass and clear_credentials are
+        # still invoked, and the exception is re-raised
+        self.assertEqual((self.SETUP_FIXTURES[:4] +
+                          [self.TEARDOWN_FIXTURES[i] for i in (0, 2)]),
+                         self.test.fixtures_invoked)
+        found_exc = log[0][1][1]
+        self.assertIn(expected_exc, str(found_exc))
+
+    def test_skip_credentials_clients_resources_fails(self):
+        expected_exc = 'resource setup exploded'
+        self.mocks['resource_setup'].side_effect = Exception(expected_exc)
+        suite = unittest.TestSuite((self.test,))
+        log = []
+        result = LoggingTestResult(log)
+        suite.run(result)
+        # If resource_setup explodes, tearDownClass, resource_cleanup and
+        # clear_credentials are still invoked, and the exception is re-raised
+        self.assertEqual(self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES,
+                         self.test.fixtures_invoked)
+        found_exc = log[0][1][1]
+        self.assertIn(expected_exc, str(found_exc))
diff --git a/test-requirements.txt b/test-requirements.txt
index 29f0865..37644d0 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,7 +4,7 @@
 hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
 # needed for doc build
 sphinx>=1.6.2 # BSD
-openstackdocstheme>=1.16.0 # Apache-2.0
+openstackdocstheme>=1.17.0 # Apache-2.0
 reno>=2.5.0 # Apache-2.0
 mock>=2.0.0 # BSD
 coverage!=4.4,>=4.0 # Apache-2.0
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index e6aad86..20c99b2 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -33,8 +33,8 @@
 #   * network access to https://git.openstack.org/cgit
 #   ))
 #
-# If a file named data/tempest-plugins-registry.header or
-# data/tempest-plugins-registry.footer is found relative to the
+# If a file named doc/source/data/tempest-plugins-registry.header or
+# doc/source/data/tempest-plugins-registry.footer is found relative to the
 # current working directory, it will be prepended or appended to
 # the generated reStructuredText plugins table respectively.
 
@@ -43,8 +43,8 @@
 (
 declare -A plugins
 
-if [[ -r data/tempest-plugins-registry.header ]]; then
-    cat data/tempest-plugins-registry.header
+if [[ -r doc/source/data/tempest-plugins-registry.header ]]; then
+    cat doc/source/data/tempest-plugins-registry.header
 fi
 
 sorted_plugins=$(python tools/generate-tempest-plugins-list.py)
@@ -56,8 +56,8 @@
     printf "+----------------------------+-------------------------------------------------------------------------+\n"
 done
 
-if [[ -r data/tempest-plugins-registry.footer ]]; then
-    cat data/tempest-plugins-registry.footer
+if [[ -r doc/source/data/tempest-plugins-registry.footer ]]; then
+    cat doc/source/data/tempest-plugins-registry.footer
 fi
 ) > doc/source/plugin-registry.rst
 
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index a4f706e..44bf840 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -20,7 +20,7 @@
 # What it does:
 # * Creates the virtualenv
 # * Install tempest
-# * Retrive the project lists having tempest plugin if project name is
+# * Retrieve the list of projects with a tempest plugin if a project name is
 #   given.
 # * For each project in a list, It does:
 #   * Clone the Project
diff --git a/tox.ini b/tox.ini
index 24e3b5d..7bdc580 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,6 +17,7 @@
 setenv =
     VIRTUAL_ENV={envdir}
     OS_TEST_PATH=./tempest/tests
+    OS_LOG_CAPTURE=1
     PYTHONWARNINGS=default::DeprecationWarning
     BRANCH_NAME=master
     CLIENT_NAME=tempest