Merge "Fix use of code name in services decorator"
diff --git a/HACKING.rst b/HACKING.rst
index 025bf74..29d5bf4 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -227,3 +227,48 @@
 
 2. The unit tests cannot use setUpClass, instead fixtures and testresources
    should be used for shared state between tests.
+
+
+.. _TestDocumentation:
+
+Test Documentation
+------------------
+All new tests must include inline documentation in the form of docstrings
+that explain what is being tested. In API tests for a new API, a class level
+docstring linking to the API reference doc should be added. If no reference
+doc exists, a TODO comment should be added indicating that the reference
+needs to be written. For individual API test cases, a method level docstring
+should be used to explain the functionality being tested if the test name
+isn't descriptive enough. For example::
+
+    def test_get_role_by_id(self):
+        """Get a role by its id."""
+
+the docstring there is superfluous and shouldn't be added. But for a method
+like::
+
+    def test_volume_backup_create_get_detailed_list_restore_delete(self):
+        pass
+
+a docstring would be useful: while the test name is fairly descriptive, the
+operations being performed are complex enough that a bit more explanation
+helps readers understand the intent of the test.
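+
+As a hypothetical sketch, a class level docstring linking to the API reference
+doc might look like the following (the class name and link here are
+illustrative only, not taken from the tree)::
+
+    # Hypothetical example; names and link are illustrative.
+    class RolesTestJSON(base.BaseIdentityAdminTest):
+        """Test roles through the identity API.
+
+        Reference: http://developer.openstack.org/api-ref-identity-v2.html
+        """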
+
+For scenario tests, a class level docstring describing the steps in the
+scenario is required. If there is more than one test case in the class,
+individual docstrings describing the workflow of each test method can be used
+instead. A good example of this would be::
+
+    class TestVolumeBootPattern(manager.OfficialClientTest):
+        """
+        This test case attempts to reproduce the following steps:
+
+         * Create a bootable volume in Cinder from a Glance image
+         * Boot an instance from the bootable volume
+         * Write content to the volume
+         * Delete the instance and boot a new instance from the volume
+         * Check the written content in the instance
+         * Create a volume snapshot while the instance is running
+         * Boot an additional instance from the new snapshot-based volume
+         * Check the written content in the instance booted from snapshot
+        """
diff --git a/REVIEWING.rst b/REVIEWING.rst
index d6dc83e..74bd2ad 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -51,6 +51,15 @@
 whether to skip or not.
 
 
+Test Documentation
+------------------
+When a new test is being added, refer to the :ref:`TestDocumentation` section
+in HACKING to check that the requirements are being met. Apart from a class
+level docstring linking to the API ref doc in API tests and a docstring for
+scenario tests, whether a docstring is required is left to the reviewer's
+discretion.
+
+
 When to approve
 ---------------
  * Every patch needs two +2s before being approved.
diff --git a/doc/source/cleanup.rst b/doc/source/cleanup.rst
new file mode 100644
index 0000000..acd016c
--- /dev/null
+++ b/doc/source/cleanup.rst
@@ -0,0 +1,5 @@
+--------------------------------
+Post Tempest Run Cleanup Utility
+--------------------------------
+
+.. automodule:: tempest.cmd.cleanup
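+
+A minimal usage sketch, assuming the ``tempest-cleanup`` console script added
+in setup.cfg is installed and exposes a standard ``--help`` flag::
+
+    $ tempest-cleanup --help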
\ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index bd4e553..daa293c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -27,7 +27,6 @@
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.intersphinx',
               'sphinx.ext.todo',
               'sphinx.ext.viewcode',
               'oslosphinx'
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d3118ac..bc4fc46 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -29,6 +29,15 @@
    field_guide/thirdparty
    field_guide/unit_tests
 
+---------------------
+Command Documentation
+---------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   cleanup
+
 ==================
 Indices and tables
 ==================
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 3b0b834..dfcbaba 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -709,6 +709,42 @@
 #ssh_user_regex=[["^.*[Cc]irros.*$", "root"]]
 
 
+[messaging]
+
+#
+# Options defined in tempest.config
+#
+
+# Catalog type of the Messaging service. (string value)
+#catalog_type=messaging
+
+# The maximum number of queue records per page when listing
+# queues (integer value)
+#max_queues_per_page=20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata=65536
+
+# The maximum number of queue message per page when listing
+# (or) posting messages (integer value)
+#max_messages_per_page=20
+
+# The maximum size of a message body (integer value)
+#max_message_size=262144
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim=20
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl=1209600
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl=43200
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace=43200
+
+
 [negative]
 
 #
@@ -897,42 +933,6 @@
 #max_resources_per_stack=1000
 
 
-[queuing]
-
-#
-# Options defined in tempest.config
-#
-
-# Catalog type of the Queuing service. (string value)
-#catalog_type=queuing
-
-# The maximum number of queue records per page when listing
-# queues (integer value)
-#max_queues_per_page=20
-
-# The maximum metadata size for a queue (integer value)
-#max_queue_metadata=65536
-
-# The maximum number of queue message per page when listing
-# (or) posting messages (integer value)
-#max_messages_per_page=20
-
-# The maximum size of a message body (integer value)
-#max_message_size=262144
-
-# The maximum number of messages per claim (integer value)
-#max_messages_per_claim=20
-
-# The maximum ttl for a message (integer value)
-#max_message_ttl=1209600
-
-# The maximum ttl for a claim (integer value)
-#max_claim_ttl=43200
-
-# The maximum grace period for a claim (integer value)
-#max_claim_grace=43200
-
-
 [scenario]
 
 #
diff --git a/requirements.txt b/requirements.txt
index 9a3b74d..708ede3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,28 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
 pbr>=0.6,!=0.7,<1.0
 anyjson>=0.3.3
 httplib2>=0.7.5
 jsonschema>=2.0.0,<3.0.0
 testtools>=0.9.34
 lxml>=2.3
-boto>=2.12.0,!=2.13.0
+boto>=2.32.1
 paramiko>=1.13.0
-netaddr>=0.7.6
+netaddr>=0.7.12
 python-ceilometerclient>=1.0.6
-python-glanceclient>=0.13.1
-python-keystoneclient>=0.9.0
-python-novaclient>=2.17.0
-python-neutronclient>=2.3.5,<3
-python-cinderclient>=1.0.7
+python-glanceclient>=0.14.0
+python-keystoneclient>=0.10.0
+python-novaclient>=2.18.0
+python-neutronclient>=2.3.6,<3
+python-cinderclient>=1.1.0
 python-heatclient>=0.2.9
-python-ironicclient
-python-saharaclient>=0.6.0
-python-swiftclient>=2.0.2
+python-ironicclient>=0.2.1
+python-saharaclient>=0.7.3
+python-swiftclient>=2.2.0
 testresources>=0.2.4
 testrepository>=0.0.18
-oslo.config>=1.2.1
+oslo.config>=1.4.0  # Apache-2.0
 six>=1.7.0
 iso8601>=0.1.9
 fixtures>=0.3.14
diff --git a/setup.cfg b/setup.cfg
index 5c62710..2e25ace 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,7 @@
     verify-tempest-config = tempest.cmd.verify_tempest_config:main
     javelin2 = tempest.cmd.javelin:main
     run-tempest-stress = tempest.cmd.run_stress:main
+    tempest-cleanup = tempest.cmd.cleanup:main
 
 [build_sphinx]
 all_files = 1
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index bd49fb2..9aa489c 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -17,6 +17,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
@@ -58,6 +59,8 @@
                           'Resize not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_ram(self):
+        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+        self.useFixture(fixtures.LockFixture('compute_quotas'))
         flavor_name = data_utils.rand_name("flavor-")
         flavor_id = self._get_unused_flavor_id()
         resp, quota_set = self.quotas_client.get_default_quota_set(
@@ -78,6 +81,8 @@
                           'Resize not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_vcpus(self):
+        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+        self.useFixture(fixtures.LockFixture('compute_quotas'))
         flavor_name = data_utils.rand_name("flavor-")
         flavor_id = self._get_unused_flavor_id()
         ram = 512
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 7e9fe92..6c93d33 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -19,6 +19,7 @@
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
+from tempest.openstack.common import excutils
 from tempest.openstack.common import log as logging
 import tempest.test
 
@@ -144,14 +145,19 @@
         for server in cls.servers:
             try:
                 cls.servers_client.delete_server(server['id'])
-            except Exception:
+            except exceptions.NotFound:
+                # Something else already cleaned up the server; nothing to
+                # worry about.
                 pass
+            except Exception:
+                LOG.exception('Deleting server %s failed' % server['id'])
 
         for server in cls.servers:
             try:
                 cls.servers_client.wait_for_server_termination(server['id'])
             except Exception:
-                pass
+                LOG.exception('Waiting for deletion of server %s failed'
+                              % server['id'])
 
     @classmethod
     def server_check_teardown(cls):
@@ -242,15 +248,16 @@
                 try:
                     cls.servers_client.wait_for_server_status(
                         server['id'], kwargs['wait_until'])
-                except Exception as ex:
-                    if ('preserve_server_on_error' not in kwargs
-                        or kwargs['preserve_server_on_error'] is False):
-                        for server in servers:
-                            try:
-                                cls.servers_client.delete_server(server['id'])
-                            except Exception:
-                                pass
-                    raise ex
+                except Exception:
+                    with excutils.save_and_reraise_exception():
+                        if ('preserve_server_on_error' not in kwargs
+                            or kwargs['preserve_server_on_error'] is False):
+                            for server in servers:
+                                try:
+                                    cls.servers_client.delete_server(
+                                        server['id'])
+                                except Exception:
+                                    pass
 
         cls.servers.extend(servers)
 
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index b28124c..901c377 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -28,6 +28,13 @@
         cls.client = cls.security_groups_client
         cls.neutron_available = CONF.service_available.neutron
 
+    @classmethod
+    def setUpClass(cls):
+        super(SecurityGroupRulesTestJSON, cls).setUpClass()
+        cls.ip_protocol = 'tcp'
+        cls.from_port = 22
+        cls.to_port = 22
+
     @test.attr(type='smoke')
     @test.services('network')
     def test_security_group_rules_create(self):
@@ -37,14 +44,11 @@
         resp, security_group = self.create_security_group()
         securitygroup_id = security_group['id']
         # Adding rules to the created Security Group
-        ip_protocol = 'tcp'
-        from_port = 22
-        to_port = 22
         resp, rule = \
             self.client.create_security_group_rule(securitygroup_id,
-                                                   ip_protocol,
-                                                   from_port,
-                                                   to_port)
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port)
         self.addCleanup(self.client.delete_security_group_rule, rule['id'])
         self.assertEqual(200, resp.status)
 
@@ -65,16 +69,13 @@
         secgroup2 = security_group['id']
         # Adding rules to the created Security Group with optional arguments
         parent_group_id = secgroup1
-        ip_protocol = 'tcp'
-        from_port = 22
-        to_port = 22
         cidr = '10.2.3.124/24'
         group_id = secgroup2
         resp, rule = \
             self.client.create_security_group_rule(parent_group_id,
-                                                   ip_protocol,
-                                                   from_port,
-                                                   to_port,
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port,
                                                    cidr=cidr,
                                                    group_id=group_id)
         self.assertEqual(200, resp.status)
@@ -89,13 +90,11 @@
         securitygroup_id = security_group['id']
 
         # Add a first rule to the created Security Group
-        ip_protocol1 = 'tcp'
-        from_port1 = 22
-        to_port1 = 22
         resp, rule = \
             self.client.create_security_group_rule(securitygroup_id,
-                                                   ip_protocol1,
-                                                   from_port1, to_port1)
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port)
         rule1_id = rule['id']
 
         # Add a second rule to the created Security Group
@@ -127,14 +126,11 @@
         resp, security_group = self.create_security_group()
         sg2_id = security_group['id']
         # Adding rules to the Group1
-        ip_protocol = 'tcp'
-        from_port = 22
-        to_port = 22
         resp, rule = \
             self.client.create_security_group_rule(sg1_id,
-                                                   ip_protocol,
-                                                   from_port,
-                                                   to_port,
+                                                   self.ip_protocol,
+                                                   self.from_port,
+                                                   self.to_port,
                                                    group_id=sg2_id)
 
         self.assertEqual(200, resp.status)
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 634bc01..6a5da58 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -70,6 +70,18 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
+    @test.attr(type='gate')
+    def test_delete_server_while_in_suspended_state(self):
+        # Delete a server while its VM state is SUSPENDED
+        _, server = self.create_test_server(wait_until='ACTIVE')
+        self.client.suspend_server(server['id'])
+        self.client.wait_for_server_status(server['id'], 'SUSPENDED')
+        resp, _ = self.client.delete_server(server['id'])
+        self.assertEqual('204', resp['status'])
+        self.client.wait_for_server_termination(server['id'])
+
     @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
                           'Shelve is not available.')
     @test.attr(type='gate')
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 6032976..98fe387 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -233,6 +233,30 @@
         self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
     @test.attr(type='gate')
+    def test_list_servers_filtered_by_name_regex(self):
+        # list of regex that should match s1, s2 and s3
+        regexes = ['^.*-instance-[0-9]+$', '^.*-instance-.*$']
+        for regex in regexes:
+            params = {'name': regex}
+            resp, body = self.client.list_servers(params)
+            servers = body['servers']
+
+            self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
+            self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
+            self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
+
+        # Take a random part of the name and search for it
+        part_name = self.s1_name[-10:]
+
+        params = {'name': part_name}
+        resp, body = self.client.list_servers(params)
+        servers = body['servers']
+
+        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
+        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
+        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
+
+    @test.attr(type='gate')
     def test_list_servers_filtered_by_ip(self):
         # Filter servers by ip
         # Here should be listed 1 server
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 071bbfb..3aacf2a 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -75,9 +75,7 @@
                                                       new_password)
             linux_client.validate_authentication()
 
-    @test.attr(type='smoke')
-    def test_reboot_server_hard(self):
-        # The server should be power cycled
+    def _test_reboot_server(self, reboot_type):
         if self.run_ssh:
             # Get the time the server was last rebooted,
             resp, server = self.client.get_server(self.server_id)
@@ -85,7 +83,7 @@
                                                       self.password)
             boot_time = linux_client.get_boot_time()
 
-        resp, body = self.client.reboot(self.server_id, 'HARD')
+        resp, body = self.client.reboot(self.server_id, reboot_type)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'ACTIVE')
 
@@ -97,28 +95,16 @@
             self.assertTrue(new_boot_time > boot_time,
                             '%s > %s' % (new_boot_time, boot_time))
 
+    @test.attr(type='smoke')
+    def test_reboot_server_hard(self):
+        # The server should be power cycled
+        self._test_reboot_server('HARD')
+
     @test.skip_because(bug="1014647")
     @test.attr(type='smoke')
     def test_reboot_server_soft(self):
         # The server should be signaled to reboot gracefully
-        if self.run_ssh:
-            # Get the time the server was last rebooted,
-            resp, server = self.client.get_server(self.server_id)
-            linux_client = remote_client.RemoteClient(server, self.ssh_user,
-                                                      self.password)
-            boot_time = linux_client.get_boot_time()
-
-        resp, body = self.client.reboot(self.server_id, 'SOFT')
-        self.assertEqual(202, resp.status)
-        self.client.wait_for_server_status(self.server_id, 'ACTIVE')
-
-        if self.run_ssh:
-            # Log in and verify the boot time has changed
-            linux_client = remote_client.RemoteClient(server, self.ssh_user,
-                                                      self.password)
-            new_boot_time = linux_client.get_boot_time()
-            self.assertTrue(new_boot_time > boot_time,
-                            '%s > %s' % (new_boot_time, boot_time))
+        self._test_reboot_server('SOFT')
 
     @test.attr(type='smoke')
     def test_rebuild_server(self):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 25f24b9..a984ade 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -64,7 +64,8 @@
     def resource_cleanup(cls):
         # Deleting the floating IP which is created in this method
         cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
-        cls.delete_volume(cls.volume['id'])
+        if getattr(cls, 'volume', None):
+            cls.delete_volume(cls.volume['id'])
         resp, cls.sg = cls.security_groups_client.delete_security_group(
             cls.sg_id)
         super(ServerRescueTestJSON, cls).resource_cleanup()
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index aa406f7..0d29968 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -56,7 +56,8 @@
 
     @classmethod
     def resource_cleanup(cls):
-        cls.delete_volume(cls.volume['id'])
+        if getattr(cls, 'volume', None):
+            cls.delete_volume(cls.volume['id'])
         super(ServerRescueNegativeTestJSON, cls).resource_cleanup()
 
     def _detach(self, server_id, volume_id):
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index e48432b..f561ed3 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -17,6 +17,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
@@ -56,6 +57,8 @@
 
     @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_ram(self):
+        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+        self.useFixture(fixtures.LockFixture('compute_quotas'))
         flavor_name = data_utils.rand_name("flavor-")
         flavor_id = self._get_unused_flavor_id()
         resp, quota_set = self.quotas_client.get_default_quota_set(
@@ -74,6 +77,8 @@
 
     @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_vcpus(self):
+        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+        self.useFixture(fixtures.LockFixture('compute_quotas'))
         flavor_name = data_utils.rand_name("flavor-")
         flavor_id = self._get_unused_flavor_id()
         ram = 512
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index c875b2f..08767e3 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -31,9 +31,9 @@
     """Base test class for Image API tests."""
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(BaseImageTest, cls).setUpClass()
+        super(BaseImageTest, cls).resource_setup()
         cls.created_images = []
         cls._interface = 'json'
         cls.isolated_creds = isolated_creds.IsolatedCreds(
@@ -47,7 +47,7 @@
             cls.os = clients.Manager()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for image_id in cls.created_images:
             try:
                 cls.client.delete_image(image_id)
@@ -57,7 +57,7 @@
         for image_id in cls.created_images:
                 cls.client.wait_for_resource_deletion(image_id)
         cls.isolated_creds.clear_isolated_creds()
-        super(BaseImageTest, cls).tearDownClass()
+        super(BaseImageTest, cls).resource_cleanup()
 
     @classmethod
     def create_image(cls, **kwargs):
@@ -79,8 +79,8 @@
 class BaseV1ImageTest(BaseImageTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseV1ImageTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV1ImageTest, cls).resource_setup()
         cls.client = cls.os.image_client
         if not CONF.image_feature_enabled.api_v1:
             msg = "Glance API v1 not supported"
@@ -89,8 +89,8 @@
 
 class BaseV1ImageMembersTest(BaseV1ImageTest):
     @classmethod
-    def setUpClass(cls):
-        super(BaseV1ImageMembersTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV1ImageMembersTest, cls).resource_setup()
         if CONF.compute.allow_tenant_isolation:
             cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
         else:
@@ -113,8 +113,8 @@
 class BaseV2ImageTest(BaseImageTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseV2ImageTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV2ImageTest, cls).resource_setup()
         cls.client = cls.os.image_client_v2
         if not CONF.image_feature_enabled.api_v2:
             msg = "Glance API v2 not supported"
@@ -124,8 +124,8 @@
 class BaseV2MemberImageTest(BaseV2ImageTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseV2MemberImageTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV2MemberImageTest, cls).resource_setup()
         if CONF.compute.allow_tenant_isolation:
             creds = cls.isolated_creds.get_alt_creds()
             cls.os_alt = clients.Manager(creds)
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index bf55b89..38a623a 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -106,9 +106,8 @@
     """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ListImagesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ListImagesTest, cls).resource_setup()
         # We add a few images here to test the listing functionality of
         # the images API
         img1 = cls._create_remote_image('one', 'bare', 'raw')
@@ -235,8 +234,7 @@
 
 class ListSnapshotImagesTest(base.BaseV1ImageTest):
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
+    def resource_setup(cls):
         # This test class only uses nova v3 api to create snapshot
         # as the similar test which uses nova v2 api already exists
         # in nova v2 compute images api tests.
@@ -246,7 +244,7 @@
             skip_msg = ("%s skipped as nova v3 api is not available" %
                         cls.__name__)
             raise cls.skipException(skip_msg)
-        super(ListSnapshotImagesTest, cls).setUpClass()
+        super(ListSnapshotImagesTest, cls).resource_setup()
         cls.servers_client = cls.os.servers_v3_client
         cls.servers = []
         # We add a few images here to test the listing functionality of
@@ -265,10 +263,10 @@
         cls.client.wait_for_image_status(image['id'], 'active')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for server in getattr(cls, "servers", []):
             cls.servers_client.delete_server(server['id'])
-        super(ListSnapshotImagesTest, cls).tearDownClass()
+        super(ListSnapshotImagesTest, cls).resource_cleanup()
 
     @classmethod
     def _create_snapshot(cls, name, image_id, flavor, **kwargs):
@@ -329,8 +327,8 @@
 
 class UpdateImageMetaTest(base.BaseV1ImageTest):
     @classmethod
-    def setUpClass(cls):
-        super(UpdateImageMetaTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(UpdateImageMetaTest, cls).resource_setup()
         cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
 
     @classmethod
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index a974ebb..7e018e5 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -125,9 +125,8 @@
     """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ListImagesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ListImagesTest, cls).resource_setup()
         # We add a few images here to test the listing functionality of
         # the images API
         cls._create_standard_image('bare', 'raw')
diff --git a/tempest/api/queuing/__init__.py b/tempest/api/messaging/__init__.py
similarity index 100%
rename from tempest/api/queuing/__init__.py
rename to tempest/api/messaging/__init__.py
diff --git a/tempest/api/queuing/base.py b/tempest/api/messaging/base.py
similarity index 78%
rename from tempest/api/queuing/base.py
rename to tempest/api/messaging/base.py
index 41a02f2..58511a9 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/messaging/base.py
@@ -23,25 +23,25 @@
 LOG = logging.getLogger(__name__)
 
 
-class BaseQueuingTest(test.BaseTestCase):
+class BaseMessagingTest(test.BaseTestCase):
 
     """
-    Base class for the Queuing tests that use the Tempest Zaqar REST client
+    Base class for the Messaging tests that use the Tempest Zaqar REST client
 
     It is assumed that the following option is defined in the
     [service_available] section of etc/tempest.conf
 
-        queuing as True
+        zaqar as True
     """
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseQueuingTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseMessagingTest, cls).resource_setup()
         if not CONF.service_available.zaqar:
             raise cls.skipException("Zaqar support is required")
         os = cls.get_client_manager()
-        cls.queuing_cfg = CONF.queuing
-        cls.client = os.queuing_client
+        cls.messaging_cfg = CONF.messaging
+        cls.client = os.messaging_client
 
     @classmethod
     def create_queue(cls, queue_name):
@@ -93,42 +93,42 @@
 
     @classmethod
     def post_messages(cls, queue_name, rbody):
-        '''Wrapper utility that posts messages to a queue.'''
+        """Wrapper utility that posts messages to a queue."""
         resp, body = cls.client.post_messages(queue_name, rbody)
 
         return resp, body
 
     @classmethod
     def list_messages(cls, queue_name):
-        '''Wrapper utility that lists the messages in a queue.'''
+        """Wrapper utility that lists the messages in a queue."""
         resp, body = cls.client.list_messages(queue_name)
 
         return resp, body
 
     @classmethod
     def get_single_message(cls, message_uri):
-        '''Wrapper utility that gets a single message.'''
+        """Wrapper utility that gets a single message."""
         resp, body = cls.client.get_single_message(message_uri)
 
         return resp, body
 
     @classmethod
     def get_multiple_messages(cls, message_uri):
-        '''Wrapper utility that gets multiple messages.'''
+        """Wrapper utility that gets multiple messages."""
         resp, body = cls.client.get_multiple_messages(message_uri)
 
         return resp, body
 
     @classmethod
     def delete_messages(cls, message_uri):
-        '''Wrapper utility that deletes messages.'''
+        """Wrapper utility that deletes messages."""
         resp, body = cls.client.delete_messages(message_uri)
 
         return resp, body
 
     @classmethod
     def post_claims(cls, queue_name, rbody, url_params=False):
-        '''Wrapper utility that claims messages.'''
+        """Wrapper utility that claims messages."""
         resp, body = cls.client.post_claims(
             queue_name, rbody, url_params=False)
 
@@ -136,33 +136,34 @@
 
     @classmethod
     def query_claim(cls, claim_uri):
-        '''Wrapper utility that gets a claim.'''
+        """Wrapper utility that gets a claim."""
         resp, body = cls.client.query_claim(claim_uri)
 
         return resp, body
 
     @classmethod
     def update_claim(cls, claim_uri, rbody):
-        '''Wrapper utility that updates a claim.'''
+        """Wrapper utility that updates a claim."""
         resp, body = cls.client.update_claim(claim_uri, rbody)
 
         return resp, body
 
     @classmethod
     def release_claim(cls, claim_uri):
-        '''Wrapper utility that deletes a claim.'''
+        """Wrapper utility that deletes a claim."""
         resp, body = cls.client.release_claim(claim_uri)
 
         return resp, body
 
     @classmethod
     def generate_message_body(cls, repeat=1):
-        '''Wrapper utility that sets the metadata of a queue.'''
-        message_ttl = data_utils.rand_int_id(start=60,
-                                             end=CONF.queuing.max_message_ttl)
+        """Wrapper utility that sets the metadata of a queue."""
+        message_ttl = data_utils.\
+            rand_int_id(start=60, end=CONF.messaging.max_message_ttl)
 
-        key = data_utils.arbitrary_string(size=20, base_text='QueuingKey')
-        value = data_utils.arbitrary_string(size=20, base_text='QueuingValue')
+        key = data_utils.arbitrary_string(size=20, base_text='MessagingKey')
+        value = data_utils.arbitrary_string(size=20,
+                                            base_text='MessagingValue')
         message_body = {key: value}
 
         rbody = ([{'body': message_body, 'ttl': message_ttl}] * repeat)
diff --git a/tempest/api/queuing/test_claims.py b/tempest/api/messaging/test_claims.py
similarity index 87%
rename from tempest/api/queuing/test_claims.py
rename to tempest/api/messaging/test_claims.py
index a306623..1b004dd 100644
--- a/tempest/api/queuing/test_claims.py
+++ b/tempest/api/messaging/test_claims.py
@@ -16,7 +16,7 @@
 import logging
 import urlparse
 
-from tempest.api.queuing import base
+from tempest.api.messaging import base
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import test
@@ -26,12 +26,12 @@
 CONF = config.CONF
 
 
-class TestClaims(base.BaseQueuingTest):
+class TestClaims(base.BaseMessagingTest):
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(TestClaims, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestClaims, cls).resource_setup()
         cls.queue_name = data_utils.rand_name('Queues-Test')
         # Create Queue
         cls.create_queue(cls.queue_name)
@@ -44,9 +44,9 @@
 
         # Post Claim
         claim_ttl = data_utils.rand_int_id(start=60,
-                                           end=CONF.queuing.max_claim_ttl)
-        claim_grace = data_utils.rand_int_id(start=60,
-                                             end=CONF.queuing.max_claim_grace)
+                                           end=CONF.messaging.max_claim_ttl)
+        claim_grace = data_utils.rand_int_id(
+            start=60, end=CONF.messaging.max_claim_grace)
         claim_body = {"ttl": claim_ttl, "grace": claim_grace}
         resp, body = self.client.post_claims(queue_name=self.queue_name,
                                              rbody=claim_body)
@@ -90,7 +90,7 @@
 
         # Update Claim
         claim_ttl = data_utils.rand_int_id(start=60,
-                                           end=CONF.queuing.max_claim_ttl)
+                                           end=CONF.messaging.max_claim_ttl)
         update_rbody = {"ttl": claim_ttl}
 
         self.client.update_claim(claim_uri, rbody=update_rbody)
@@ -118,6 +118,6 @@
         self.client.delete_messages(message_uri)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_queue(cls.queue_name)
-        super(TestClaims, cls).tearDownClass()
+        super(TestClaims, cls).resource_cleanup()
diff --git a/tempest/api/queuing/test_messages.py b/tempest/api/messaging/test_messages.py
similarity index 92%
rename from tempest/api/queuing/test_messages.py
rename to tempest/api/messaging/test_messages.py
index 9546c91..3c27ac2 100644
--- a/tempest/api/queuing/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -15,7 +15,7 @@
 
 import logging
 
-from tempest.api.queuing import base
+from tempest.api.messaging import base
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import test
@@ -25,17 +25,17 @@
 CONF = config.CONF
 
 
-class TestMessages(base.BaseQueuingTest):
+class TestMessages(base.BaseMessagingTest):
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(TestMessages, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestMessages, cls).resource_setup()
         cls.queue_name = data_utils.rand_name('Queues-Test')
         # Create Queue
         cls.client.create_queue(cls.queue_name)
 
-    def _post_messages(self, repeat=CONF.queuing.max_messages_per_page):
+    def _post_messages(self, repeat=CONF.messaging.max_messages_per_page):
         message_body = self.generate_message_body(repeat=repeat)
         resp, body = self.post_messages(queue_name=self.queue_name,
                                         rbody=message_body)
@@ -117,6 +117,6 @@
         self.assertEqual('204', resp['status'])
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_queue(cls.queue_name)
-        super(TestMessages, cls).tearDownClass()
+        super(TestMessages, cls).resource_cleanup()
diff --git a/tempest/api/queuing/test_queues.py b/tempest/api/messaging/test_queues.py
similarity index 93%
rename from tempest/api/queuing/test_queues.py
rename to tempest/api/messaging/test_queues.py
index b340b60..ab099ff 100644
--- a/tempest/api/queuing/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -18,7 +18,7 @@
 from six import moves
 from testtools import matchers
 
-from tempest.api.queuing import base
+from tempest.api.messaging import base
 from tempest.common.utils import data_utils
 from tempest import test
 
@@ -26,7 +26,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestQueues(base.BaseQueuingTest):
+class TestQueues(base.BaseMessagingTest):
 
     @test.attr(type='smoke')
     def test_create_queue(self):
@@ -40,12 +40,12 @@
         self.assertEqual('', body)
 
 
-class TestManageQueue(base.BaseQueuingTest):
+class TestManageQueue(base.BaseMessagingTest):
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(TestManageQueue, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestManageQueue, cls).resource_setup()
         cls.queues = list()
         for _ in moves.xrange(5):
             queue_name = data_utils.rand_name('Queues-Test')
@@ -125,7 +125,7 @@
         self.assertThat(body, matchers.Equals(req_body))
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for queue_name in cls.queues:
             cls.client.delete_queue(queue_name)
-        super(TestManageQueue, cls).tearDownClass()
+        super(TestManageQueue, cls).resource_cleanup()
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 834c010..d9b2848 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -83,6 +83,7 @@
         cls.fw_rules = []
         cls.fw_policies = []
         cls.ipsecpolicies = []
+        cls.ethertype = "IPv" + str(cls._ip_version)
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/network/common.py b/tempest/api/network/common.py
deleted file mode 100644
index 5ac8b5a..0000000
--- a/tempest/api/network/common.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import abc
-
-import six
-
-
-class AttributeDict(dict):
-
-    """
-    Provide attribute access (dict.key) to dictionary values.
-    """
-
-    def __getattr__(self, name):
-        """Allow attribute access for all keys in the dict."""
-        if name in self:
-            return self[name]
-        return super(AttributeDict, self).__getattribute__(name)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DeletableResource(AttributeDict):
-
-    """
-    Support deletion of neutron resources (networks, subnets) via a
-    delete() method, as is supported by keystone and nova resources.
-    """
-
-    def __init__(self, *args, **kwargs):
-        self.client = kwargs.pop('client', None)
-        super(DeletableResource, self).__init__(*args, **kwargs)
-
-    def __str__(self):
-        return '<%s id="%s" name="%s">' % (self.__class__.__name__,
-                                           self.id, self.name)
-
-    @abc.abstractmethod
-    def delete(self):
-        return
-
-    def __hash__(self):
-        return id(self)
-
-
-class DeletableNetwork(DeletableResource):
-
-    def delete(self):
-        self.client.delete_network(self.id)
-
-
-class DeletableSubnet(DeletableResource):
-
-    def __init__(self, *args, **kwargs):
-        super(DeletableSubnet, self).__init__(*args, **kwargs)
-        self._router_ids = set()
-
-    def update(self, *args, **kwargs):
-        body = dict(subnet=dict(*args, **kwargs))
-        result = self.client.update_subnet(subnet=self.id, body=body)
-        super(DeletableSubnet, self).update(**result['subnet'])
-
-    def add_to_router(self, router_id):
-        self._router_ids.add(router_id)
-        body = dict(subnet_id=self.id)
-        self.client.add_interface_router(router_id, body=body)
-
-    def delete(self):
-        for router_id in self._router_ids.copy():
-            body = dict(subnet_id=self.id)
-            self.client.remove_interface_router(router_id, body=body)
-            self._router_ids.remove(router_id)
-        self.client.delete_subnet(self.id)
-
-
-class DeletableRouter(DeletableResource):
-
-    def add_gateway(self, network_id):
-        body = dict(network_id=network_id)
-        self.client.add_gateway_router(self.id, body=body)
-
-    def delete(self):
-        self.client.remove_gateway_router(self.id)
-        self.client.delete_router(self.id)
-
-
-class DeletableFloatingIp(DeletableResource):
-
-    def update(self, *args, **kwargs):
-        result = self.client.update_floatingip(floatingip=self.id,
-                                               body=dict(
-                                                   floatingip=dict(*args,
-                                                                   **kwargs)
-                                               ))
-        super(DeletableFloatingIp, self).update(**result['floatingip'])
-
-    def __repr__(self):
-        return '<%s addr="%s">' % (self.__class__.__name__,
-                                   self.floating_ip_address)
-
-    def __str__(self):
-        return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
-                                                     self.id)
-
-    def delete(self):
-        self.client.delete_floatingip(self.id)
-
-
-class DeletablePort(DeletableResource):
-
-    def delete(self):
-        self.client.delete_port(self.id)
-
-
-class DeletableSecurityGroup(DeletableResource):
-
-    def delete(self):
-        self.client.delete_security_group(self.id)
-
-
-class DeletableSecurityGroupRule(DeletableResource):
-
-    def __repr__(self):
-        return '<%s id="%s">' % (self.__class__.__name__, self.id)
-
-    def delete(self):
-        self.client.delete_security_group_rule(self.id)
-
-
-class DeletablePool(DeletableResource):
-
-    def delete(self):
-        self.client.delete_pool(self.id)
-
-
-class DeletableMember(DeletableResource):
-
-    def delete(self):
-        self.client.delete_member(self.id)
-
-
-class DeletableVip(DeletableResource):
-
-    def delete(self):
-        self.client.delete_vip(self.id)
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 193bf76..11588d6 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -86,7 +86,6 @@
                  (fw_id, target_states))
             raise exceptions.TimeoutException(m)
 
-    @test.attr(type='smoke')
     def test_list_firewall_rules(self):
         # List firewall rules
         _, fw_rules = self.client.list_firewall_rules()
@@ -104,7 +103,6 @@
                         m['ip_version'],
                         m['enabled']) for m in fw_rules])
 
-    @test.attr(type='smoke')
     def test_create_update_delete_firewall_rule(self):
         # Create firewall rule
         _, body = self.client.create_firewall_rule(
@@ -125,14 +123,12 @@
         self.assertNotIn(fw_rule_id,
                          [m['id'] for m in fw_rules['firewall_rules']])
 
-    @test.attr(type='smoke')
     def test_show_firewall_rule(self):
         # show a created firewall rule
         _, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
         for key, value in fw_rule['firewall_rule'].iteritems():
             self.assertEqual(self.fw_rule[key], value)
 
-    @test.attr(type='smoke')
     def test_list_firewall_policies(self):
         _, fw_policies = self.client.list_firewall_policies()
         fw_policies = fw_policies['firewall_policies']
@@ -143,7 +139,6 @@
                         m['name'],
                         m['firewall_rules']) for m in fw_policies])
 
-    @test.attr(type='smoke')
     def test_create_update_delete_firewall_policy(self):
         # Create firewall policy
         _, body = self.client.create_firewall_policy(
@@ -166,7 +161,6 @@
         fw_policies = fw_policies['firewall_policies']
         self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
 
-    @test.attr(type='smoke')
     def test_show_firewall_policy(self):
         # show a created firewall policy
         _, fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
@@ -174,7 +168,6 @@
         for key, value in fw_policy.iteritems():
             self.assertEqual(self.fw_policy[key], value)
 
-    @test.attr(type='smoke')
     def test_create_show_delete_firewall(self):
         # Create tenant network resources required for an ACTIVE firewall
         network = self.create_network()
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index e1eb48d..986a2c8 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -17,6 +17,7 @@
 import testtools
 
 from tempest.api.network import base
+from tempest.common import custom_matchers
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
@@ -65,13 +66,94 @@
         cls.name = cls.network['name']
         cls.subnet = cls.create_subnet(cls.network)
         cls.cidr = cls.subnet['cidr']
+        cls._subnet_data = {6: {'gateway':
+                                str(cls._get_gateway_from_tempest_conf(6)),
+                                'allocation_pools':
+                                cls._get_allocation_pools_from_gateway(6),
+                                'dns_nameservers': ['2001:4860:4860::8844',
+                                                    '2001:4860:4860::8888'],
+                                'host_routes': [{'destination': '2001::/64',
+                                                 'nexthop': '2003::1'}],
+                                'new_host_routes': [{'destination':
+                                                     '2001::/64',
+                                                     'nexthop': '2005::1'}],
+                                'new_dns_nameservers':
+                                ['2001:4860:4860::7744',
+                                 '2001:4860:4860::7888']},
+                            4: {'gateway':
+                                str(cls._get_gateway_from_tempest_conf(4)),
+                                'allocation_pools':
+                                cls._get_allocation_pools_from_gateway(4),
+                                'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
+                                'host_routes': [{'destination': '10.20.0.0/32',
+                                                 'nexthop': '10.100.1.1'}],
+                                'new_host_routes': [{'destination':
+                                                     '10.20.0.0/32',
+                                                     'nexthop':
+                                                     '10.100.1.2'}],
+                                'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
+
+    @classmethod
+    def _get_gateway_from_tempest_conf(cls, ip_version):
+        """Return first subnet gateway for configured CIDR """
+        if ip_version == 4:
+            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+            mask_bits = CONF.network.tenant_network_mask_bits
+        elif ip_version == 6:
+            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+            mask_bits = CONF.network.tenant_network_v6_mask_bits
+
+        if mask_bits >= cidr.prefixlen:
+            return netaddr.IPAddress(cidr) + 1
+        else:
+            for subnet in cidr.subnet(mask_bits):
+                return netaddr.IPAddress(subnet) + 1
+
+    @classmethod
+    def _get_allocation_pools_from_gateway(cls, ip_version):
+        """Return allocation range for subnet of given gateway"""
+        gateway = cls._get_gateway_from_tempest_conf(ip_version)
+        return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
+
+    def subnet_dict(self, include_keys):
+        """Return a subnet dict which has include_keys and their corresponding
+           value from self._subnet_data
+        """
+        return dict((key, self._subnet_data[self._ip_version][key])
+                    for key in include_keys)
+
+    def _compare_resource_attrs(self, actual, expected):
+        exclude_keys = set(actual).symmetric_difference(expected)
+        self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
+                        expected, exclude_keys))
+
+    def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
+                                     **kwargs):
+        network = self.create_network()
+        net_id = network['id']
+        gateway = kwargs.pop('gateway', None)
+        subnet = self.create_subnet(network, gateway, cidr, mask_bits,
+                                    **kwargs)
+        compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
+                                 mask_bits=mask_bits, **kwargs)
+        compare_args = dict((k, v) for k, v in compare_args_full.iteritems()
+                            if v is not None)
+
+        if 'dns_nameservers' in set(subnet).intersection(compare_args):
+            self.assertEqual(sorted(compare_args['dns_nameservers']),
+                             sorted(subnet['dns_nameservers']))
+            del subnet['dns_nameservers'], compare_args['dns_nameservers']
+
+        self._compare_resource_attrs(subnet, compare_args)
+        self.client.delete_network(net_id)
+        self.networks.pop()
+        self.subnets.pop()
 
     @test.attr(type='smoke')
     def test_create_update_delete_network_subnet(self):
         # Create a network
         name = data_utils.rand_name('network-')
-        _, body = self.client.create_network(name=name)
-        network = body['network']
+        network = self.create_network(network_name=name)
         net_id = network['id']
         self.assertEqual('ACTIVE', network['status'])
         # Verify network update
@@ -87,11 +169,6 @@
         _, body = self.client.update_subnet(subnet_id, name=new_name)
         updated_subnet = body['subnet']
         self.assertEqual(updated_subnet['name'], new_name)
-        # Delete subnet and network
-        _, body = self.client.delete_subnet(subnet_id)
-        # Remove subnet from cleanup list
-        self.subnets.pop()
-        _, body = self.client.delete_network(net_id)
 
     @test.attr(type='smoke')
     def test_show_network(self):
@@ -204,32 +281,65 @@
 
     @test.attr(type='smoke')
     def test_create_delete_subnet_with_gw(self):
-        gateway = '10.100.0.13'
-        name = data_utils.rand_name('network-')
-        _, body = self.client.create_network(name=name)
-        network = body['network']
-        net_id = network['id']
-        subnet = self.create_subnet(network, gateway)
-        # Verifies Subnet GW in IPv4
-        self.assertEqual(subnet['gateway_ip'], gateway)
-        # Delete network and subnet
-        self.client.delete_network(net_id)
-        self.subnets.pop()
+        self._create_verify_delete_subnet(
+            **self.subnet_dict(['gateway']))
 
     @test.attr(type='smoke')
-    def test_create_delete_subnet_without_gw(self):
-        net = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-        gateway_ip = str(netaddr.IPAddress(net.first + 1))
-        name = data_utils.rand_name('network-')
-        _, body = self.client.create_network(name=name)
-        network = body['network']
-        net_id = network['id']
-        subnet = self.create_subnet(network)
-        # Verifies Subnet GW in IPv4
-        self.assertEqual(subnet['gateway_ip'], gateway_ip)
-        # Delete network and subnet
-        self.client.delete_network(net_id)
-        self.subnets.pop()
+    def test_create_delete_subnet_with_allocation_pools(self):
+        self._create_verify_delete_subnet(
+            **self.subnet_dict(['allocation_pools']))
+
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_gw_and_allocation_pools(self):
+        self._create_verify_delete_subnet(**self.subnet_dict(
+            ['gateway', 'allocation_pools']))
+
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
+        self._create_verify_delete_subnet(
+            **self.subnet_dict(['host_routes', 'dns_nameservers']))
+
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_dhcp_enabled(self):
+        self._create_verify_delete_subnet(enable_dhcp=True)
+
+    @test.attr(type='smoke')
+    def test_update_subnet_gw_dns_host_routes_dhcp(self):
+        network = self.create_network()
+
+        subnet = self.create_subnet(
+            network, **self.subnet_dict(['gateway', 'host_routes',
+                                        'dns_nameservers',
+                                         'allocation_pools']))
+        subnet_id = subnet['id']
+        new_gateway = str(netaddr.IPAddress(
+                          self._subnet_data[self._ip_version]['gateway']) + 1)
+        # Verify subnet update
+        new_host_routes = self._subnet_data[self._ip_version][
+            'new_host_routes']
+
+        new_dns_nameservers = self._subnet_data[self._ip_version][
+            'new_dns_nameservers']
+        kwargs = {'host_routes': new_host_routes,
+                  'dns_nameservers': new_dns_nameservers,
+                  'gateway_ip': new_gateway, 'enable_dhcp': True}
+
+        new_name = "New_subnet"
+        _, body = self.client.update_subnet(subnet_id, name=new_name,
+                                            **kwargs)
+        updated_subnet = body['subnet']
+        kwargs['name'] = new_name
+        self.assertEqual(sorted(updated_subnet['dns_nameservers']),
+                         sorted(kwargs['dns_nameservers']))
+        del subnet['dns_nameservers'], kwargs['dns_nameservers']
+
+        self._compare_resource_attrs(updated_subnet, kwargs)
+
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_all_attributes(self):
+        self._create_verify_delete_subnet(
+            enable_dhcp=True,
+            **self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
 
 
 class NetworksTestXML(NetworksTestJSON):
@@ -376,51 +486,30 @@
         net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
         gateway = str(netaddr.IPAddress(net.first + 2))
         name = data_utils.rand_name('network-')
-        _, body = self.client.create_network(name=name)
-        network = body['network']
-        net_id = network['id']
+        network = self.create_network(network_name=name)
         subnet = self.create_subnet(network, gateway)
         # Verifies Subnet GW in IPv6
         self.assertEqual(subnet['gateway_ip'], gateway)
-        # Delete network and subnet
-        self.client.delete_network(net_id)
-        self.subnets.pop()
 
     @test.attr(type='smoke')
     def test_create_delete_subnet_without_gw(self):
         net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
         gateway_ip = str(netaddr.IPAddress(net.first + 1))
         name = data_utils.rand_name('network-')
-        _, body = self.client.create_network(name=name)
-        network = body['network']
-        net_id = network['id']
+        network = self.create_network(network_name=name)
         subnet = self.create_subnet(network)
         # Verifies Subnet GW in IPv6
         self.assertEqual(subnet['gateway_ip'], gateway_ip)
-        # Delete network and subnet
-        _, body = self.client.delete_network(net_id)
-        self.subnets.pop()
 
     @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
                           "IPv6 extended attributes for subnets not "
                           "available")
     @test.attr(type='smoke')
     def test_create_delete_subnet_with_v6_attributes(self):
-        name = data_utils.rand_name('network-')
-        _, body = self.client.create_network(name=name)
-        network = body['network']
-        net_id = network['id']
-        subnet = self.create_subnet(network,
-                                    gateway='fe80::1',
-                                    ipv6_ra_mode='slaac',
-                                    ipv6_address_mode='slaac')
-        # Verifies Subnet GW in IPv6
-        self.assertEqual(subnet['gateway_ip'], 'fe80::1')
-        self.assertEqual(subnet['ipv6_ra_mode'], 'slaac')
-        self.assertEqual(subnet['ipv6_address_mode'], 'slaac')
-        # Delete network and subnet
-        self.client.delete_network(net_id)
-        self.subnets.pop()
+        self._create_verify_delete_subnet(
+            gateway=self._subnet_data[self._ip_version]['gateway'],
+            ipv6_ra_mode='slaac',
+            ipv6_address_mode='slaac')
 
 
 class NetworksIpV6TestXML(NetworksIpV6TestJSON):
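The refactored subnet tests above funnel through two helpers added earlier in this patch (outside this excerpt): subnet_dict(), which selects per-IP-version attribute values from a _subnet_data table, and _create_verify_delete_subnet(), which creates the network/subnet with those kwargs, checks the returned attributes and tears everything down. A minimal standalone sketch of the selection pattern follows; the attribute values and the helper body are illustrative assumptions, not the patch's exact code:

    # Illustrative only: shows the dict-driven parameterization the tests use.
    _subnet_data = {
        4: {'gateway': '10.100.0.1',
            'dns_nameservers': ['8.8.4.4'],
            'host_routes': [{'destination': '10.20.0.0/32',
                             'nexthop': '10.100.1.1'}]},
        6: {'gateway': '2003::1',
            'dns_nameservers': ['2001:4860:4860::8844'],
            'host_routes': [{'destination': '2001:db8::/64',
                             'nexthop': '2003::2'}]},
    }

    def subnet_dict(ip_version, include_keys):
        # Return only the requested subnet attributes for this IP version,
        # ready to be passed as **kwargs to create_subnet().
        return {key: _subnet_data[ip_version][key] for key in include_keys}

    print(subnet_dict(6, ['gateway', 'dns_nameservers']))
    # {'gateway': '2003::1', 'dns_nameservers': ['2001:4860:4860::8844']}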
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index ce0bb57..cdd3a29 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -128,7 +128,6 @@
         for port in ports:
             self.assertEqual(sorted(fields), sorted(port.keys()))
 
-    @test.skip_because(bug="1364166")
     @test.attr(type='smoke')
     def test_update_port_with_second_ip(self):
         # Create a network with two subnets
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 9764b4d..58ad39c 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -17,11 +17,15 @@
 
 from tempest.api.network import base_security_groups as base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import test
 
+CONF = config.CONF
+
 
 class SecGroupTest(base.BaseSecGroupTest):
     _interface = 'json'
+    _tenant_network_cidr = CONF.network.tenant_network_cidr
 
     @classmethod
     def resource_setup(cls):
@@ -30,6 +34,40 @@
             msg = "security-group extension not enabled."
             raise cls.skipException(msg)
 
+    def _create_verify_security_group_rule(self, sg_id, direction,
+                                           ethertype, protocol,
+                                           port_range_min,
+                                           port_range_max,
+                                           remote_group_id=None,
+                                           remote_ip_prefix=None):
+        # Create Security Group rule with the input params and validate
+        # that the SG rule is created with the same parameters.
+        resp, rule_create_body = self.client.create_security_group_rule(
+            security_group_id=sg_id,
+            direction=direction,
+            ethertype=ethertype,
+            protocol=protocol,
+            port_range_min=port_range_min,
+            port_range_max=port_range_max,
+            remote_group_id=remote_group_id,
+            remote_ip_prefix=remote_ip_prefix
+        )
+
+        sec_group_rule = rule_create_body['security_group_rule']
+        self.addCleanup(self._delete_security_group_rule,
+                        sec_group_rule['id'])
+
+        expected = {'direction': direction, 'protocol': protocol,
+                    'ethertype': ethertype, 'port_range_min': port_range_min,
+                    'port_range_max': port_range_max,
+                    'remote_group_id': remote_group_id,
+                    'remote_ip_prefix': remote_ip_prefix}
+        for key, value in six.iteritems(expected):
+            self.assertEqual(value, sec_group_rule[key],
+                             "Field %s of the created security group "
+                             "rule does not match with %s." %
+                             (key, value))
+
     @test.attr(type='smoke')
     def test_list_security_groups(self):
         # Verify that the security group belonging to the tenant exists in the list
@@ -80,7 +118,8 @@
             _, rule_create_body = self.client.create_security_group_rule(
                 security_group_id=group_create_body['security_group']['id'],
                 protocol=protocol,
-                direction='ingress'
+                direction='ingress',
+                ethertype=self.ethertype
             )
 
             # Show details of the created security rule
@@ -102,30 +141,93 @@
 
     @test.attr(type='smoke')
     def test_create_security_group_rule_with_additional_args(self):
-        # Verify creating security group rule with the following
-        # arguments works: "protocol": "tcp", "port_range_max": 77,
-        # "port_range_min": 77, "direction":"ingress".
-        group_create_body, _ = self._create_security_group()
+        """Verify security group rule with additional arguments works.
 
+        direction:ingress, ethertype:[IPv4/IPv6],
+        protocol:tcp, port_range_min:77, port_range_max:77
+        """
+        group_create_body, _ = self._create_security_group()
+        sg_id = group_create_body['security_group']['id']
         direction = 'ingress'
         protocol = 'tcp'
         port_range_min = 77
         port_range_max = 77
-        _, rule_create_body = self.client.create_security_group_rule(
-            security_group_id=group_create_body['security_group']['id'],
-            direction=direction,
-            protocol=protocol,
-            port_range_min=port_range_min,
-            port_range_max=port_range_max
-        )
+        self._create_verify_security_group_rule(sg_id, direction,
+                                                self.ethertype, protocol,
+                                                port_range_min,
+                                                port_range_max)
 
-        sec_group_rule = rule_create_body['security_group_rule']
+    @test.attr(type='smoke')
+    def test_create_security_group_rule_with_icmp_type_code(self):
+        """Verify security group rule for icmp protocol works.
 
-        self.assertEqual(sec_group_rule['direction'], direction)
-        self.assertEqual(sec_group_rule['protocol'], protocol)
-        self.assertEqual(int(sec_group_rule['port_range_min']), port_range_min)
-        self.assertEqual(int(sec_group_rule['port_range_max']), port_range_max)
+        Specify icmp type (port_range_min) and icmp code
+        (port_range_max) with different values. A separate testcase
+        is added for the icmp protocol as icmp validation differs
+        from tcp/udp.
+        """
+        group_create_body, _ = self._create_security_group()
+
+        sg_id = group_create_body['security_group']['id']
+        direction = 'ingress'
+        protocol = 'icmp'
+        icmp_type_codes = [(3, 2), (2, 3), (3, 0), (2, None)]
+        for icmp_type, icmp_code in icmp_type_codes:
+            self._create_verify_security_group_rule(sg_id, direction,
+                                                    self.ethertype, protocol,
+                                                    icmp_type, icmp_code)
+
+    @test.attr(type='smoke')
+    def test_create_security_group_rule_with_remote_group_id(self):
+        # Verify creating security group rule with remote_group_id works
+        sg1_body, _ = self._create_security_group()
+        sg2_body, _ = self._create_security_group()
+
+        sg_id = sg1_body['security_group']['id']
+        direction = 'ingress'
+        protocol = 'udp'
+        port_range_min = 50
+        port_range_max = 55
+        remote_id = sg2_body['security_group']['id']
+        self._create_verify_security_group_rule(sg_id, direction,
+                                                self.ethertype, protocol,
+                                                port_range_min,
+                                                port_range_max,
+                                                remote_group_id=remote_id)
+
+    @test.attr(type='smoke')
+    def test_create_security_group_rule_with_remote_ip_prefix(self):
+        # Verify creating security group rule with remote_ip_prefix works
+        sg1_body, _ = self._create_security_group()
+
+        sg_id = sg1_body['security_group']['id']
+        direction = 'ingress'
+        protocol = 'tcp'
+        port_range_min = 76
+        port_range_max = 77
+        ip_prefix = self._tenant_network_cidr
+        self._create_verify_security_group_rule(sg_id, direction,
+                                                self.ethertype, protocol,
+                                                port_range_min,
+                                                port_range_max,
+                                                remote_ip_prefix=ip_prefix)
 
 
 class SecGroupTestXML(SecGroupTest):
     _interface = 'xml'
+
+
+class SecGroupIPv6Test(SecGroupTest):
+    _ip_version = 6
+    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+    @classmethod
+    def setUpClass(cls):
+        if not CONF.network_feature_enabled.ipv6:
+            skip_msg = "IPv6 Tests are disabled."
+            raise cls.skipException(skip_msg)
+        super(SecGroupIPv6Test, cls).setUpClass()
+
+
+class SecGroupIPv6TestXML(SecGroupIPv6Test):
+    _interface = 'xml'
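The new SecGroupIPv6Test variant only overrides _ip_version and _tenant_network_cidr; the self.ethertype used throughout the rule tests is presumably derived from _ip_version in the shared network base class, which is changed earlier in this patch and falls outside this excerpt. A rough, runnable sketch of that assumed pattern, with made-up class names:

    class FakeSecGroupTest(object):
        _ip_version = 4
        _tenant_network_cidr = '10.100.0.0/16'

        @classmethod
        def resource_setup(cls):
            # 4 -> 'IPv4', 6 -> 'IPv6', matching the ethertype value that
            # create_security_group_rule() now passes explicitly.
            cls.ethertype = 'IPv' + str(cls._ip_version)


    class FakeSecGroupIPv6Test(FakeSecGroupTest):
        _ip_version = 6
        _tenant_network_cidr = '2003::/64'


    FakeSecGroupIPv6Test.resource_setup()
    print(FakeSecGroupIPv6Test.ethertype)  # IPv6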
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index 9c6c267..2e3091e 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -16,12 +16,16 @@
 import uuid
 
 from tempest.api.network import base_security_groups as base
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class NegativeSecGroupTest(base.BaseSecGroupTest):
     _interface = 'json'
+    _tenant_network_cidr = CONF.network.tenant_network_cidr
 
     @classmethod
     def resource_setup(cls):
@@ -60,23 +64,87 @@
         self.assertRaises(
             exceptions.BadRequest, self.client.create_security_group_rule,
             security_group_id=group_create_body['security_group']['id'],
-            protocol=pname, direction='ingress')
+            protocol=pname, direction='ingress', ethertype=self.ethertype)
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
+        group_create_body, _ = self._create_security_group()
+
+        # Create rule with bad remote_ip_prefix
+        prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
+        for remote_ip_prefix in prefix:
+            self.assertRaises(
+                exceptions.BadRequest, self.client.create_security_group_rule,
+                security_group_id=group_create_body['security_group']['id'],
+                protocol='tcp', direction='ingress', ethertype=self.ethertype,
+                remote_ip_prefix=remote_ip_prefix)
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_non_existent_remote_groupid(self):
+        group_create_body, _ = self._create_security_group()
+        non_exist_id = str(uuid.uuid4())
+
+        # Create rule with non-existent remote_group_id
+        group_ids = ['bad_group_id', non_exist_id]
+        for remote_group_id in group_ids:
+            self.assertRaises(
+                exceptions.NotFound, self.client.create_security_group_rule,
+                security_group_id=group_create_body['security_group']['id'],
+                protocol='tcp', direction='ingress', ethertype=self.ethertype,
+                remote_group_id=remote_group_id)
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_remote_ip_and_group(self):
+        sg1_body, _ = self._create_security_group()
+        sg2_body, _ = self._create_security_group()
+
+        # Create rule specifying both remote_ip_prefix and remote_group_id
+        prefix = self._tenant_network_cidr
+        self.assertRaises(
+            exceptions.BadRequest, self.client.create_security_group_rule,
+            security_group_id=sg1_body['security_group']['id'],
+            protocol='tcp', direction='ingress',
+            ethertype=self.ethertype, remote_ip_prefix=prefix,
+            remote_group_id=sg2_body['security_group']['id'])
+
+    @test.attr(type=['negative', 'gate'])
+    def test_create_security_group_rule_with_bad_ethertype(self):
+        group_create_body, _ = self._create_security_group()
+
+        # Create rule with bad ethertype
+        ethertype = 'bad_ethertype'
+        self.assertRaises(
+            exceptions.BadRequest, self.client.create_security_group_rule,
+            security_group_id=group_create_body['security_group']['id'],
+            protocol='udp', direction='ingress', ethertype=ethertype)
 
     @test.attr(type=['negative', 'gate'])
     def test_create_security_group_rule_with_invalid_ports(self):
         group_create_body, _ = self._create_security_group()
 
-        # Create rule with invalid ports
+        # Create rule for tcp protocol with invalid ports
         states = [(-16, 80, 'Invalid value for port -16'),
                   (80, 79, 'port_range_min must be <= port_range_max'),
                   (80, 65536, 'Invalid value for port 65536'),
+                  (None, 6, 'port_range_min must be <= port_range_max'),
                   (-16, 65536, 'Invalid value for port')]
         for pmin, pmax, msg in states:
             ex = self.assertRaises(
                 exceptions.BadRequest, self.client.create_security_group_rule,
                 security_group_id=group_create_body['security_group']['id'],
                 protocol='tcp', port_range_min=pmin, port_range_max=pmax,
-                direction='ingress')
+                direction='ingress', ethertype=self.ethertype)
+            self.assertIn(msg, str(ex))
+
+        # Create rule for icmp protocol with invalid ports
+        states = [(1, 256, 'Invalid value for ICMP code'),
+                  (300, 1, 'Invalid value for ICMP type')]
+        for pmin, pmax, msg in states:
+            ex = self.assertRaises(
+                exceptions.BadRequest, self.client.create_security_group_rule,
+                security_group_id=group_create_body['security_group']['id'],
+                protocol='icmp', port_range_min=pmin, port_range_max=pmax,
+                direction='ingress', ethertype=self.ethertype)
             self.assertIn(msg, str(ex))
 
     @test.attr(type=['negative', 'smoke'])
@@ -88,14 +156,54 @@
                           name=name)
 
     @test.attr(type=['negative', 'smoke'])
+    def test_create_duplicate_security_group_rule_fails(self):
+        # Creating a duplicate security group rule should fail.
+        body, _ = self._create_security_group()
+
+        min_port = 66
+        max_port = 67
+        # Create a rule with valid params
+        resp, _ = self.client.create_security_group_rule(
+            security_group_id=body['security_group']['id'],
+            direction='ingress',
+            ethertype=self.ethertype,
+            protocol='tcp',
+            port_range_min=min_port,
+            port_range_max=max_port
+        )
+
+        # Trying to create the same security group rule should fail
+        self.assertRaises(
+            exceptions.Conflict, self.client.create_security_group_rule,
+            security_group_id=body['security_group']['id'],
+            protocol='tcp', direction='ingress', ethertype=self.ethertype,
+            port_range_min=min_port, port_range_max=max_port)
+
+    @test.attr(type=['negative', 'smoke'])
     def test_create_security_group_rule_with_non_existent_security_group(self):
         # Create a security group rule with a non-existent security group.
         non_existent_sg = str(uuid.uuid4())
         self.assertRaises(exceptions.NotFound,
                           self.client.create_security_group_rule,
                           security_group_id=non_existent_sg,
-                          direction='ingress')
+                          direction='ingress', ethertype=self.ethertype)
 
 
 class NegativeSecGroupTestXML(NegativeSecGroupTest):
     _interface = 'xml'
+
+
+class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
+    _ip_version = 6
+    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+    @classmethod
+    def setUpClass(cls):
+        if not CONF.network_feature_enabled.ipv6:
+            skip_msg = "IPv6 Tests are disabled."
+            raise cls.skipException(skip_msg)
+        super(NegativeSecGroupIPv6Test, cls).setUpClass()
+
+
+class NegativeSecGroupIPv6TestXML(NegativeSecGroupIPv6Test):
+    _interface = 'xml'
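As the docstring in the positive test notes, for protocol 'icmp' the port_range_min field carries the ICMP type and port_range_max carries the ICMP code, and the negative cases above expect both to be rejected outside 0-255. A small sketch of that expectation (an assumption about the API's validation written for illustration, not Neutron's code):

    def validate_icmp_rule(icmp_type, icmp_code):
        # port_range_min / port_range_max reinterpreted as type / code.
        if icmp_type is not None and not 0 <= icmp_type <= 255:
            raise ValueError('Invalid value for ICMP type')
        if icmp_code is not None and not 0 <= icmp_code <= 255:
            raise ValueError('Invalid value for ICMP code')

    validate_icmp_rule(3, 2)        # accepted, like the positive test's pairs
    try:
        validate_icmp_rule(1, 256)  # rejected, as in the negative test above
    except ValueError as exc:
        print(exc)                  # Invalid value for ICMP code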
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index a143659..6a5fd3d 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -28,9 +28,9 @@
 class BaseObjectTest(tempest.test.BaseTestCase):
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(BaseObjectTest, cls).setUpClass()
+        super(BaseObjectTest, cls).resource_setup()
         if not CONF.service_available.swift:
             skip_msg = ("%s skipped as swift is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
@@ -72,10 +72,10 @@
         cls.data = SwiftDataGenerator(cls.identity_admin_client)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.data.teardown_all()
         cls.isolated_creds.clear_isolated_creds()
-        super(BaseObjectTest, cls).tearDownClass()
+        super(BaseObjectTest, cls).resource_cleanup()
 
     @classmethod
     def delete_containers(cls, containers, container_client=None,
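The object storage, orchestration, telemetry and volume base classes below all switch from setUpClass/tearDownClass (and the @test.safe_setup decorator) to resource_setup/resource_cleanup. The enabling change lives in tempest.test.BaseTestCase, outside this excerpt; presumably it looks roughly like the following, so that class-level skips and partial-setup failures are handled in exactly one place. A simplified sketch under that assumption:

    import unittest

    class BaseTestCase(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            super(BaseTestCase, cls).setUpClass()
            try:
                cls.resource_setup()
            except Exception:
                # Best-effort cleanup of whatever was created before the
                # failure, then re-raise so the class is reported as failed.
                cls.resource_cleanup()
                raise

        @classmethod
        def tearDownClass(cls):
            cls.resource_cleanup()
            super(BaseTestCase, cls).tearDownClass()

        @classmethod
        def resource_setup(cls):
            """Hook overridden by test classes to create shared resources."""

        @classmethod
        def resource_cleanup(cls):
            """Hook overridden by test classes to delete shared resources."""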
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index a94c883..743f1aa 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -50,16 +50,27 @@
 
         return tarpath.name, container_name, object_name
 
-    @test.attr(type='gate')
-    def test_extract_archive(self):
-        # Test bulk operation of file upload with an archived file
-        filepath, container_name, object_name = self._create_archive()
-
+    def _upload_archive(self, filepath):
+        # Upload an archived file
         params = {'extract-archive': 'tar'}
         with open(filepath) as fh:
             mydata = fh.read()
             resp, body = self.account_client.create_account(data=mydata,
                                                             params=params)
+        return resp, body
+
+    def _check_contents_deleted(self, container_name):
+        param = {'format': 'txt'}
+        resp, body = self.account_client.list_account_containers(param)
+        self.assertHeaders(resp, 'Account', 'GET')
+        self.assertNotIn(container_name, body)
+
+    @test.attr(type='gate')
+    @test.requires_ext(extension='bulk', service='object')
+    def test_extract_archive(self):
+        # Test bulk operation of file upload with an archived file
+        filepath, container_name, object_name = self._create_archive()
+        resp, _ = self._upload_archive(filepath)
 
         self.containers.append(container_name)
 
@@ -95,23 +106,17 @@
         self.assertIn(object_name, [c['name'] for c in contents_list])
 
     @test.attr(type='gate')
+    @test.requires_ext(extension='bulk', service='object')
     def test_bulk_delete(self):
         # Test bulk operation of deleting multiple files
         filepath, container_name, object_name = self._create_archive()
-
-        params = {'extract-archive': 'tar'}
-        with open(filepath) as fh:
-            mydata = fh.read()
-            resp, body = self.account_client.create_account(data=mydata,
-                                                            params=params)
+        self._upload_archive(filepath)
 
         data = '%s/%s\n%s' % (container_name, object_name, container_name)
         params = {'bulk-delete': ''}
         resp, body = self.account_client.delete_account(data=data,
                                                         params=params)
 
-        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
-
         # When deleting multiple files using the bulk operation, the response
         # does not contain 'content-length' header. This is the special case,
         # therefore the existence of response headers is checked without
@@ -124,11 +129,33 @@
         # Check only the format of common headers with custom matcher
         self.assertThat(resp, custom_matchers.AreAllWellFormatted())
 
-        # Check if a container is deleted
-        param = {'format': 'txt'}
-        resp, body = self.account_client.list_account_containers(param)
+        # Check if uploaded contents are completely deleted
+        self._check_contents_deleted(container_name)
 
-        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
-        self.assertHeaders(resp, 'Account', 'GET')
+    @test.attr(type='gate')
+    @test.requires_ext(extension='bulk', service='object')
+    def test_bulk_delete_by_POST(self):
+        # Test bulk operation of deleting multiple files using POST
+        filepath, container_name, object_name = self._create_archive()
+        self._upload_archive(filepath)
 
-        self.assertNotIn(container_name, body)
+        data = '%s/%s\n%s' % (container_name, object_name, container_name)
+        params = {'bulk-delete': ''}
+
+        resp, body = self.account_client.create_account_metadata(
+            {}, data=data, params=params)
+
+        # When deleting multiple files using the bulk operation, the response
+        # does not contain 'content-length' header. This is the special case,
+        # therefore the existence of response headers is checked without
+        # custom matcher.
+        self.assertIn('transfer-encoding', resp)
+        self.assertIn('content-type', resp)
+        self.assertIn('x-trans-id', resp)
+        self.assertIn('date', resp)
+
+        # Check only the format of common headers with custom matcher
+        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+
+        # Check if uploaded contents are completely deleted
+        self._check_contents_deleted(container_name)
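Both bulk-delete variants above send the same payload: a newline-separated list of '<container>/<object>' entries followed by the bare container name, so the emptied container is removed in the same request; judging by the client method names, the only difference is presumably whether it goes out as a DELETE or a POST on the account. The payload the tests build reduces to:

    container_name = 'TestContainer-1234'
    object_name = 'TestObject-5678'
    data = '%s/%s\n%s' % (container_name, object_name, container_name)
    print(data)
    # TestContainer-1234/TestObject-5678
    # TestContainer-1234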
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index c1eb897..97e9195 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -26,9 +26,8 @@
 class AccountQuotasTest(base.BaseObjectTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(AccountQuotasTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(AccountQuotasTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name="TestContainer")
         cls.container_client.create_container(cls.container_name)
 
@@ -71,10 +70,10 @@
         super(AccountQuotasTest, self).tearDown()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        super(AccountQuotasTest, cls).tearDownClass()
+        super(AccountQuotasTest, cls).resource_cleanup()
 
     @test.attr(type="smoke")
     @test.requires_ext(extension='account_quotas', service='object')
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 7324c2e..6c1fb5a 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,9 +27,8 @@
 class AccountQuotasNegativeTest(base.BaseObjectTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(AccountQuotasNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(AccountQuotasNegativeTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name="TestContainer")
         cls.container_client.create_container(cls.container_name)
 
@@ -71,10 +70,10 @@
         super(AccountQuotasNegativeTest, self).tearDown()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        super(AccountQuotasNegativeTest, cls).tearDownClass()
+        super(AccountQuotasNegativeTest, cls).resource_cleanup()
 
     @test.attr(type=["negative", "smoke"])
     @test.requires_ext(extension='account_quotas', service='object')
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 69cba1e..a0436ee 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -32,9 +32,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(AccountTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(AccountTest, cls).resource_setup()
         for i in moves.xrange(ord('a'), ord('f') + 1):
             name = data_utils.rand_name(name='%s-' % chr(i))
             cls.container_client.create_container(name)
@@ -42,9 +41,9 @@
         cls.containers_count = len(cls.containers)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers(cls.containers)
-        super(AccountTest, cls).tearDownClass()
+        super(AccountTest, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_list_containers(self):
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index a7d45be..e816a9f 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -21,8 +21,8 @@
 
 class ObjectTestACLs(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectTestACLs, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTestACLs, cls).resource_setup()
         cls.data.setup_test_user()
         test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 1a21ecc..9b49db3 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -23,8 +23,8 @@
 
 class ObjectACLsNegativeTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectACLsNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectACLsNegativeTest, cls).resource_setup()
         cls.data.setup_test_user()
         test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 28bde24..966a08d 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,9 +23,8 @@
 class StaticWebTest(base.BaseObjectTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(StaticWebTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(StaticWebTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name="TestContainer")
 
         # This header should be posted on the container before every test
@@ -45,10 +44,10 @@
             metadata_prefix="X-Container-")
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        super(StaticWebTest, cls).tearDownClass()
+        super(StaticWebTest, cls).resource_cleanup()
 
     @test.requires_ext(extension='staticweb', service='object')
     @test.attr('gate')
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 3e6d58c..aebcb5c 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -35,9 +35,8 @@
     clients = {}
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ContainerSyncTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ContainerSyncTest, cls).resource_setup()
         cls.containers = []
         cls.objects = []
 
@@ -62,10 +61,10 @@
             cls.containers.append(cont_name)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for client in cls.clients.values():
             cls.delete_containers(cls.containers, client[0], client[1])
-        super(ContainerSyncTest, cls).tearDownClass()
+        super(ContainerSyncTest, cls).resource_cleanup()
 
     @test.attr(type='slow')
     @test.skip_because(bug='1317133')
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index ad7e068..f6d1fb9 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -22,8 +22,8 @@
 class CrossdomainTest(base.BaseObjectTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(CrossdomainTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(CrossdomainTest, cls).resource_setup()
 
         cls.xml_start = '<?xml version="1.0"?>\n' \
                         '<!DOCTYPE cross-domain-policy SYSTEM ' \
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index e27c7ef..a1138e6 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -23,8 +23,8 @@
 class HealthcheckTest(base.BaseObjectTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(HealthcheckTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(HealthcheckTest, cls).resource_setup()
 
     def setUp(self):
         super(HealthcheckTest, self).setUp()
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 73b4f3b..8cec2fc 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -23,8 +23,8 @@
 
 class ObjectExpiryTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectExpiryTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectExpiryTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
 
@@ -36,9 +36,9 @@
                                                    self.object_name, '')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers([cls.container_name])
-        super(ObjectExpiryTest, cls).tearDownClass()
+        super(ObjectExpiryTest, cls).resource_cleanup()
 
     def _test_object_expiry(self, metadata):
         # update object metadata
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index a0fb708..05c8ff2 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -30,9 +30,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ObjectFormPostTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectFormPostTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.object_name = data_utils.rand_name(name='ObjectTemp')
 
@@ -56,10 +55,10 @@
             self.key)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
         cls.delete_containers(cls.containers)
-        super(ObjectFormPostTest, cls).tearDownClass()
+        super(ObjectFormPostTest, cls).resource_cleanup()
 
     def get_multipart_form(self, expires=600):
         path = "%s/%s/%s" % (
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 103bc8e..32f5917 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -30,9 +30,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ObjectFormPostNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectFormPostNegativeTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.object_name = data_utils.rand_name(name='ObjectTemp')
 
@@ -56,10 +55,10 @@
             self.key)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
         cls.delete_containers(cls.containers)
-        super(ObjectFormPostNegativeTest, cls).tearDownClass()
+        super(ObjectFormPostNegativeTest, cls).resource_cleanup()
 
     def get_multipart_form(self, expires=600):
         path = "%s/%s/%s" % (
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 8b74b7e..56ab1fb 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -30,16 +30,16 @@
 
 class ObjectTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
         cls.containers = [cls.container_name]
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers(cls.containers)
-        super(ObjectTest, cls).tearDownClass()
+        super(ObjectTest, cls).resource_cleanup()
 
     def _create_object(self, metadata=None):
         # setup object
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index f5ebce7..e70bd9a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -30,8 +30,8 @@
 class ObjectTempUrlTest(base.BaseObjectTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(ObjectTempUrlTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTempUrlTest, cls).resource_setup()
         # create a container
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
@@ -52,14 +52,14 @@
                                         cls.object_name, cls.content)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for metadata in cls.metadatas:
             cls.account_client.delete_account_metadata(
                 metadata=metadata)
 
         cls.delete_containers(cls.containers)
 
-        super(ObjectTempUrlTest, cls).tearDownClass()
+        super(ObjectTempUrlTest, cls).resource_cleanup()
 
     def setUp(self):
         super(ObjectTempUrlTest, self).setUp()
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 28173fe..b752348 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -31,9 +31,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ObjectTempUrlNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTempUrlNegativeTest, cls).resource_setup()
 
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
@@ -47,13 +46,13 @@
             cls.account_client.list_account_metadata()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         resp, _ = cls.account_client.delete_account_metadata(
             metadata=cls.metadata)
 
         cls.delete_containers(cls.containers)
 
-        super(ObjectTempUrlNegativeTest, cls).tearDownClass()
+        super(ObjectTempUrlNegativeTest, cls).resource_cleanup()
 
     def setUp(self):
         super(ObjectTempUrlNegativeTest, self).setUp()
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 971449d..5fe4fc8 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -25,14 +25,14 @@
 
 class ContainerTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ContainerTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ContainerTest, cls).resource_setup()
         cls.containers = []
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers(cls.containers)
-        super(ContainerTest, cls).tearDownClass()
+        super(ContainerTest, cls).resource_cleanup()
 
     def assertContainer(self, container, count, byte, versioned):
         resp, _ = self.container_client.list_container_metadata(container)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 0b22de5..5a586fc 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,8 +30,8 @@
     """Base test case class for all Orchestration API tests."""
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseOrchestrationTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseOrchestrationTest, cls).resource_setup()
         cls.os = clients.Manager()
         if not CONF.service_available.heat:
             raise cls.skipException("Heat support is required")
@@ -146,11 +146,11 @@
             return yaml.safe_load(f)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls._clear_stacks()
         cls._clear_keypairs()
         cls._clear_images()
-        super(BaseOrchestrationTest, cls).tearDownClass()
+        super(BaseOrchestrationTest, cls).resource_cleanup()
 
     @staticmethod
     def stack_output(stack, output_key):
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index ffadb16..f1a4f85 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -30,9 +30,8 @@
 class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(NeutronResourcesTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(NeutronResourcesTestJSON, cls).resource_setup()
         if not CONF.orchestration.image_ref:
             raise cls.skipException("No image available to test")
         os = clients.Manager()
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 72ad5f5..759cbbe 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -25,8 +25,8 @@
 class StacksTestJSON(base.BaseOrchestrationTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(StacksTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(StacksTestJSON, cls).resource_setup()
         cls.stack_name = data_utils.rand_name('heat')
         template = cls.read_template('non_empty_stack')
         image_id = (CONF.orchestration.image_ref or
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 2f58611..1da340c 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -27,8 +27,8 @@
     _type = 'type'
 
     @classmethod
-    def setUpClass(cls):
-        super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(NovaKeyPairResourcesYAMLTest, cls).resource_setup()
         cls.stack_name = data_utils.rand_name('heat')
         template = cls.read_template('nova_keypair', ext=cls._tpl_type)
 
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 8023f2c..d7fbd65 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -23,8 +23,8 @@
     empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
 
     @classmethod
-    def setUpClass(cls):
-        super(StacksTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(StacksTestJSON, cls).resource_setup()
 
     @test.attr(type='smoke')
     def test_stack_list_responds(self):
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index d7c2a0d..307468e 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -26,9 +26,8 @@
 
 class SwiftResourcesTestJSON(base.BaseOrchestrationTest):
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(SwiftResourcesTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(SwiftResourcesTestJSON, cls).resource_setup()
         cls.stack_name = data_utils.rand_name('heat')
         template = cls.read_template('swift_basic')
         os = clients.Manager()
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
index 0d6060d..262c576 100644
--- a/tempest/api/orchestration/stacks/test_templates.py
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -26,9 +26,8 @@
 """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(TemplateYAMLTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(TemplateYAMLTestJSON, cls).resource_setup()
         cls.stack_name = data_utils.rand_name('heat')
         cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
         cls.client.wait_for_stack_status(cls.stack_identifier,
diff --git a/tempest/api/orchestration/stacks/test_templates_negative.py b/tempest/api/orchestration/stacks/test_templates_negative.py
index b325104..9082107 100644
--- a/tempest/api/orchestration/stacks/test_templates_negative.py
+++ b/tempest/api/orchestration/stacks/test_templates_negative.py
@@ -30,8 +30,8 @@
     invalid_template_url = 'http://www.example.com/template.yaml'
 
     @classmethod
-    def setUpClass(cls):
-        super(TemplateYAMLNegativeTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(TemplateYAMLNegativeTestJSON, cls).resource_setup()
         cls.parameters = {}
 
     @test.attr(type=['gate', 'negative'])
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
deleted file mode 100644
index 98761ac..0000000
--- a/tempest/api/orchestration/stacks/test_update.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import logging
-
-from tempest.api.orchestration import base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-LOG = logging.getLogger(__name__)
-
-
-class UpdateStackTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-
-    template = '''
-heat_template_version: 2013-05-23
-resources:
-  random1:
-    type: OS::Heat::RandomString
-'''
-    update_template = '''
-heat_template_version: 2013-05-23
-resources:
-  random1:
-    type: OS::Heat::RandomString
-  random2:
-    type: OS::Heat::RandomString
-'''
-
-    def update_stack(self, stack_identifier, template):
-        stack_name = stack_identifier.split('/')[0]
-        self.client.update_stack(
-            stack_identifier=stack_identifier,
-            name=stack_name,
-            template=template)
-        self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
-
-    @test.attr(type='gate')
-    def test_stack_update_nochange(self):
-        stack_name = data_utils.rand_name('heat')
-        stack_identifier = self.create_stack(stack_name, self.template)
-        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
-        expected_resources = {'random1': 'OS::Heat::RandomString'}
-        self.assertEqual(expected_resources,
-                         self.list_resources(stack_identifier))
-
-        # Update with no changes, resources should be unchanged
-        self.update_stack(stack_identifier, self.template)
-        self.assertEqual(expected_resources,
-                         self.list_resources(stack_identifier))
-
-    @test.attr(type='gate')
-    def test_stack_update_add_remove(self):
-        stack_name = data_utils.rand_name('heat')
-        stack_identifier = self.create_stack(stack_name, self.template)
-        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
-        initial_resources = {'random1': 'OS::Heat::RandomString'}
-        self.assertEqual(initial_resources,
-                         self.list_resources(stack_identifier))
-
-        # Add one resource via a stack update
-        self.update_stack(stack_identifier, self.update_template)
-        updated_resources = {'random1': 'OS::Heat::RandomString',
-                             'random2': 'OS::Heat::RandomString'}
-        self.assertEqual(updated_resources,
-                         self.list_resources(stack_identifier))
-
-        # Then remove it by updating with the original template
-        self.update_stack(stack_identifier, self.template)
-        self.assertEqual(initial_resources,
-                         self.list_resources(stack_identifier))
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index f371370..f47078c 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -26,8 +26,8 @@
 class CinderResourcesTest(base.BaseOrchestrationTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(CinderResourcesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(CinderResourcesTest, cls).resource_setup()
         if not CONF.service_available.cinder:
             raise cls.skipException('Cinder support is required')
 
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 8c2f37b..769c201 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -26,11 +26,11 @@
     """Base test case class for all Telemetry API tests."""
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.ceilometer:
             raise cls.skipException("Ceilometer support is required")
         cls.set_network_resources()
-        super(BaseTelemetryTest, cls).setUpClass()
+        super(BaseTelemetryTest, cls).resource_setup()
         os = cls.get_client_manager()
         cls.telemetry_client = os.telemetry_client
         cls.servers_client = os.servers_client
@@ -84,12 +84,12 @@
                 pass
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
         cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
         cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
         cls.clear_isolated_creds()
-        super(BaseTelemetryTest, cls).tearDownClass()
+        super(BaseTelemetryTest, cls).resource_cleanup()
 
     def await_samples(self, metric, query):
         """
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_telemetry_alarming_api.py
index 95758e8..b45d545 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_telemetry_alarming_api.py
@@ -20,8 +20,8 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(TelemetryAlarmingAPITestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(TelemetryAlarmingAPITestJSON, cls).resource_setup()
         cls.rule = {'meter_name': 'cpu_util',
                     'comparison_operator': 'gt',
                     'threshold': 80.0,
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 9b15c51..3782b70 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -23,11 +23,11 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if CONF.telemetry.too_slow_to_test:
             raise cls.skipException("Ceilometer feature for fast work mysql "
                                     "is disabled")
-        super(TelemetryNotificationAPITestJSON, cls).setUpClass()
+        super(TelemetryNotificationAPITestJSON, cls).resource_setup()
 
     @test.attr(type="gate")
     @testtools.skipIf(not CONF.service_available.nova,
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 769f5e0..db2aab5 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -25,9 +25,8 @@
     _interface = "json"
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumeMultiBackendTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumeMultiBackendTest, cls).resource_setup()
         if not CONF.volume_feature_enabled.multi_backend:
             raise cls.skipException("Cinder multi-backend feature disabled")
 
@@ -76,7 +75,7 @@
             self.volume['id'], 'available')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         # volumes deletion
         vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
         for volume_id in vid_prefix:
@@ -93,7 +92,7 @@
         for volume_type_id in volume_type_id_list:
             cls.client.delete_volume_type(volume_type_id)
 
-        super(VolumeMultiBackendTest, cls).tearDownClass()
+        super(VolumeMultiBackendTest, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_backend_name_reporting(self):
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index abbe1e9..720734b 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -22,9 +22,8 @@
     _interface = "json"
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(SnapshotsActionsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(SnapshotsActionsTest, cls).resource_setup()
         cls.client = cls.snapshots_client
 
         # Create admin volume client
@@ -46,7 +45,7 @@
                                             'available')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         # Delete the test snapshot
         cls.client.delete_snapshot(cls.snapshot['id'])
         cls.client.wait_for_resource_deletion(cls.snapshot['id'])
@@ -55,7 +54,7 @@
         cls.volumes_client.delete_volume(cls.volume['id'])
         cls.volumes_client.wait_for_resource_deletion(cls.volume['id'])
 
-        super(SnapshotsActionsTest, cls).tearDownClass()
+        super(SnapshotsActionsTest, cls).resource_cleanup()
 
     def tearDown(self):
         # Set snapshot's status to available after test
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index fa3b667..7e24fa4 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -27,8 +27,8 @@
     force_tenant_isolation = True
 
     @classmethod
-    def setUpClass(cls):
-        super(VolumeQuotasAdminTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumeQuotasAdminTestJSON, cls).resource_setup()
         cls.admin_volume_client = cls.os_adm.volumes_client
         cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
 
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 515024f..60a0adb 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -23,9 +23,8 @@
     force_tenant_isolation = True
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumeQuotasNegativeTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumeQuotasNegativeTestJSON, cls).resource_setup()
         demo_user = cls.isolated_creds.get_primary_creds()
         cls.demo_tenant_id = demo_user.tenant_id
         cls.shared_quota_set = {'gigabytes': 3, 'volumes': 1, 'snapshots': 1}
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
index 4a68e05..7820148 100644
--- a/tempest/api/volume/admin/test_volume_services.py
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -25,8 +25,8 @@
     _interface = "json"
 
     @classmethod
-    def setUpClass(cls):
-        super(VolumesServicesTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesServicesTestJSON, cls).resource_setup()
         cls.client = cls.os_adm.volume_services_client
         _, cls.services = cls.client.list_services()
         cls.host_name = cls.services[0]['host']
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index c682866..2d72dd2 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -22,15 +22,15 @@
     _interface = "json"
 
     @classmethod
-    def setUpClass(cls):
-        super(VolumeTypesExtraSpecsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumeTypesExtraSpecsTest, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type-')
         _, cls.volume_type = cls.client.create_volume_type(vol_type_name)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.client.delete_volume_type(cls.volume_type['id'])
-        super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
+        super(VolumeTypesExtraSpecsTest, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_volume_type_extra_specs_list(self):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index ff4f113..f3eee00 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -25,8 +25,8 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(ExtraSpecsNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ExtraSpecsNegativeTest, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type-')
         cls.extra_specs = {"spec1": "val1"}
         _, cls.volume_type = cls.client.create_volume_type(
@@ -34,9 +34,9 @@
             extra_specs=cls.extra_specs)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.client.delete_volume_type(cls.volume_type['id'])
-        super(ExtraSpecsNegativeTest, cls).tearDownClass()
+        super(ExtraSpecsNegativeTest, cls).resource_cleanup()
 
     @test.attr(type='gate')
     def test_update_no_body(self):
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index d6db1df..f85718b 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -22,9 +22,8 @@
     _interface = "json"
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesActionsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesActionsTest, cls).resource_setup()
         cls.client = cls.volumes_client
 
         # Create admin volume client
@@ -38,12 +37,12 @@
         cls.client.wait_for_volume_status(cls.volume['id'], 'available')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         # Delete the test volume
         cls.client.delete_volume(cls.volume['id'])
         cls.client.wait_for_resource_deletion(cls.volume['id'])
 
-        super(VolumesActionsTest, cls).tearDownClass()
+        super(VolumesActionsTest, cls).resource_cleanup()
 
     def _reset_volume_status(self, volume_id, status):
         # Reset the volume status
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 3699e9c..8b90b07 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -27,9 +27,8 @@
     _interface = "json"
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesBackupsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesBackupsTest, cls).resource_setup()
 
         if not CONF.volume_feature_enabled.backup:
             raise cls.skipException("Cinder backup feature disabled")
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 43f48ff..7f5361d 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -32,9 +32,9 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(BaseVolumeTest, cls).setUpClass()
+        super(BaseVolumeTest, cls).resource_setup()
 
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
@@ -69,6 +69,7 @@
             if not CONF.volume_feature_enabled.api_v2:
                 msg = "Volume API v2 is disabled"
                 raise cls.skipException(msg)
+            cls.snapshots_client = cls.os.snapshots_v2_client
             cls.volumes_client = cls.os.volumes_v2_client
             cls.volumes_extension_client = cls.os.volumes_v2_extension_client
             cls.availability_zone_client = (
@@ -82,11 +83,11 @@
             raise exceptions.InvalidConfiguration(message=msg)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.clear_snapshots()
         cls.clear_volumes()
         cls.clear_isolated_creds()
-        super(BaseVolumeTest, cls).tearDownClass()
+        super(BaseVolumeTest, cls).resource_cleanup()
 
     @classmethod
     def create_volume(cls, size=1, **kwargs):
@@ -151,8 +152,8 @@
 class BaseVolumeAdminTest(BaseVolumeTest):
     """Base test case class for all Volume Admin API tests."""
     @classmethod
-    def setUpClass(cls):
-        super(BaseVolumeAdminTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseVolumeAdminTest, cls).resource_setup()
         cls.adm_user = CONF.identity.admin_username
         cls.adm_pass = CONF.identity.admin_password
         cls.adm_tenant = CONF.identity.admin_tenant_name
@@ -186,9 +187,9 @@
             cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.clear_qos_specs()
-        super(BaseVolumeAdminTest, cls).tearDownClass()
+        super(BaseVolumeAdminTest, cls).resource_cleanup()
 
     @classmethod
     def create_test_qos_specs(cls, name=None, consumer=None, **kwargs):
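
The hunks above migrate per-class fixtures from setUpClass/tearDownClass to
resource_setup/resource_cleanup hooks, which also removes the need for the
old @test.safe_setup decorator. A minimal sketch of that pattern, assuming a
plain unittest base class rather than tempest's actual BaseTestCase::

    import unittest


    class BaseTestCaseSketch(unittest.TestCase):
        """Illustrative only -- not tempest.test.BaseTestCase."""

        @classmethod
        def setUpClass(cls):
            super(BaseTestCaseSketch, cls).setUpClass()
            try:
                cls.resource_setup()
            except Exception:
                # Mirror what @test.safe_setup used to do: release anything
                # already created, then re-raise the setup failure.
                cls.resource_cleanup()
                raise

        @classmethod
        def tearDownClass(cls):
            try:
                cls.resource_cleanup()
            finally:
                super(BaseTestCaseSketch, cls).tearDownClass()

        @classmethod
        def resource_setup(cls):
            """Subclasses create shared resources here."""
            pass

        @classmethod
        def resource_cleanup(cls):
            """Subclasses delete shared resources here."""
            pass
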
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index c026f71..648bd8b 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -24,8 +24,8 @@
     """
 
     @classmethod
-    def setUpClass(cls):
-        super(AvailabilityZoneV2TestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(AvailabilityZoneV2TestJSON, cls).resource_setup()
         cls.client = cls.availability_zone_client
 
     @test.attr(type='gate')
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
index 8b6ba49..a719b79 100644
--- a/tempest/api/volume/test_qos.py
+++ b/tempest/api/volume/test_qos.py
@@ -25,9 +25,8 @@
     """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(QosSpecsV2TestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(QosSpecsV2TestJSON, cls).resource_setup()
         # Create admin qos client
         # Create a test shared qos-specs for tests
         cls.qos_name = utils.rand_name(cls.__name__ + '-QoS')
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index 94ba095..777d3de 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -17,13 +17,11 @@
 from tempest import test
 
 
-class SnapshotMetadataTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class SnapshotV2MetadataTestJSON(base.BaseVolumeTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(SnapshotMetadataTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(SnapshotV2MetadataTestJSON, cls).resource_setup()
         cls.client = cls.snapshots_client
         # Create a volume
         cls.volume = cls.create_volume()
@@ -34,7 +32,7 @@
     def tearDown(self):
         # Update the metadata to {}
         self.client.update_snapshot_metadata(self.snapshot_id, {})
-        super(SnapshotMetadataTest, self).tearDown()
+        super(SnapshotV2MetadataTestJSON, self).tearDown()
 
     @test.attr(type='gate')
     def test_create_get_delete_snapshot_metadata(self):
@@ -100,5 +98,13 @@
         self.assertEqual(expect, body)
 
 
-class SnapshotMetadataTestXML(SnapshotMetadataTest):
+class SnapshotV2MetadataTestXML(SnapshotV2MetadataTestJSON):
+    _interface = "xml"
+
+
+class SnapshotV1MetadataTestJSON(SnapshotV2MetadataTestJSON):
+    _api_version = 1
+
+
+class SnapshotV1MetadataTestXML(SnapshotV1MetadataTestJSON):
     _interface = "xml"
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index ac760aa..2ec8667 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -22,9 +22,8 @@
 class VolumesV2MetadataTest(base.BaseVolumeTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesV2MetadataTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2MetadataTest, cls).resource_setup()
         # Create a volume
         cls.volume = cls.create_volume()
         cls.volume_id = cls.volume['id']
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4a6ba03..90ac9c1 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -26,8 +26,8 @@
 class VolumesV2TransfersTest(base.BaseVolumeTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(VolumesV2TransfersTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2TransfersTest, cls).resource_setup()
 
         # Add another tenant to test volume-transfer
         if CONF.compute.allow_tenant_isolation:
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c87878d..a9bc70a 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -24,9 +24,8 @@
 class VolumesV2ActionsTest(base.BaseVolumeTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesV2ActionsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2ActionsTest, cls).resource_setup()
         cls.client = cls.volumes_client
         cls.image_client = cls.os.image_client
 
@@ -45,12 +44,12 @@
         self.image_client.wait_for_resource_deletion(image_id)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         # Delete the test instance
         cls.servers_client.delete_server(cls.server['id'])
         cls.servers_client.wait_for_server_termination(cls.server['id'])
 
-        super(VolumesV2ActionsTest, cls).tearDownClass()
+        super(VolumesV2ActionsTest, cls).resource_cleanup()
 
     @test.stresstest(class_setup_per='process')
     @test.attr(type='smoke')
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index c9e80aa..edd497c 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -23,9 +23,8 @@
 class VolumesV2ExtendTest(base.BaseVolumeTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesV2ExtendTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2ExtendTest, cls).resource_setup()
         cls.client = cls.volumes_client
 
     @test.attr(type='gate')
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index a346a17..033beb4 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -26,8 +26,8 @@
 class VolumesV2GetTest(base.BaseVolumeTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(VolumesV2GetTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2GetTest, cls).resource_setup()
         cls.client = cls.volumes_client
 
         cls.name_field = cls.special_fields['name_field']
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 272a41a..016e9ab 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -55,9 +55,8 @@
                              [str_vol(v) for v in fetched_list]))
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesV2ListTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2ListTestJSON, cls).resource_setup()
         cls.client = cls.volumes_client
         cls.name = cls.VOLUME_FIELDS[1]
 
@@ -72,12 +71,12 @@
             cls.volume_id_list.append(volume['id'])
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         # Delete the created volumes
         for volid in cls.volume_id_list:
             cls.client.delete_volume(volid)
             cls.client.wait_for_resource_deletion(volid)
-        super(VolumesV2ListTestJSON, cls).tearDownClass()
+        super(VolumesV2ListTestJSON, cls).resource_cleanup()
 
     def _list_by_param_value_and_assert(self, params, with_detail=False):
         """
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 5f0cffa..2b43c63 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -24,9 +24,8 @@
 class VolumesV2NegativeTest(base.BaseVolumeTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesV2NegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2NegativeTest, cls).resource_setup()
         cls.client = cls.volumes_client
 
         cls.name_field = cls.special_fields['name_field']
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 7db1ef1..78df1df 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -20,21 +20,18 @@
 CONF = config.CONF
 
 
-class VolumesSnapshotTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class VolumesV2SnapshotTestJSON(base.BaseVolumeTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesSnapshotTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2SnapshotTestJSON, cls).resource_setup()
         cls.volume_origin = cls.create_volume()
 
         if not CONF.volume_feature_enabled.snapshot:
             raise cls.skipException("Cinder volume snapshots are disabled")
 
-    @classmethod
-    def tearDownClass(cls):
-        super(VolumesSnapshotTest, cls).tearDownClass()
+        cls.name_field = cls.special_fields['name_field']
+        cls.descrip_field = cls.special_fields['descrip_field']
 
     def _detach(self, volume_id):
         """Detach volume."""
@@ -90,8 +87,8 @@
     def test_snapshot_create_get_list_update_delete(self):
         # Create a snapshot
         s_name = data_utils.rand_name('snap')
-        snapshot = self.create_snapshot(self.volume_origin['id'],
-                                        display_name=s_name)
+        params = {self.name_field: s_name}
+        snapshot = self.create_snapshot(self.volume_origin['id'], **params)
 
         # Get the snap and check for some of its details
         _, snap_get = self.snapshots_client.get_snapshot(snapshot['id'])
@@ -100,26 +97,26 @@
                          "Referred volume origin mismatch")
 
         # Compare also with the output from the list action
-        tracking_data = (snapshot['id'], snapshot['display_name'])
+        tracking_data = (snapshot['id'], snapshot[self.name_field])
         _, snaps_list = self.snapshots_client.list_snapshots()
-        snaps_data = [(f['id'], f['display_name']) for f in snaps_list]
+        snaps_data = [(f['id'], f[self.name_field]) for f in snaps_list]
         self.assertIn(tracking_data, snaps_data)
 
         # Updates snapshot with new values
         new_s_name = data_utils.rand_name('new-snap')
         new_desc = 'This is the new description of snapshot.'
+        params = {self.name_field: new_s_name,
+                  self.descrip_field: new_desc}
         _, update_snapshot = \
-            self.snapshots_client.update_snapshot(snapshot['id'],
-                                                  display_name=new_s_name,
-                                                  display_description=new_desc)
+            self.snapshots_client.update_snapshot(snapshot['id'], **params)
         # Assert response body for update_snapshot method
-        self.assertEqual(new_s_name, update_snapshot['display_name'])
-        self.assertEqual(new_desc, update_snapshot['display_description'])
+        self.assertEqual(new_s_name, update_snapshot[self.name_field])
+        self.assertEqual(new_desc, update_snapshot[self.descrip_field])
         # Assert response body for get_snapshot method
         _, updated_snapshot = \
             self.snapshots_client.get_snapshot(snapshot['id'])
-        self.assertEqual(new_s_name, updated_snapshot['display_name'])
-        self.assertEqual(new_desc, updated_snapshot['display_description'])
+        self.assertEqual(new_s_name, updated_snapshot[self.name_field])
+        self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
 
         # Delete the snapshot
         self.snapshots_client.delete_snapshot(snapshot['id'])
@@ -131,11 +128,11 @@
         """list snapshots with params."""
         # Create a snapshot
         display_name = data_utils.rand_name('snap')
-        snapshot = self.create_snapshot(self.volume_origin['id'],
-                                        display_name=display_name)
+        params = {self.name_field: display_name}
+        snapshot = self.create_snapshot(self.volume_origin['id'], **params)
 
         # Verify list snapshots by display_name filter
-        params = {'display_name': snapshot['display_name']}
+        params = {self.name_field: snapshot[self.name_field]}
         self._list_by_param_values_and_assert(params)
 
         # Verify list snapshots by status filter
@@ -144,7 +141,7 @@
 
         # Verify list snapshots by status and display name filter
         params = {'status': 'available',
-                  'display_name': snapshot['display_name']}
+                  self.name_field: snapshot[self.name_field]}
         self._list_by_param_values_and_assert(params)
 
     @test.attr(type='gate')
@@ -152,18 +149,18 @@
         """list snapshot details with params."""
         # Create a snapshot
         display_name = data_utils.rand_name('snap')
-        snapshot = self.create_snapshot(self.volume_origin['id'],
-                                        display_name=display_name)
+        params = {self.name_field: display_name}
+        snapshot = self.create_snapshot(self.volume_origin['id'], **params)
 
         # Verify list snapshot details by display_name filter
-        params = {'display_name': snapshot['display_name']}
+        params = {self.name_field: snapshot[self.name_field]}
         self._list_by_param_values_and_assert(params, with_detail=True)
         # Verify list snapshot details by status filter
         params = {'status': 'available'}
         self._list_by_param_values_and_assert(params, with_detail=True)
         # Verify list snapshot details by status and display name filter
         params = {'status': 'available',
-                  'display_name': snapshot['display_name']}
+                  self.name_field: snapshot[self.name_field]}
         self._list_by_param_values_and_assert(params, with_detail=True)
 
     @test.attr(type='gate')
@@ -181,5 +178,13 @@
         self.clear_snapshots()
 
 
-class VolumesSnapshotTestXML(VolumesSnapshotTest):
+class VolumesV2SnapshotTestXML(VolumesV2SnapshotTestJSON):
+    _interface = "xml"
+
+
+class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
+    _api_version = 1
+
+
+class VolumesV1SnapshotTestXML(VolumesV1SnapshotTestJSON):
     _interface = "xml"
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 61aa307..75a62a8 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -21,12 +21,11 @@
 CONF = config.CONF
 
 
-class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class VolumesV2SnapshotNegativeTestJSON(base.BaseVolumeTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(VolumesSnapshotNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2SnapshotNegativeTestJSON, cls).resource_setup()
 
         if not CONF.volume_feature_enabled.snapshot:
             raise cls.skipException("Cinder volume snapshots are disabled")
@@ -48,5 +47,13 @@
                           None, display_name=s_name)
 
 
-class VolumesSnapshotNegativeTestXML(VolumesSnapshotNegativeTest):
+class VolumesV2SnapshotNegativeTestXML(VolumesV2SnapshotNegativeTestJSON):
+    _interface = "xml"
+
+
+class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
+    _api_version = 1
+
+
+class VolumesV1SnapshotNegativeTestXML(VolumesV1SnapshotNegativeTestJSON):
     _interface = "xml"
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 3ae227d..cc56873 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -31,9 +31,8 @@
     """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(VolumesV2ListTestJSON, cls).setUpClass()
+    def resource_setup(cls):
+        super(VolumesV2ListTestJSON, cls).resource_setup()
         cls.client = cls.volumes_client
 
         # Create 3 test volumes
@@ -47,12 +46,12 @@
             cls.volume_id_list.append(volume['id'])
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         # Delete the created volumes
         for volid in cls.volume_id_list:
             cls.client.delete_volume(volid)
             cls.client.wait_for_resource_deletion(volid)
-        super(VolumesV2ListTestJSON, cls).tearDownClass()
+        super(VolumesV2ListTestJSON, cls).resource_cleanup()
 
     @test.attr(type='gate')
     def test_volume_list_details_with_multiple_params(self):
diff --git a/tempest/api_schema/response/compute/v2/security_group_default_rule.py b/tempest/api_schema/response/compute/v2/security_group_default_rule.py
new file mode 100644
index 0000000..9246ab8
--- /dev/null
+++ b/tempest/api_schema/response/compute/v2/security_group_default_rule.py
@@ -0,0 +1,61 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+common_security_group_default_rule_info = {
+    'type': 'object',
+    'properties': {
+        'from_port': {'type': 'integer'},
+        'id': {'type': 'integer'},
+        'ip_protocol': {'type': 'string'},
+        'ip_range': {
+            'type': 'object',
+            'properties': {
+                'cidr': {'type': 'string'}
+            },
+            'required': ['cidr'],
+        },
+        'to_port': {'type': 'integer'},
+    },
+    'required': ['from_port', 'id', 'ip_protocol', 'ip_range', 'to_port'],
+}
+
+create_get_security_group_default_rule = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group_default_rule':
+                common_security_group_default_rule_info
+        },
+        'required': ['security_group_default_rule']
+    }
+}
+
+delete_security_group_default_rule = {
+    'status_code': [204]
+}
+
+list_security_group_default_rules = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group_default_rules': {
+                'type': 'array',
+                'items': common_security_group_default_rule_info
+            }
+        },
+        'required': ['security_group_default_rules']
+    }
+}
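
The new schema module above can be exercised directly with the jsonschema
library. A hedged usage sketch, assuming the schema dict is imported from the
module added above and using a made-up response body::

    import jsonschema

    sample_body = {
        'security_group_default_rule': {
            'from_port': 80,
            'id': 1,
            'ip_protocol': 'tcp',
            'ip_range': {'cidr': '10.10.10.0/24'},
            'to_port': 80,
        }
    }

    # Raises jsonschema.exceptions.ValidationError on a mismatched body.
    jsonschema.validate(
        sample_body,
        create_get_security_group_default_rule['response_body'])
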
diff --git a/tempest/api_schema/response/queuing/__init__.py b/tempest/api_schema/response/messaging/__init__.py
similarity index 100%
rename from tempest/api_schema/response/queuing/__init__.py
rename to tempest/api_schema/response/messaging/__init__.py
diff --git a/tempest/api_schema/response/queuing/v1/__init__.py b/tempest/api_schema/response/messaging/v1/__init__.py
similarity index 100%
rename from tempest/api_schema/response/queuing/v1/__init__.py
rename to tempest/api_schema/response/messaging/v1/__init__.py
diff --git a/tempest/api_schema/response/queuing/v1/queues.py b/tempest/api_schema/response/messaging/v1/queues.py
similarity index 100%
rename from tempest/api_schema/response/queuing/v1/queues.py
rename to tempest/api_schema/response/messaging/v1/queues.py
diff --git a/tempest/auth.py b/tempest/auth.py
index c84ad6b..b1ead29 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -40,11 +40,9 @@
     Provide authentication
     """
 
-    def __init__(self, credentials, client_type='tempest',
-                 interface=None):
+    def __init__(self, credentials, interface=None):
         """
         :param credentials: credentials for authentication
-        :param client_type: 'tempest' or 'official'
         :param interface: 'json' or 'xml'. Applicable for tempest client only
         """
         credentials = self._convert_credentials(credentials)
@@ -52,9 +50,8 @@
             self.credentials = credentials
         else:
             raise TypeError("Invalid credentials")
-        self.client_type = client_type
         self.interface = interface
-        if self.client_type == 'tempest' and self.interface is None:
+        if self.interface is None:
             self.interface = 'json'
         self.cache = None
         self.alt_auth_data = None
@@ -68,11 +65,10 @@
             return credentials
 
     def __str__(self):
-        return "Creds :{creds}, client type: {client_type}, interface: " \
-               "{interface}, cached auth data: {cache}".format(
-                   creds=self.credentials, client_type=self.client_type,
-                   interface=self.interface, cache=self.cache
-               )
+        return "Creds :{creds}, interface: {interface}, " \
+               "cached auth data: {cache}".format(
+                   creds=self.credentials, interface=self.interface,
+                   cache=self.cache)
 
     @abc.abstractmethod
     def _decorate_request(self, filters, method, url, headers=None, body=None,
@@ -208,9 +204,8 @@
 
     token_expiry_threshold = datetime.timedelta(seconds=60)
 
-    def __init__(self, credentials, client_type='tempest', interface=None):
-        super(KeystoneAuthProvider, self).__init__(credentials, client_type,
-                                                   interface)
+    def __init__(self, credentials, interface=None):
+        super(KeystoneAuthProvider, self).__init__(credentials, interface)
         self.auth_client = self._auth_client()
 
     def _decorate_request(self, filters, method, url, headers=None, body=None,
@@ -244,15 +239,12 @@
 
     def _get_auth(self):
         # Bypasses the cache
-        if self.client_type == 'tempest':
-            auth_func = getattr(self.auth_client, 'get_token')
-            auth_params = self._auth_params()
+        auth_func = getattr(self.auth_client, 'get_token')
+        auth_params = self._auth_params()
 
-            # returns token, auth_data
-            token, auth_data = auth_func(**auth_params)
-            return token, auth_data
-        else:
-            raise NotImplementedError
+        # returns token, auth_data
+        token, auth_data = auth_func(**auth_params)
+        return token, auth_data
 
     def get_token(self):
         return self.auth_data[0]
@@ -263,23 +255,17 @@
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
 
     def _auth_client(self):
-        if self.client_type == 'tempest':
-            if self.interface == 'json':
-                return json_id.TokenClientJSON()
-            else:
-                return xml_id.TokenClientXML()
+        if self.interface == 'json':
+            return json_id.TokenClientJSON()
         else:
-            raise NotImplementedError
+            return xml_id.TokenClientXML()
 
     def _auth_params(self):
-        if self.client_type == 'tempest':
-            return dict(
-                user=self.credentials.username,
-                password=self.credentials.password,
-                tenant=self.credentials.tenant_name,
-                auth_data=True)
-        else:
-            raise NotImplementedError
+        return dict(
+            user=self.credentials.username,
+            password=self.credentials.password,
+            tenant=self.credentials.tenant_name,
+            auth_data=True)
 
     def _fill_credentials(self, auth_data_body):
         tenant = auth_data_body['token']['tenant']
@@ -350,24 +336,18 @@
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
 
     def _auth_client(self):
-        if self.client_type == 'tempest':
-            if self.interface == 'json':
-                return json_v3id.V3TokenClientJSON()
-            else:
-                return xml_v3id.V3TokenClientXML()
+        if self.interface == 'json':
+            return json_v3id.V3TokenClientJSON()
         else:
-            raise NotImplementedError
+            return xml_v3id.V3TokenClientXML()
 
     def _auth_params(self):
-        if self.client_type == 'tempest':
-            return dict(
-                user=self.credentials.username,
-                password=self.credentials.password,
-                tenant=self.credentials.tenant_name,
-                domain=self.credentials.user_domain_name,
-                auth_data=True)
-        else:
-            raise NotImplementedError
+        return dict(
+            user=self.credentials.username,
+            password=self.credentials.password,
+            tenant=self.credentials.tenant_name,
+            domain=self.credentials.user_domain_name,
+            auth_data=True)
 
     def _fill_credentials(self, auth_data_body):
         # project or domain, depending on the scope
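
With client_type removed, an auth provider is now built from credentials and
an optional interface only. A hedged usage sketch based on the constructor
shown above (the concrete v2 provider class name is assumed here)::

    from tempest import auth

    creds = auth.get_default_credentials('identity_admin')

    # interface defaults to 'json' when omitted
    provider = auth.KeystoneV2AuthProvider(creds)
    xml_provider = auth.KeystoneV2AuthProvider(creds, interface='xml')
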
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index c33589a..ca6d7fe 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -94,11 +94,11 @@
 
 class ClientTestBase(tempest.test.BaseTestCase):
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.cli.enabled:
             msg = "cli testing disabled"
             raise cls.skipException(msg)
-        super(ClientTestBase, cls).setUpClass()
+        super(ClientTestBase, cls).resource_setup()
 
     def __init__(self, *args, **kwargs):
         self.parser = tempest.cli.output_parser
diff --git a/tempest/cli/simple_read_only/compute/test_nova.py b/tempest/cli/simple_read_only/compute/test_nova.py
index 9bac7a6..6e5e077 100644
--- a/tempest/cli/simple_read_only/compute/test_nova.py
+++ b/tempest/cli/simple_read_only/compute/test_nova.py
@@ -41,11 +41,11 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.nova:
             msg = ("%s skipped as Nova is not available" % cls.__name__)
             raise cls.skipException(msg)
-        super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
+        super(SimpleReadOnlyNovaClientTest, cls).resource_setup()
 
     def test_admin_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/compute/test_nova_manage.py b/tempest/cli/simple_read_only/compute/test_nova_manage.py
index c27b12e..cff543f 100644
--- a/tempest/cli/simple_read_only/compute/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/compute/test_nova_manage.py
@@ -36,7 +36,7 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.nova:
             msg = ("%s skipped as Nova is not available" % cls.__name__)
             raise cls.skipException(msg)
@@ -44,7 +44,7 @@
             msg = ("%s skipped as *-manage commands not available"
                    % cls.__name__)
             raise cls.skipException(msg)
-        super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
+        super(SimpleReadOnlyNovaManageTest, cls).resource_setup()
 
     def test_admin_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
@@ -65,20 +65,17 @@
                          self.nova_manage('', '--version', merge_stderr=True))
 
     def test_debug_flag(self):
-        self.assertNotEqual("", self.nova_manage('flavor list',
+        self.assertNotEqual("", self.nova_manage('service list',
                             '--debug'))
 
     def test_verbose_flag(self):
-        self.assertNotEqual("", self.nova_manage('flavor list',
+        self.assertNotEqual("", self.nova_manage('service list',
                             '--verbose'))
 
     # test actions
     def test_version(self):
         self.assertNotEqual("", self.nova_manage('version'))
 
-    def test_flavor_list(self):
-        self.assertNotEqual("", self.nova_manage('flavor list'))
-
     def test_db_sync(self):
         # make sure command doesn't error out
         self.nova_manage('db sync')
diff --git a/tempest/cli/simple_read_only/data_processing/test_sahara.py b/tempest/cli/simple_read_only/data_processing/test_sahara.py
index 2c6e0e2..751a4ad 100644
--- a/tempest/cli/simple_read_only/data_processing/test_sahara.py
+++ b/tempest/cli/simple_read_only/data_processing/test_sahara.py
@@ -34,11 +34,11 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.sahara:
             msg = "Skipping all Sahara cli tests because it is not available"
             raise cls.skipException(msg)
-        super(SimpleReadOnlySaharaClientTest, cls).setUpClass()
+        super(SimpleReadOnlySaharaClientTest, cls).resource_setup()
 
     @test.attr(type='negative')
     def test_sahara_fake_action(self):
diff --git a/tempest/cli/simple_read_only/image/test_glance.py b/tempest/cli/simple_read_only/image/test_glance.py
index 2fd8212..a9cbadb 100644
--- a/tempest/cli/simple_read_only/image/test_glance.py
+++ b/tempest/cli/simple_read_only/image/test_glance.py
@@ -34,11 +34,11 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.glance:
             msg = ("%s skipped as Glance is not available" % cls.__name__)
             raise cls.skipException(msg)
-        super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
+        super(SimpleReadOnlyGlanceClientTest, cls).resource_setup()
 
     def test_glance_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/network/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
index 87f6b67..f9f8906 100644
--- a/tempest/cli/simple_read_only/network/test_neutron.py
+++ b/tempest/cli/simple_read_only/network/test_neutron.py
@@ -35,11 +35,11 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if (not CONF.service_available.neutron):
             msg = "Skipping all Neutron cli tests because it is not available"
             raise cls.skipException(msg)
-        super(SimpleReadOnlyNeutronClientTest, cls).setUpClass()
+        super(SimpleReadOnlyNeutronClientTest, cls).resource_setup()
 
     @test.attr(type='smoke')
     def test_neutron_fake_action(self):
diff --git a/tempest/cli/simple_read_only/object_storage/test_swift.py b/tempest/cli/simple_read_only/object_storage/test_swift.py
index 069a384..a162660 100644
--- a/tempest/cli/simple_read_only/object_storage/test_swift.py
+++ b/tempest/cli/simple_read_only/object_storage/test_swift.py
@@ -31,11 +31,11 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.swift:
             msg = ("%s skipped as Swift is not available" % cls.__name__)
             raise cls.skipException(msg)
-        super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
+        super(SimpleReadOnlySwiftClientTest, cls).resource_setup()
 
     def test_swift_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/orchestration/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
index 430cdf1..7d7f8c9 100644
--- a/tempest/cli/simple_read_only/orchestration/test_heat.py
+++ b/tempest/cli/simple_read_only/orchestration/test_heat.py
@@ -32,12 +32,12 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if (not CONF.service_available.heat):
             msg = ("Skipping all Heat cli tests because it is "
                    "not available")
             raise cls.skipException(msg)
-        super(SimpleReadOnlyHeatClientTest, cls).setUpClass()
+        super(SimpleReadOnlyHeatClientTest, cls).resource_setup()
         cls.heat_template_path = os.path.join(os.path.dirname(
             os.path.dirname(os.path.realpath(__file__))),
             'heat_templates/heat_minimal.yaml')
diff --git a/tempest/cli/simple_read_only/telemetry/test_ceilometer.py b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
index 1d2822d..45b793b 100644
--- a/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
+++ b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
@@ -32,12 +32,12 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if (not CONF.service_available.ceilometer):
             msg = ("Skipping all Ceilometer cli tests because it is "
                    "not available")
             raise cls.skipException(msg)
-        super(SimpleReadOnlyCeilometerClientTest, cls).setUpClass()
+        super(SimpleReadOnlyCeilometerClientTest, cls).resource_setup()
 
     def test_ceilometer_meter_list(self):
         self.ceilometer('meter-list')
diff --git a/tempest/cli/simple_read_only/volume/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
index e44a577..45f6c41 100644
--- a/tempest/cli/simple_read_only/volume/test_cinder.py
+++ b/tempest/cli/simple_read_only/volume/test_cinder.py
@@ -35,11 +35,11 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         if not CONF.service_available.cinder:
             msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(msg)
-        super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
+        super(SimpleReadOnlyCinderClientTest, cls).resource_setup()
 
     def test_cinder_fake_action(self):
         self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/clients.py b/tempest/clients.py
index eab496e..2d07852 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,9 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import keystoneclient.exceptions
-import keystoneclient.v2_0.client
-
 from tempest import auth
 from tempest.common import rest_client
 from tempest import config
@@ -151,6 +148,8 @@
 from tempest.services.identity.xml.identity_client import TokenClientXML
 from tempest.services.image.v1.json.image_client import ImageClientJSON
 from tempest.services.image.v2.json.image_client import ImageClientV2JSON
+from tempest.services.messaging.json.messaging_client import \
+    MessagingClientJSON
 from tempest.services.network.json.network_client import NetworkClientJSON
 from tempest.services.network.xml.network_client import NetworkClientXML
 from tempest.services.object_storage.account_client import AccountClient
@@ -162,7 +161,6 @@
     ObjectClientCustomizedHeader
 from tempest.services.orchestration.json.orchestration_client import \
     OrchestrationClient
-from tempest.services.queuing.json.queuing_client import QueuingClientJSON
 from tempest.services.telemetry.json.telemetry_client import \
     TelemetryClientJSON
 from tempest.services.telemetry.xml.telemetry_client import \
@@ -188,11 +186,15 @@
 from tempest.services.volume.v2.json.extensions_client import \
     ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
 from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
+from tempest.services.volume.v2.json.snapshots_client import \
+    SnapshotsV2ClientJSON
 from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
 from tempest.services.volume.v2.xml.availability_zone_client import \
     VolumeV2AvailabilityZoneClientXML
 from tempest.services.volume.v2.xml.extensions_client import \
     ExtensionsV2ClientXML as VolumeV2ExtensionClientXML
+from tempest.services.volume.v2.xml.snapshots_client import \
+    SnapshotsV2ClientXML
 from tempest.services.volume.v2.xml.volumes_client import VolumesV2ClientXML
 from tempest.services.volume.xml.admin.volume_hosts_client import \
     VolumeHostsClientXML
@@ -223,7 +225,6 @@
     def __init__(self, credentials=None, interface='json', service=None):
         # Set interface and client type first
         self.interface = interface
-        self.client_type = 'tempest'
         # super cares for credentials validation
         super(Manager, self).__init__(credentials=credentials)
 
@@ -245,6 +246,7 @@
                 self.auth_provider)
             self.backups_client = BackupsClientXML(self.auth_provider)
             self.snapshots_client = SnapshotsClientXML(self.auth_provider)
+            self.snapshots_v2_client = SnapshotsV2ClientXML(self.auth_provider)
             self.volumes_client = VolumesClientXML(self.auth_provider)
             self.volumes_v2_client = VolumesV2ClientXML(self.auth_provider)
             self.volume_types_client = VolumeTypesClientXML(
@@ -324,6 +326,8 @@
                 self.auth_provider)
             self.backups_client = BackupsClientJSON(self.auth_provider)
             self.snapshots_client = SnapshotsClientJSON(self.auth_provider)
+            self.snapshots_v2_client = SnapshotsV2ClientJSON(
+                self.auth_provider)
             self.volumes_client = VolumesClientJSON(self.auth_provider)
             self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
             self.volume_types_client = VolumeTypesClientJSON(
@@ -384,7 +388,7 @@
                 self.auth_provider)
             self.database_versions_client = DatabaseVersionsClientJSON(
                 self.auth_provider)
-            self.queuing_client = QueuingClientJSON(self.auth_provider)
+            self.messaging_client = MessagingClientJSON(self.auth_provider)
             if CONF.service_available.ceilometer:
                 self.telemetry_client = TelemetryClientJSON(
                     self.auth_provider)
@@ -480,290 +484,3 @@
             credentials=auth.get_default_credentials('compute_admin'),
             interface=interface,
             service=service)
-
-
-class OfficialClientManager(manager.Manager):
-    """
-    Manager that provides access to the official python clients for
-    calling various OpenStack APIs.
-    """
-
-    NOVACLIENT_VERSION = '2'
-    CINDERCLIENT_VERSION = '1'
-    HEATCLIENT_VERSION = '1'
-    IRONICCLIENT_VERSION = '1'
-    SAHARACLIENT_VERSION = '1.1'
-    CEILOMETERCLIENT_VERSION = '2'
-
-    def __init__(self, credentials):
-        # FIXME(andreaf) Auth provider for client_type 'official' is
-        # not implemented yet, setting to 'tempest' for now.
-        self.client_type = 'tempest'
-        self.interface = None
-        # super cares for credentials validation
-        super(OfficialClientManager, self).__init__(credentials=credentials)
-        self.baremetal_client = self._get_baremetal_client()
-        self.compute_client = self._get_compute_client(credentials)
-        self.identity_client = self._get_identity_client(credentials)
-        self.image_client = self._get_image_client()
-        self.network_client = self._get_network_client()
-        self.volume_client = self._get_volume_client(credentials)
-        self.object_storage_client = self._get_object_storage_client(
-            credentials)
-        self.orchestration_client = self._get_orchestration_client(
-            credentials)
-        self.data_processing_client = self._get_data_processing_client(
-            credentials)
-        self.ceilometer_client = self._get_ceilometer_client(
-            credentials)
-
-    def _get_roles(self):
-        admin_credentials = auth.get_default_credentials('identity_admin')
-        keystone_admin = self._get_identity_client(admin_credentials)
-
-        username = self.credentials.username
-        tenant_name = self.credentials.tenant_name
-        user_id = keystone_admin.users.find(name=username).id
-        tenant_id = keystone_admin.tenants.find(name=tenant_name).id
-
-        roles = keystone_admin.roles.roles_for_user(
-            user=user_id, tenant=tenant_id)
-
-        return [r.name for r in roles]
-
-    def _get_compute_client(self, credentials):
-        # Novaclient will not execute operations for anyone but the
-        # identified user, so a new client needs to be created for
-        # each user that operations need to be performed for.
-        if not CONF.service_available.nova:
-            return None
-        import novaclient.client
-
-        auth_url = CONF.identity.uri
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        region = CONF.identity.region
-
-        client_args = (credentials.username, credentials.password,
-                       credentials.tenant_name, auth_url)
-
-        # Create our default Nova client to use in testing
-        service_type = CONF.compute.catalog_type
-        endpoint_type = CONF.compute.endpoint_type
-        return novaclient.client.Client(self.NOVACLIENT_VERSION,
-                                        *client_args,
-                                        service_type=service_type,
-                                        endpoint_type=endpoint_type,
-                                        region_name=region,
-                                        no_cache=True,
-                                        insecure=dscv,
-                                        http_log_debug=True)
-
-    def _get_image_client(self):
-        if not CONF.service_available.glance:
-            return None
-        import glanceclient
-        token = self.identity_client.auth_token
-        region = CONF.identity.region
-        endpoint_type = CONF.image.endpoint_type
-        endpoint = self.identity_client.service_catalog.url_for(
-            attr='region', filter_value=region,
-            service_type=CONF.image.catalog_type, endpoint_type=endpoint_type)
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        return glanceclient.Client('1', endpoint=endpoint, token=token,
-                                   insecure=dscv)
-
-    def _get_volume_client(self, credentials):
-        if not CONF.service_available.cinder:
-            return None
-        import cinderclient.client
-        auth_url = CONF.identity.uri
-        region = CONF.identity.region
-        endpoint_type = CONF.volume.endpoint_type
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
-                                          credentials.username,
-                                          credentials.password,
-                                          credentials.tenant_name,
-                                          auth_url,
-                                          region_name=region,
-                                          endpoint_type=endpoint_type,
-                                          insecure=dscv,
-                                          http_log_debug=True)
-
-    def _get_object_storage_client(self, credentials):
-        if not CONF.service_available.swift:
-            return None
-        import swiftclient
-        auth_url = CONF.identity.uri
-        # add current tenant to swift operator role group.
-        admin_credentials = auth.get_default_credentials('identity_admin')
-        keystone_admin = self._get_identity_client(admin_credentials)
-
-        # enable test user to operate swift by adding operator role to him.
-        roles = keystone_admin.roles.list()
-        operator_role = CONF.object_storage.operator_role
-        member_role = [role for role in roles if role.name == operator_role][0]
-        # NOTE(maurosr): This is surrounded in the try-except block cause
-        # neutron tests doesn't have tenant isolation.
-        try:
-            keystone_admin.roles.add_user_role(self.identity_client.user_id,
-                                               member_role.id,
-                                               self.identity_client.tenant_id)
-        except keystoneclient.exceptions.Conflict:
-            pass
-
-        endpoint_type = CONF.object_storage.endpoint_type
-        os_options = {'endpoint_type': endpoint_type}
-        return swiftclient.Connection(auth_url, credentials.username,
-                                      credentials.password,
-                                      tenant_name=credentials.tenant_name,
-                                      auth_version='2',
-                                      os_options=os_options)
-
-    def _get_orchestration_client(self, credentials):
-        if not CONF.service_available.heat:
-            return None
-        import heatclient.client
-
-        keystone = self._get_identity_client(credentials)
-        region = CONF.identity.region
-        endpoint_type = CONF.orchestration.endpoint_type
-        token = keystone.auth_token
-        service_type = CONF.orchestration.catalog_type
-        try:
-            endpoint = keystone.service_catalog.url_for(
-                attr='region',
-                filter_value=region,
-                service_type=service_type,
-                endpoint_type=endpoint_type)
-        except keystoneclient.exceptions.EndpointNotFound:
-            return None
-        else:
-            return heatclient.client.Client(self.HEATCLIENT_VERSION,
-                                            endpoint,
-                                            token=token,
-                                            username=credentials.username,
-                                            password=credentials.password)
-
-    def _get_identity_client(self, credentials):
-        # This identity client is not intended to check the security
-        # of the identity service, so use admin credentials by default.
-
-        auth_url = CONF.identity.uri
-        dscv = CONF.identity.disable_ssl_certificate_validation
-
-        return keystoneclient.v2_0.client.Client(
-            username=credentials.username,
-            password=credentials.password,
-            tenant_name=credentials.tenant_name,
-            auth_url=auth_url,
-            insecure=dscv)
-
-    def _get_baremetal_client(self):
-        # ironic client is currently intended to by used by admin users
-        if not CONF.service_available.ironic:
-            return None
-        import ironicclient.client
-        roles = self._get_roles()
-        if CONF.identity.admin_role not in roles:
-            return None
-
-        auth_url = CONF.identity.uri
-        api_version = self.IRONICCLIENT_VERSION
-        insecure = CONF.identity.disable_ssl_certificate_validation
-        service_type = CONF.baremetal.catalog_type
-        endpoint_type = CONF.baremetal.endpoint_type
-        creds = {
-            'os_username': self.credentials.username,
-            'os_password': self.credentials.password,
-            'os_tenant_name': self.credentials.tenant_name
-        }
-
-        try:
-            return ironicclient.client.get_client(
-                api_version=api_version,
-                os_auth_url=auth_url,
-                insecure=insecure,
-                os_service_type=service_type,
-                os_endpoint_type=endpoint_type,
-                **creds)
-        except keystoneclient.exceptions.EndpointNotFound:
-            return None
-
-    def _get_network_client(self):
-        # The intended configuration is for the network client to have
-        # admin privileges and indicate for whom resources are being
-        # created via a 'tenant_id' parameter.  This will often be
-        # preferable to authenticating as a specific user because
-        # working with certain resources (public routers and networks)
-        # often requires admin privileges anyway.
-        if not CONF.service_available.neutron:
-            return None
-        import neutronclient.v2_0.client
-
-        credentials = auth.get_default_credentials('identity_admin')
-
-        auth_url = CONF.identity.uri
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        endpoint_type = CONF.network.endpoint_type
-
-        return neutronclient.v2_0.client.Client(
-            username=credentials.username,
-            password=credentials.password,
-            tenant_name=credentials.tenant_name,
-            endpoint_type=endpoint_type,
-            auth_url=auth_url,
-            insecure=dscv)
-
-    def _get_data_processing_client(self, credentials):
-        if not CONF.service_available.sahara:
-            # Sahara isn't available
-            return None
-
-        import saharaclient.client
-
-        endpoint_type = CONF.data_processing.endpoint_type
-        catalog_type = CONF.data_processing.catalog_type
-        auth_url = CONF.identity.uri
-
-        client = saharaclient.client.Client(
-            self.SAHARACLIENT_VERSION,
-            credentials.username,
-            credentials.password,
-            project_name=credentials.tenant_name,
-            endpoint_type=endpoint_type,
-            service_type=catalog_type,
-            auth_url=auth_url)
-
-        return client
-
-    def _get_ceilometer_client(self, credentials):
-        if not CONF.service_available.ceilometer:
-            return None
-
-        import ceilometerclient.client
-
-        keystone = self._get_identity_client(credentials)
-        region = CONF.identity.region
-
-        endpoint_type = CONF.telemetry.endpoint_type
-        service_type = CONF.telemetry.catalog_type
-        auth_url = CONF.identity.uri
-
-        try:
-            keystone.service_catalog.url_for(
-                attr='region',
-                filter_value=region,
-                service_type=service_type,
-                endpoint_type=endpoint_type)
-        except keystoneclient.exceptions.EndpointNotFound:
-            return None
-        else:
-            return ceilometerclient.client.get_client(
-                self.CEILOMETERCLIENT_VERSION,
-                os_username=credentials.username,
-                os_password=credentials.password,
-                os_tenant_name=credentials.tenant_name,
-                os_auth_url=auth_url,
-                os_service_type=service_type,
-                os_endpoint_type=endpoint_type)
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
new file mode 100644
index 0000000..9ae3dfb
--- /dev/null
+++ b/tempest/cmd/cleanup.py
@@ -0,0 +1,301 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Dell Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# @author: David Paterson
+
+"""
+Utility for cleaning up environment after Tempest run
+
+Runtime Arguments
+-----------------
+
+--init-saved-state: Before you can execute cleanup you must initialize
+the saved state by running it with the --init-saved-state flag
+(creating ./saved_state.json), which prevents cleanup from deleting
+objects you want to keep.  Typically you would run cleanup with
+--init-saved-state prior to a tempest run.  If this is not the case,
+saved_state.json must be edited to remove the objects you want cleanup
+to delete.
+
+--dry-run: Creates a report (dry_run.json) of the tenants that will be
+cleaned up (in the "_tenants_to_clean" array), and the global objects
+that will be removed (tenants, users, flavors and images).  Once
+cleanup is executed in normal mode, running it again with --dry-run
+should yield an empty report.
+
+**NOTE**: The _tenants_to_clean array in dry_run.json lists the
+tenants whose child objects cleanup will delete; the tenants
+themselves are not deleted. It may therefore differ from the tenants
+array, since by default the tempest and alternate tempest tenants are
+cleaned of their resources but not deleted.
+
+**Normal mode**: running with no arguments will query your deployment and
+build a list of objects to delete, after filtering out the objects
+found in saved_state.json and applying the
+--preserve-tempest-conf-objects and
+--delete-tempest-conf-objects flags.
+
+By default the tempest and alternate tempest users and tenants are not
+deleted and the admin user specified in tempest.conf is never deleted.
+
+Please run with --help to see full list of options.
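+
+Example invocations (illustrative only; adjust the command to however the
+script is installed in your environment)::
+
+    # record the current deployment state before a tempest run
+    python -m tempest.cmd.cleanup --init-saved-state
+
+    # report what would be deleted, without deleting anything
+    python -m tempest.cmd.cleanup --dry-run
+
+    # clean up everything not recorded in saved_state.json
+    python -m tempest.cmd.cleanup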
+"""
+import argparse
+import json
+import sys
+
+from tempest import auth
+from tempest import clients
+from tempest.cmd import cleanup_service
+from tempest import config
+from tempest.openstack.common import log as logging
+
+SAVED_STATE_JSON = "saved_state.json"
+DRY_RUN_JSON = "dry_run.json"
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class Cleanup(object):
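+    """Top-level driver for a cleanup run.
+
+    Builds the per-tenant and global cleanup service lists, temporarily
+    grants the admin user a role in each tenant so that tenant's
+    resources can be listed and deleted, and writes saved_state.json or
+    dry_run.json when the corresponding options are given.
+    """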
+
+    def __init__(self):
+        self.admin_mgr = clients.AdminManager()
+        self.dry_run_data = {}
+        self.json_data = {}
+        self._init_options()
+
+        self.admin_id = ""
+        self.admin_role_id = ""
+        self.admin_tenant_id = ""
+        self._init_admin_ids()
+
+        self.admin_role_added = []
+
+        # available services
+        self.tenant_services = cleanup_service.get_tenant_cleanup_services()
+        self.global_services = cleanup_service.get_global_cleanup_services()
+        cleanup_service.init_conf()
+
+    def run(self):
+        opts = self.options
+        if opts.init_saved_state:
+            self._init_state()
+            return
+
+        self._load_json()
+        self._cleanup()
+
+    def _cleanup(self):
+        LOG.debug("Begin cleanup")
+        is_dry_run = self.options.dry_run
+        is_preserve = self.options.preserve_tempest_conf_objects
+        is_save_state = False
+
+        if is_dry_run:
+            self.dry_run_data["_tenants_to_clean"] = {}
+            f = open(DRY_RUN_JSON, 'w+')
+
+        admin_mgr = self.admin_mgr
+        # Always clean up the tempest and alt tempest tenants unless
+        # they are in the saved state json, so is_preserve is False here.
+        kwargs = {'data': self.dry_run_data,
+                  'is_dry_run': is_dry_run,
+                  'saved_state_json': self.json_data,
+                  'is_preserve': False,
+                  'is_save_state': is_save_state}
+        tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
+        tenants = tenant_service.list()
+        LOG.debug("Process %s tenants" % len(tenants))
+
+        # Loop through list of tenants and clean them up.
+        for tenant in tenants:
+            self._add_admin(tenant['id'])
+            self._clean_tenant(tenant)
+
+        kwargs = {'data': self.dry_run_data,
+                  'is_dry_run': is_dry_run,
+                  'saved_state_json': self.json_data,
+                  'is_preserve': is_preserve,
+                  'is_save_state': is_save_state}
+        for service in self.global_services:
+            svc = service(admin_mgr, **kwargs)
+            svc.run()
+
+        if is_dry_run:
+            f.write(json.dumps(self.dry_run_data, sort_keys=True,
+                               indent=2, separators=(',', ': ')))
+            f.close()
+
+        self._remove_admin_user_roles()
+
+    def _remove_admin_user_roles(self):
+        tenant_ids = self.admin_role_added
+        LOG.debug("Removing admin user roles where needed for tenants: %s"
+                  % tenant_ids)
+        for tenant_id in tenant_ids:
+            self._remove_admin_role(tenant_id)
+
+    def _clean_tenant(self, tenant):
+        LOG.debug("Cleaning tenant:  %s " % tenant['name'])
+        is_dry_run = self.options.dry_run
+        dry_run_data = self.dry_run_data
+        is_preserve = self.options.preserve_tempest_conf_objects
+        tenant_id = tenant['id']
+        tenant_name = tenant['name']
+        tenant_data = None
+        if is_dry_run:
+            tenant_data = dry_run_data["_tenants_to_clean"][tenant_id] = {}
+            tenant_data['name'] = tenant_name
+
+        kwargs = {"username": CONF.identity.admin_username,
+                  "password": CONF.identity.admin_password,
+                  "tenant_name": tenant['name']}
+        mgr = clients.Manager(credentials=auth.get_credentials(**kwargs))
+        kwargs = {'data': tenant_data,
+                  'is_dry_run': is_dry_run,
+                  'saved_state_json': None,
+                  'is_preserve': is_preserve,
+                  'is_save_state': False,
+                  'tenant_id': tenant_id}
+        for service in self.tenant_services:
+            svc = service(mgr, **kwargs)
+            svc.run()
+
+    def _init_admin_ids(self):
+        id_cl = self.admin_mgr.identity_client
+
+        tenant = id_cl.get_tenant_by_name(CONF.identity.admin_tenant_name)
+        self.admin_tenant_id = tenant['id']
+
+        user = id_cl.get_user_by_username(self.admin_tenant_id,
+                                          CONF.identity.admin_username)
+        self.admin_id = user['id']
+
+        _, roles = id_cl.list_roles()
+        for role in roles:
+            if role['name'] == CONF.identity.admin_role:
+                self.admin_role_id = role['id']
+                break
+
+    def _init_options(self):
+        parser = argparse.ArgumentParser(
+            description='Cleanup after tempest run')
+        parser.add_argument('--init-saved-state', action="store_true",
+                            dest='init_saved_state', default=False,
+                            help="Creates JSON file: " + SAVED_STATE_JSON +
+                            ", representing the current state of your "
+                            "deployment,  specifically objects types "
+                            "Tempest creates and destroys during a run. "
+                            "You must run with this flag prior to "
+                            "executing cleanup.")
+        parser.add_argument('--preserve-tempest-conf-objects',
+                            action="store_true",
+                            dest='preserve_tempest_conf_objects',
+                            default=True, help="Do not delete the "
+                            "tempest and alternate tempest users and "
+                            "tenants, so they may be used for future "
+                            "tempest runs. By default this is argument "
+                            "is true.")
+        parser.add_argument('--delete-tempest-conf-objects',
+                            action="store_false",
+                            dest='preserve_tempest_conf_objects',
+                            default=False,
+                            help="Delete the tempest and "
+                            "alternate tempest users and tenants.")
+        parser.add_argument('--dry-run', action="store_true",
+                            dest='dry_run', default=False,
+                            help="Generate JSON file:" + DRY_RUN_JSON +
+                            ", that reports the objects that would have "
+                            "been deleted had a full cleanup been run.")
+
+        self.options = parser.parse_args()
+
+    def _add_admin(self, tenant_id):
+        id_cl = self.admin_mgr.identity_client
+        needs_role = True
+        _, roles = id_cl.list_user_roles(tenant_id, self.admin_id)
+        for role in roles:
+            if role['id'] == self.admin_role_id:
+                needs_role = False
+                LOG.debug("User already had admin privilege for this tenant")
+        if needs_role:
+            LOG.debug("Adding admin priviledge for : %s" % tenant_id)
+            id_cl.assign_user_role(tenant_id, self.admin_id,
+                                   self.admin_role_id)
+            self.admin_role_added.append(tenant_id)
+
+    def _remove_admin_role(self, tenant_id):
+        LOG.debug("Remove admin user role for tenant: %s" % tenant_id)
+        # A fresh AdminManager must be initialized for each role removal,
+        # otherwise an authentication exception is thrown.
+        id_cl = clients.AdminManager().identity_client
+        if self._tenant_exists(tenant_id):
+            try:
+                id_cl.remove_user_role(tenant_id, self.admin_id,
+                                       self.admin_role_id)
+            except Exception as ex:
+                LOG.exception("Failed removing role from tenant which still"
+                              "exists, exception: %s" % ex)
+
+    def _tenant_exists(self, tenant_id):
+        id_cl = self.admin_mgr.identity_client
+        try:
+            t = id_cl.get_tenant(tenant_id)
+            LOG.debug("Tenant is: %s" % str(t))
+            return True
+        except Exception as ex:
+            LOG.debug("Tenant no longer exists? %s" % ex)
+            return False
+
+    def _init_state(self):
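+        """Run each global service in save_state mode and write the
+        result to saved_state.json.
+        """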
+        LOG.debug("Initializing saved state.")
+        data = {}
+        admin_mgr = self.admin_mgr
+        kwargs = {'data': data,
+                  'is_dry_run': False,
+                  'saved_state_json': data,
+                  'is_preserve': False,
+                  'is_save_state': True}
+        for service in self.global_services:
+            svc = service(admin_mgr, **kwargs)
+            svc.run()
+
+        f = open(SAVED_STATE_JSON, 'w+')
+        f.write(json.dumps(data,
+                           sort_keys=True, indent=2, separators=(',', ': ')))
+        f.close()
+
+    def _load_json(self):
+        try:
+            json_file = open(SAVED_STATE_JSON)
+            self.json_data = json.load(json_file)
+            json_file.close()
+        except IOError as ex:
+            LOG.exception("Failed loading saved state, please be sure you"
+                          " have first run cleanup with --init-saved-state "
+                          "flag prior to running tempest. Exception: %s" % ex)
+            sys.exit(ex)
+        except Exception as ex:
+            LOG.exception("Exception parsing saved state json : %s" % ex)
+            sys.exit(ex)
+
+
+def main():
+    cleanup = Cleanup()
+    cleanup.run()
+    LOG.info('Cleanup finished!')
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
new file mode 100644
index 0000000..f5f0db3
--- /dev/null
+++ b/tempest/cmd/cleanup_service.py
@@ -0,0 +1,1066 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Dell Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+'''
+Created on Sep 3, 2014
+
+@author: David_Paterson
+'''
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+CONF_USERS = None
+CONF_TENANTS = None
+CONF_PUB_NETWORK = None
+CONF_PRIV_NETWORK_NAME = None
+CONF_PUB_ROUTER = None
+CONF_FLAVORS = None
+CONF_IMAGES = None
+
+IS_CEILOMETER = None
+IS_CINDER = None
+IS_GLANCE = None
+IS_HEAT = None
+IS_NEUTRON = None
+IS_NOVA = None
+
+
+def init_conf():
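+    """Cache tempest.conf values in module-level globals.
+
+    Stores the configured users, tenants, networks, flavors and images
+    used for is_preserve filtering, plus the per-service availability
+    flags used to decide which cleanup services to run.
+    """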
+    global CONF_USERS
+    global CONF_TENANTS
+    global CONF_PUB_NETWORK
+    global CONF_PRIV_NETWORK_NAME
+    global CONF_PUB_ROUTER
+    global CONF_FLAVORS
+    global CONF_IMAGES
+
+    global IS_CEILOMETER
+    global IS_CINDER
+    global IS_GLANCE
+    global IS_HEAT
+    global IS_NEUTRON
+    global IS_NOVA
+
+    CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
+                  CONF.identity.alt_username]
+    CONF_TENANTS = [CONF.identity.admin_tenant_name,
+                    CONF.identity.tenant_name,
+                    CONF.identity.alt_tenant_name]
+    CONF_PUB_NETWORK = CONF.network.public_network_id
+    CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
+    CONF_PUB_ROUTER = CONF.network.public_router_id
+    CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
+    CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
+
+    IS_CEILOMETER = CONF.service_available.ceilometer
+    IS_CINDER = CONF.service_available.cinder
+    IS_GLANCE = CONF.service_available.glance
+    IS_HEAT = CONF.service_available.heat
+    IS_NEUTRON = CONF.service_available.neutron
+    IS_NOVA = CONF.service_available.nova
+
+
+class BaseService(object):
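+    """Base class for the individual resource cleanup services.
+
+    Subclasses set self.client and implement list(), delete(),
+    dry_run() and, for global services, save_state(); run() dispatches
+    to one of them based on the is_dry_run and is_save_state flags
+    passed in through kwargs.
+    """
+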
+    def __init__(self, kwargs):
+        self.client = None
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
+    def _filter_by_tenant_id(self, item_list):
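+        """Return only the items owned by self.tenant_id.
+
+        The list is returned unchanged when no tenant_id is set on the
+        service or the items do not carry a tenant_id field.
+        """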
+        if (item_list is None
+                or len(item_list) == 0
+                or not hasattr(self, 'tenant_id')
+                or self.tenant_id is None
+                or 'tenant_id' not in item_list[0]):
+            return item_list
+
+        _filtered_list = []
+        for item in item_list:
+            if item['tenant_id'] == self.tenant_id:
+                _filtered_list.append(item)
+        return _filtered_list
+
+    def list(self):
+        pass
+
+    def delete(self):
+        pass
+
+    def dry_run(self):
+        pass
+
+    def save_state(self):
+        pass
+
+    def run(self):
+        if self.is_dry_run:
+            self.dry_run()
+        elif self.is_save_state:
+            self.save_state()
+        else:
+            self.delete()
+
+
+class SnapshotService(BaseService):
+
+    def __init__(self, manager, **kwargs):
+        super(SnapshotService, self).__init__(kwargs)
+        self.client = manager.snapshots_client
+
+    def list(self):
+        client = self.client
+        __, snaps = client.list_snapshots()
+        LOG.debug("List count, %s Snapshots" % len(snaps))
+        return snaps
+
+    def delete(self):
+        snaps = self.list()
+        client = self.client
+        for snap in snaps:
+            try:
+                client.delete_snapshot(snap['id'])
+            except Exception as e:
+                LOG.exception("Delete Snapshot exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        snaps = self.list()
+        self.data['snapshots'] = snaps
+
+
+class ServerService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(ServerService, self).__init__(kwargs)
+        self.client = manager.servers_client
+
+    def list(self):
+        client = self.client
+        _, servers_body = client.list_servers()
+        servers = servers_body['servers']
+        LOG.debug("List count, %s Servers" % len(servers))
+        return servers
+
+    def delete(self):
+        client = self.client
+        servers = self.list()
+        for server in servers:
+            try:
+                client.delete_server(server['id'])
+            except Exception as e:
+                LOG.exception("Delete Server exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        servers = self.list()
+        self.data['servers'] = servers
+
+
+class ServerGroupService(ServerService):
+
+    def list(self):
+        client = self.client
+        _, sgs = client.list_server_groups()
+        LOG.debug("List count, %s Server Groups" % len(sgs))
+        return sgs
+
+    def delete(self):
+        client = self.client
+        sgs = self.list()
+        for sg in sgs:
+            try:
+                client.delete_server_group(sg['id'])
+            except Exception as e:
+                LOG.exception("Delete Server Group exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        sgs = self.list()
+        self.data['server_groups'] = sgs
+
+
+class StackService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(StackService, self).__init__(kwargs)
+        self.client = manager.orchestration_client
+
+    def list(self):
+        client = self.client
+        _, stacks = client.list_stacks()
+        LOG.debug("List count, %s Stacks" % len(stacks))
+        return stacks
+
+    def delete(self):
+        client = self.client
+        stacks = self.list()
+        for stack in stacks:
+            try:
+                client.delete_stack(stack['id'])
+            except Exception as e:
+                LOG.exception("Delete Stack exception: %s " % e)
+                pass
+
+    def dry_run(self):
+        stacks = self.list()
+        self.data['stacks'] = stacks
+
+
+class KeyPairService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(KeyPairService, self).__init__(kwargs)
+        self.client = manager.keypairs_client
+
+    def list(self):
+        client = self.client
+        _, keypairs = client.list_keypairs()
+        LOG.debug("List count, %s Keypairs" % len(keypairs))
+        return keypairs
+
+    def delete(self):
+        client = self.client
+        keypairs = self.list()
+        for k in keypairs:
+            try:
+                name = k['keypair']['name']
+                client.delete_keypair(name)
+            except Exception as e:
+                LOG.exception("Delete Keypairs exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        keypairs = self.list()
+        self.data['keypairs'] = keypairs
+
+
+class SecurityGroupService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(SecurityGroupService, self).__init__(kwargs)
+        self.client = manager.security_groups_client
+
+    def list(self):
+        client = self.client
+        _, secgrps = client.list_security_groups()
+        secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
+        LOG.debug("List count, %s Security Groups" % len(secgrp_del))
+        return secgrp_del
+
+    def delete(self):
+        client = self.client
+        secgrp_del = self.list()
+        for g in secgrp_del:
+            try:
+                client.delete_security_group(g['id'])
+            except Exception as e:
+                LOG.exception("Delete Security Groups exception: %s" % e)
+
+    def dry_run(self):
+        secgrp_del = self.list()
+        self.data['security_groups'] = secgrp_del
+
+
+class FloatingIpService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(FloatingIpService, self).__init__(kwargs)
+        self.client = manager.floating_ips_client
+
+    def list(self):
+        client = self.client
+        _, floating_ips = client.list_floating_ips()
+        LOG.debug("List count, %s Floating IPs" % len(floating_ips))
+        return floating_ips
+
+    def delete(self):
+        client = self.client
+        floating_ips = self.list()
+        for f in floating_ips:
+            try:
+                client.delete_floating_ip(f['id'])
+            except Exception as e:
+                LOG.exception("Delete Floating IPs exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        floating_ips = self.list()
+        self.data['floating_ips'] = floating_ips
+
+
+class VolumeService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(VolumeService, self).__init__(kwargs)
+        self.client = manager.volumes_client
+
+    def list(self):
+        client = self.client
+        _, vols = client.list_volumes()
+        LOG.debug("List count, %s Volumes" % len(vols))
+        return vols
+
+    def delete(self):
+        client = self.client
+        vols = self.list()
+        for v in vols:
+            try:
+                client.delete_volume(v['id'])
+            except Exception as e:
+                LOG.exception("Delete Volume exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        vols = self.list()
+        self.data['volumes'] = vols
+
+
+# Begin network service classes
+class NetworkService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(NetworkService, self).__init__(kwargs)
+        self.client = manager.network_client
+
+    def list(self):
+        client = self.client
+        _, networks = client.list_networks()
+        networks = self._filter_by_tenant_id(networks['networks'])
+        # filter out networks declared in tempest.conf
+        if self.is_preserve:
+            networks = [network for network in networks
+                        if (network['name'] != CONF_PRIV_NETWORK_NAME
+                            and network['id'] != CONF_PUB_NETWORK)]
+        LOG.debug("List count, %s Networks" % networks)
+        return networks
+
+    def delete(self):
+        client = self.client
+        networks = self.list()
+        for n in networks:
+            try:
+                client.delete_network(n['id'])
+            except Exception as e:
+                LOG.exception("Delete Network exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        networks = self.list()
+        self.data['networks'] = networks
+
+
+class NetworkIpSecPolicyService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, ipsecpols = client.list_ipsecpolicies()
+        ipsecpols = ipsecpols['ipsecpolicies']
+        ipsecpols = self._filter_by_tenant_id(ipsecpols)
+        LOG.debug("List count, %s IP Security Policies" % len(ipsecpols))
+        return ipsecpols
+
+    def delete(self):
+        client = self.client
+        ipsecpols = self.list()
+        for ipsecpol in ipsecpols:
+            try:
+                client.delete_ipsecpolicy(ipsecpol['id'])
+            except Exception as e:
+                LOG.exception("Delete IP Securty Policy exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        ipsecpols = self.list()
+        self.data['ip_security_policies'] = ipsecpols
+
+
+class NetworkFwPolicyService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, fwpols = client.list_firewall_policies()
+        fwpols = fwpols['firewall_policies']
+        fwpols = self._filter_by_tenant_id(fwpols)
+        LOG.debug("List count, %s Firewall Policies" % len(fwpols))
+        return fwpols
+
+    def delete(self):
+        client = self.client
+        fwpols = self.list()
+        for fwpol in fwpols:
+            try:
+                client.delete_firewall_policy(fwpol['id'])
+            except Exception as e:
+                LOG.exception("Delete Firewall Policy exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        fwpols = self.list()
+        self.data['firewall_policies'] = fwpols
+
+
+class NetworkFwRulesService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, fwrules = client.list_firewall_rules()
+        fwrules = fwrules['firewall_rules']
+        fwrules = self._filter_by_tenant_id(fwrules)
+        LOG.debug("List count, %s Firewall Rules" % len(fwrules))
+        return fwrules
+
+    def delete(self):
+        client = self.client
+        fwrules = self.list()
+        for fwrule in fwrules:
+            try:
+                client.delete_firewall_rule(fwrule['id'])
+            except Exception as e:
+                LOG.exception("Delete Firewall Rule exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        fwrules = self.list()
+        self.data['firewall_rules'] = fwrules
+
+
+class NetworkIkePolicyService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, ikepols = client.list_ikepolicies()
+        ikepols = ikepols['ikepolicies']
+        ikepols = self._filter_by_tenant_id(ikepols)
+        LOG.debug("List count, %s IKE Policies" % len(ikepols))
+        return ikepols
+
+    def delete(self):
+        client = self.client
+        ikepols = self.list()
+        for ikepol in ikepols:
+            try:
+                client.delete_ikepolicy(ikepol['id'])
+            except Exception as e:
+                LOG.exception("Delete IKE Policy exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        ikepols = self.list()
+        self.data['ike_policies'] = ikepols
+
+
+class NetworkVpnServiceService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, vpnsrvs = client.list_vpnservices()
+        vpnsrvs = vpnsrvs['vpnservices']
+        vpnsrvs = self._filter_by_tenant_id(vpnsrvs)
+        LOG.debug("List count, %s VPN Services" % len(vpnsrvs))
+        return vpnsrvs
+
+    def delete(self):
+        client = self.client
+        vpnsrvs = self.list()
+        for vpnsrv in vpnsrvs:
+            try:
+                client.delete_vpnservice(vpnsrv['id'])
+            except Exception as e:
+                LOG.exception("Delete VPN Service exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        vpnsrvs = self.list()
+        self.data['vpn_services'] = vpnsrvs
+
+
+class NetworkFloatingIpService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, flips = client.list_floatingips()
+        flips = flips['floatingips']
+        flips = self._filter_by_tenant_id(flips)
+        LOG.debug("List count, %s Network Floating IPs" % len(flips))
+        return flips
+
+    def delete(self):
+        client = self.client
+        flips = self.list()
+        for flip in flips:
+            try:
+                client.delete_floatingip(flip['id'])
+            except Exception as e:
+                LOG.exception("Delete Network Floating IP exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        flips = self.list()
+        self.data['floating_ips'] = flips
+
+
+class NetworkRouterService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, routers = client.list_routers()
+        routers = routers['routers']
+        routers = self._filter_by_tenant_id(routers)
+        if self.is_preserve:
+            routers = [router for router in routers
+                       if router['id'] != CONF_PUB_ROUTER]
+
+        LOG.debug("List count, %s Routers" % len(routers))
+        return routers
+
+    def delete(self):
+        client = self.client
+        routers = self.list()
+        for router in routers:
+            try:
+                rid = router['id']
+                _, ports = client.list_router_interfaces(rid)
+                ports = ports['ports']
+                # Detach all router interfaces before deleting the router
+                for port in ports:
+                    subid = port['fixed_ips'][0]['subnet_id']
+                    client.remove_router_interface_with_subnet_id(rid, subid)
+                client.delete_router(rid)
+            except Exception as e:
+                LOG.exception("Delete Router exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        routers = self.list()
+        self.data['routers'] = routers
+
+
+class NetworkHealthMonitorService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, hms = client.list_health_monitors()
+        hms = hms['health_monitors']
+        hms = self._filter_by_tenant_id(hms)
+        LOG.debug("List count, %s Health Monitors" % len(hms))
+        return hms
+
+    def delete(self):
+        client = self.client
+        hms = self.list()
+        for hm in hms:
+            try:
+                client.delete_health_monitor(hm['id'])
+            except Exception as e:
+                LOG.exception("Delete Health Monitor exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        hms = self.list()
+        self.data['health_monitors'] = hms
+
+
+class NetworkMemberService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, members = client.list_members()
+        members = members['members']
+        members = self._filter_by_tenant_id(members)
+        LOG.debug("List count, %s Members" % len(members))
+        return members
+
+    def delete(self):
+        client = self.client
+        members = self.list()
+        for member in members:
+            try:
+                client.delete_member(member['id'])
+            except Exception as e:
+                LOG.exception("Delete Member exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        members = self.list()
+        self.data['members'] = members
+
+
+class NetworkVipService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, vips = client.list_vips()
+        vips = vips['vips']
+        vips = self._filter_by_tenant_id(vips)
+        LOG.debug("List count, %s VIPs" % len(vips))
+        return vips
+
+    def delete(self):
+        client = self.client
+        vips = self.list()
+        for vip in vips:
+            try:
+                client.delete_vip(vip['id'])
+            except Exception as e:
+                LOG.exception("Delete VIP exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        vips = self.list()
+        self.data['vips'] = vips
+
+
+class NetworkPoolService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, pools = client.list_pools()
+        pools = pools['pools']
+        pools = self._filter_by_tenant_id(pools)
+        LOG.debug("List count, %s Pools" % len(pools))
+        return pools
+
+    def delete(self):
+        client = self.client
+        pools = self.list()
+        for pool in pools:
+            try:
+                client.delete_pool(pool['id'])
+            except Exception as e:
+                LOG.exception("Delete Pool exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        pools = self.list()
+        self.data['pools'] = pools
+
+
+class NetworkMeteringLabelRuleService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, rules = client.list_metering_label_rules()
+        rules = rules['metering_label_rules']
+        rules = self._filter_by_tenant_id(rules)
+        LOG.debug("List count, %s Metering Label Rules" % len(rules))
+        return rules
+
+    def delete(self):
+        client = self.client
+        rules = self.list()
+        for rule in rules:
+            try:
+                client.delete_metering_label_rule(rule['id'])
+            except Exception as e:
+                LOG.exception("Delete Metering Label Rule exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        rules = self.list()
+        self.data['rules'] = rules
+
+
+class NetworkMeteringLabelService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, labels = client.list_metering_labels()
+        labels = labels['metering_labels']
+        labels = self._filter_by_tenant_id(labels)
+        LOG.debug("List count, %s Metering Labels" % len(labels))
+        return labels
+
+    def delete(self):
+        client = self.client
+        labels = self.list()
+        for label in labels:
+            try:
+                client.delete_metering_label(label['id'])
+            except Exception as e:
+                LOG.exception("Delete Metering Label exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        labels = self.list()
+        self.data['labels'] = labels
+
+
+class NetworkPortService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, ports = client.list_ports()
+        ports = ports['ports']
+        ports = self._filter_by_tenant_id(ports)
+        LOG.debug("List count, %s Ports" % len(ports))
+        return ports
+
+    def delete(self):
+        client = self.client
+        ports = self.list()
+        for port in ports:
+            try:
+                client.delete_port(port['id'])
+            except Exception as e:
+                LOG.exception("Delete Port exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        ports = self.list()
+        self.data['ports'] = ports
+
+
+class NetworkSubnetService(NetworkService):
+
+    def list(self):
+        client = self.client
+        _, subnets = client.list_subnets()
+        subnets = subnets['subnets']
+        subnets = self._filter_by_tenant_id(subnets)
+        LOG.debug("List count, %s Subnets" % len(subnets))
+        return subnets
+
+    def delete(self):
+        client = self.client
+        subnets = self.list()
+        for subnet in subnets:
+            try:
+                client.delete_subnet(subnet['id'])
+            except Exception as e:
+                LOG.exception("Delete Subnet exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        subnets = self.list()
+        self.data['subnets'] = subnets
+
+
+# Telemetry services
+class TelemetryAlarmService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(TelemetryAlarmService, self).__init__(kwargs)
+        self.client = manager.telemetry_client
+
+    def list(self):
+        client = self.client
+        _, alarms = client.list_alarms()
+        LOG.debug("List count, %s Alarms" % len(alarms))
+        return alarms
+
+    def delete(self):
+        client = self.client
+        alarms = self.list()
+        for alarm in alarms:
+            try:
+                client.delete_alarm(alarm['id'])
+            except Exception as e:
+                LOG.exception("Delete Alarms exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        alarms = self.list()
+        self.data['alarms'] = alarms
+
+
+# begin global services
+class FlavorService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(FlavorService, self).__init__(kwargs)
+        self.client = manager.flavors_client
+
+    def list(self):
+        client = self.client
+        _, flavors = client.list_flavors({"is_public": None})
+        if not self.is_save_state:
+            # recreate list removing saved flavors
+            flavors = [flavor for flavor in flavors if flavor['id']
+                       not in self.saved_state_json['flavors'].keys()]
+
+        if self.is_preserve:
+            flavors = [flavor for flavor in flavors
+                       if flavor['id'] not in CONF_FLAVORS]
+        LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
+        return flavors
+
+    def delete(self):
+        client = self.client
+        flavors = self.list()
+        for flavor in flavors:
+            try:
+                client.delete_flavor(flavor['id'])
+            except Exception as e:
+                LOG.exception("Delete Flavor exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        flavors = self.list()
+        self.data['flavors'] = flavors
+
+    def save_state(self):
+        flavors = self.list()
+        flavor_data = self.data['flavors'] = {}
+        for flavor in flavors:
+            flavor_data[flavor['id']] = flavor['name']
+
+
+class ImageService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(ImageService, self).__init__(kwargs)
+        self.client = manager.images_client
+
+    def list(self):
+        client = self.client
+        _, images = client.list_images({"all_tenants": True})
+        if not self.is_save_state:
+            images = [image for image in images if image['id']
+                      not in self.saved_state_json['images'].keys()]
+        if self.is_preserve:
+            images = [image for image in images
+                      if image['id'] not in CONF_IMAGES]
+        LOG.debug("List count, %s Images after reconcile" % len(images))
+        return images
+
+    def delete(self):
+        client = self.client
+        images = self.list()
+        for image in images:
+            try:
+                client.delete_image(image['id'])
+            except Exception as e:
+                LOG.exception("Delete Image exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        images = self.list()
+        self.data['images'] = images
+
+    def save_state(self):
+        images = self.list()
+        image_data = self.data['images'] = {}
+        for image in images:
+            image_data[image['id']] = image['name']
+
+
+class IdentityService(BaseService):
+    def __init__(self, manager, **kwargs):
+        super(IdentityService, self).__init__(kwargs)
+        self.client = manager.identity_client
+
+
+class UserService(IdentityService):
+
+    def list(self):
+        client = self.client
+        _, users = client.get_users()
+
+        if not self.is_save_state:
+            users = [user for user in users if user['id']
+                     not in self.saved_state_json['users'].keys()]
+
+        if self.is_preserve:
+            users = [user for user in users if user['name']
+                     not in CONF_USERS]
+
+        elif not self.is_save_state:  # Never delete admin user
+            users = [user for user in users if user['name'] !=
+                     CONF.identity.admin_username]
+
+        LOG.debug("List count, %s Users after reconcile" % len(users))
+        return users
+
+    def delete(self):
+        client = self.client
+        users = self.list()
+        for user in users:
+            try:
+                client.delete_user(user['id'])
+            except Exception as e:
+                LOG.exception("Delete User exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        users = self.list()
+        self.data['users'] = users
+
+    def save_state(self):
+        users = self.list()
+        user_data = self.data['users'] = {}
+        for user in users:
+            user_data[user['id']] = user['name']
+
+
+class RoleService(IdentityService):
+
+    def list(self):
+        client = self.client
+        try:
+            _, roles = client.list_roles()
+            # reconcile roles with saved state and never list admin role
+            if not self.is_save_state:
+                roles = [role for role in roles if
+                         (role['id'] not in
+                          self.saved_state_json['roles'].keys()
+                          and role['name'] != CONF.identity.admin_role)]
+                LOG.debug("List count, %s Roles after reconcile" % len(roles))
+            return roles
+        except Exception as ex:
+            LOG.exception("Cannot retrieve Roles, exception: %s" % ex)
+            return []
+
+    def delete(self):
+        client = self.client
+        roles = self.list()
+        for role in roles:
+            try:
+                client.delete_role(role['id'])
+            except Exception as e:
+                LOG.exception("Delete Role exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        roles = self.list()
+        self.data['roles'] = roles
+
+    def save_state(self):
+        roles = self.list()
+        role_data = self.data['roles'] = {}
+        for role in roles:
+            role_data[role['id']] = role['name']
+
+
+class TenantService(IdentityService):
+
+    def list(self):
+        client = self.client
+        _, tenants = client.list_tenants()
+        if not self.is_save_state:
+            tenants = [tenant for tenant in tenants if (tenant['id']
+                       not in self.saved_state_json['tenants'].keys()
+                       and tenant['name'] != CONF.identity.admin_tenant_name)]
+
+        if self.is_preserve:
+            tenants = [tenant for tenant in tenants if tenant['name']
+                       not in CONF_TENANTS]
+
+        LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
+        return tenants
+
+    def delete(self):
+        client = self.client
+        tenants = self.list()
+        for tenant in tenants:
+            try:
+                client.delete_tenant(tenant['id'])
+            except Exception as e:
+                LOG.exception("Delete Tenant exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        tenants = self.list()
+        self.data['tenants'] = tenants
+
+    def save_state(self):
+        tenants = self.list()
+        tenant_data = self.data['tenants'] = {}
+        for tenant in tenants:
+            tenant_data[tenant['id']] = tenant['name']
+
+
+class DomainService(BaseService):
+
+    def __init__(self, manager, **kwargs):
+        super(DomainService, self).__init__(kwargs)
+        self.client = manager.identity_v3_client
+
+    def list(self):
+        client = self.client
+        _, domains = client.list_domains()
+        if not self.is_save_state:
+            domains = [domain for domain in domains if domain['id']
+                       not in self.saved_state_json['domains'].keys()]
+
+        LOG.debug("List count, %s Domains after reconcile" % len(domains))
+        return domains
+
+    def delete(self):
+        client = self.client
+        domains = self.list()
+        for domain in domains:
+            try:
+                client.update_domain(domain['id'], enabled=False)
+                client.delete_domain(domain['id'])
+            except Exception as e:
+                LOG.exception("Delete Domain exception: %s" % e)
+                pass
+
+    def dry_run(self):
+        domains = self.list()
+        self.data['domains'] = domains
+
+    def save_state(self):
+        domains = self.list()
+        domain_data = self.data['domains'] = {}
+        for domain in domains:
+            domain_data[domain['id']] = domain['name']
+
+
+def get_tenant_cleanup_services():
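+    """Return the cleanup service classes to run against each tenant.
+
+    The selection depends on which services tempest.conf reports as
+    available and, for neutron, on which API extensions are enabled.
+    """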
+    tenant_services = []
+
+    if IS_CEILOMETER:
+        tenant_services.append(TelemetryAlarmService)
+    if IS_NOVA:
+        tenant_services.append(ServerService)
+        tenant_services.append(KeyPairService)
+        tenant_services.append(SecurityGroupService)
+        tenant_services.append(ServerGroupService)
+        if not IS_NEUTRON:
+            tenant_services.append(FloatingIpService)
+    if IS_HEAT:
+        tenant_services.append(StackService)
+    if IS_NEUTRON:
+        if test.is_extension_enabled('vpnaas', 'network'):
+            tenant_services.append(NetworkIpSecPolicyService)
+            tenant_services.append(NetworkIkePolicyService)
+            tenant_services.append(NetworkVpnServiceService)
+        if test.is_extension_enabled('fwaas', 'network'):
+            tenant_services.append(NetworkFwPolicyService)
+            tenant_services.append(NetworkFwRulesService)
+        if test.is_extension_enabled('lbaas', 'network'):
+            tenant_services.append(NetworkHealthMonitorService)
+            tenant_services.append(NetworkMemberService)
+            tenant_services.append(NetworkVipService)
+            tenant_services.append(NetworkPoolService)
+        if test.is_extension_enabled('metering', 'network'):
+            tenant_services.append(NetworkMeteringLabelRuleService)
+            tenant_services.append(NetworkMeteringLabelService)
+        tenant_services.append(NetworkRouterService)
+        tenant_services.append(NetworkFloatingIpService)
+        tenant_services.append(NetworkPortService)
+        tenant_services.append(NetworkSubnetService)
+        tenant_services.append(NetworkService)
+    if IS_CINDER:
+        tenant_services.append(SnapshotService)
+        tenant_services.append(VolumeService)
+    return tenant_services
+
+
+def get_global_cleanup_services():
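+    """Return the cleanup service classes for deployment-wide resources
+    such as flavors, images, users, tenants, domains and roles.
+    """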
+    global_services = []
+    if IS_NOVA:
+        global_services.append(FlavorService)
+    if IS_GLANCE:
+        global_services.append(ImageService)
+    global_services.append(UserService)
+    global_services.append(TenantService)
+    global_services.append(DomainService)
+    global_services.append(RoleService)
+    return global_services
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3f8db3d..3c41dd9 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -184,7 +184,9 @@
 def destroy_users(users):
     admin = keystone_admin()
     for user in users:
-        user_id = admin.identity.get_user_by_name(user['name'])['id']
+        tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
+        user_id = admin.identity.get_user_by_username(tenant_id,
+                                                      user['name'])['id']
         r, body = admin.identity.delete_user(user_id)
 
 
@@ -213,9 +215,7 @@
         self.check_users()
         self.check_objects()
         self.check_servers()
-        # TODO(sdague): Volumes not yet working, bring it back once the
-        # code is self testing.
-        # self.check_volumes()
+        self.check_volumes()
         self.check_telemetry()
 
     def check_users(self):
@@ -300,15 +300,15 @@
         LOG.info("checking volumes")
         for volume in self.res['volumes']:
             client = client_for_user(volume['owner'])
-            found = _get_volume_by_name(client, volume['name'])
+            vol_body = _get_volume_by_name(client, volume['name'])
             self.assertIsNotNone(
-                found,
+                vol_body,
                 "Couldn't find expected volume %s" % volume['name'])
 
             # Verify that a volume's attachment retrieved
             server_id = _get_server_by_name(client, volume['server'])['id']
-            attachment = self.client.get_attachment_from_volume(volume)
-            self.assertEqual(volume['id'], attachment['volume_id'])
+            attachment = client.volumes.get_attachment_from_volume(vol_body)
+            self.assertEqual(vol_body['id'], attachment['volume_id'])
             self.assertEqual(server_id, attachment['server_id'])
 
     def _confirm_telemetry_sample(self, server, sample):
@@ -501,8 +501,8 @@
 
 def _get_volume_by_name(client, name):
     r, body = client.volumes.list_volumes()
-    for volume in body['volumes']:
-        if name == volume['name']:
+    for volume in body:
+        if name == volume['display_name']:
             return volume
     return None
 
@@ -512,26 +512,32 @@
         client = client_for_user(volume['owner'])
 
         # only create a volume if the name isn't here
-        r, body = client.volumes.list_volumes()
-        if any(item['name'] == volume['name'] for item in body):
+        if _get_volume_by_name(client, volume['name']):
+            LOG.info("volume '%s' already exists" % volume['name'])
             continue
 
-        client.volumes.create_volume(volume['name'], volume['size'])
+        size = volume['gb']
+        v_name = volume['name']
+        resp, body = client.volumes.create_volume(size=size,
+                                                  display_name=v_name)
+        client.volumes.wait_for_volume_status(body['id'], 'available')
 
 
 def destroy_volumes(volumes):
     for volume in volumes:
         client = client_for_user(volume['owner'])
         volume_id = _get_volume_by_name(client, volume['name'])['id']
-        r, body = client.volumes.delete_volume(volume_id)
+        client.volumes.detach_volume(volume_id)
+        client.volumes.delete_volume(volume_id)
 
 
 def attach_volumes(volumes):
     for volume in volumes:
         client = client_for_user(volume['owner'])
-
         server_id = _get_server_by_name(client, volume['server'])['id']
-        client.volumes.attach_volume(volume['name'], server_id)
+        volume_id = _get_volume_by_name(client, volume['name'])['id']
+        device = volume['device']
+        client.volumes.attach_volume(volume_id, server_id, device)
 
 
 #######################
@@ -552,10 +558,8 @@
     create_objects(RES['objects'])
     create_images(RES['images'])
     create_servers(RES['servers'])
-    # TODO(sdague): volumes definition doesn't work yet, bring it
-    # back once we're actually executing the code
-    # create_volumes(RES['volumes'])
-    # attach_volumes(RES['volumes'])
+    create_volumes(RES['volumes'])
+    attach_volumes(RES['volumes'])
 
 
 def destroy_resources():
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 3450e1f..19ee6d5 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -36,11 +36,13 @@
   - name: assegai
     server: peltast
     owner: javelin
-    size: 1
+    gb: 1
+    device: /dev/vdb
   - name: pifpouf
     server: hoplite
     owner: javelin
-    size: 2
+    gb: 2
+    device: /dev/vdb
 servers:
   - name: peltast
     owner: javelin
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index cd696a9..5046bff 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -247,7 +247,7 @@
         'data_processing': 'sahara',
         'baremetal': 'ironic',
         'identity': 'keystone',
-        'queuing': 'zaqar',
+        'messaging': 'zaqar',
         'database': 'trove'
     }
     # Get catalog list for endpoints to use for validation
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index 9808ed1..56d34a5 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -24,8 +24,8 @@
 
 @six.add_metaclass(abc.ABCMeta)
 class CredentialProvider(object):
-    def __init__(self, name, tempest_client=True, interface='json',
-                 password='pass', network_resources=None):
+    def __init__(self, name, interface='json', password='pass',
+                 network_resources=None):
         self.name = name
 
     @abc.abstractmethod
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 02c50e4..b2edfee 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -13,7 +13,6 @@
 #    under the License.
 
 import netaddr
-from neutronclient.common import exceptions as n_exc
 
 from tempest import auth
 from tempest import clients
@@ -29,15 +28,14 @@
 
 class IsolatedCreds(cred_provider.CredentialProvider):
 
-    def __init__(self, name, tempest_client=True, interface='json',
-                 password='pass', network_resources=None):
-        super(IsolatedCreds, self).__init__(name, tempest_client, interface,
-                                            password, network_resources)
+    def __init__(self, name, interface='json', password='pass',
+                 network_resources=None):
+        super(IsolatedCreds, self).__init__(name, interface, password,
+                                            network_resources)
         self.network_resources = network_resources
         self.isolated_creds = {}
         self.isolated_net_resources = {}
         self.ports = []
-        self.tempest_client = tempest_client
         self.interface = interface
         self.password = password
         self.identity_admin_client, self.network_admin_client = (
@@ -50,96 +48,50 @@
             identity
             network
         """
-        if self.tempest_client:
-            os = clients.AdminManager(interface=self.interface)
-        else:
-            os = clients.OfficialClientManager(
-                auth.get_default_credentials('identity_admin')
-            )
+        os = clients.AdminManager(interface=self.interface)
         return os.identity_client, os.network_client
 
     def _create_tenant(self, name, description):
-        if self.tempest_client:
-            _, tenant = self.identity_admin_client.create_tenant(
-                name=name, description=description)
-        else:
-            tenant = self.identity_admin_client.tenants.create(
-                name,
-                description=description)
+        _, tenant = self.identity_admin_client.create_tenant(
+            name=name, description=description)
         return tenant
 
     def _get_tenant_by_name(self, name):
-        if self.tempest_client:
-            _, tenant = self.identity_admin_client.get_tenant_by_name(name)
-        else:
-            tenants = self.identity_admin_client.tenants.list()
-            for ten in tenants:
-                if ten['name'] == name:
-                    tenant = ten
-                    break
-            else:
-                raise exceptions.NotFound('No such tenant')
+        _, tenant = self.identity_admin_client.get_tenant_by_name(name)
         return tenant
 
     def _create_user(self, username, password, tenant, email):
-        if self.tempest_client:
-            _, user = self.identity_admin_client.create_user(username,
-                                                             password,
-                                                             tenant['id'],
-                                                             email)
-        else:
-            user = self.identity_admin_client.users.create(username, password,
-                                                           email,
-                                                           tenant_id=tenant.id)
+        _, user = self.identity_admin_client.create_user(
+            username, password, tenant['id'], email)
         return user
 
     def _get_user(self, tenant, username):
-        if self.tempest_client:
-            _, user = self.identity_admin_client.get_user_by_username(
-                tenant['id'],
-                username)
-        else:
-            user = self.identity_admin_client.users.get(username)
+        _, user = self.identity_admin_client.get_user_by_username(
+            tenant['id'], username)
         return user
 
     def _list_roles(self):
-        if self.tempest_client:
-            _, roles = self.identity_admin_client.list_roles()
-        else:
-            roles = self.identity_admin_client.roles.list()
+        _, roles = self.identity_admin_client.list_roles()
         return roles
 
     def _assign_user_role(self, tenant, user, role_name):
         role = None
         try:
             roles = self._list_roles()
-            if self.tempest_client:
-                role = next(r for r in roles if r['name'] == role_name)
-            else:
-                role = next(r for r in roles if r.name == role_name)
+            role = next(r for r in roles if r['name'] == role_name)
         except StopIteration:
             msg = 'No "%s" role found' % role_name
             raise exceptions.NotFound(msg)
-        if self.tempest_client:
-            self.identity_admin_client.assign_user_role(tenant['id'],
-                                                        user['id'], role['id'])
-        else:
-            self.identity_admin_client.roles.add_user_role(user.id, role.id,
-                                                           tenant.id)
+        self.identity_admin_client.assign_user_role(tenant['id'], user['id'],
+                                                    role['id'])
 
     def _delete_user(self, user):
-        if self.tempest_client:
-            self.identity_admin_client.delete_user(user)
-        else:
-            self.identity_admin_client.users.delete(user)
+        self.identity_admin_client.delete_user(user)
 
     def _delete_tenant(self, tenant):
         if CONF.service_available.neutron:
             self._cleanup_default_secgroup(tenant)
-        if self.tempest_client:
-            self.identity_admin_client.delete_tenant(tenant)
-        else:
-            self.identity_admin_client.tenants.delete(tenant)
+        self.identity_admin_client.delete_tenant(tenant)
 
     def _create_creds(self, suffix="", admin=False):
         """Create random credentials under the following schema.
@@ -175,15 +127,9 @@
         return self._get_credentials(user, tenant)
 
     def _get_credentials(self, user, tenant):
-        if self.tempest_client:
-            user_get = user.get
-            tenant_get = tenant.get
-        else:
-            user_get = user.__dict__.get
-            tenant_get = tenant.__dict__.get
         return auth.get_credentials(
-            username=user_get('name'), user_id=user_get('id'),
-            tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
+            username=user['name'], user_id=user['id'],
+            tenant_name=tenant['name'], tenant_id=tenant['id'],
             password=self.password)
 
     def _create_network_resources(self, tenant_id):
@@ -228,45 +174,32 @@
         return network, subnet, router
 
     def _create_network(self, name, tenant_id):
-        if self.tempest_client:
-            resp, resp_body = self.network_admin_client.create_network(
-                name=name, tenant_id=tenant_id)
-        else:
-            body = {'network': {'tenant_id': tenant_id, 'name': name}}
-            resp_body = self.network_admin_client.create_network(body)
+        _, resp_body = self.network_admin_client.create_network(
+            name=name, tenant_id=tenant_id)
         return resp_body['network']
 
     def _create_subnet(self, subnet_name, tenant_id, network_id):
-        if not self.tempest_client:
-            body = {'subnet': {'name': subnet_name, 'tenant_id': tenant_id,
-                               'network_id': network_id, 'ip_version': 4}}
-            if self.network_resources:
-                body['enable_dhcp'] = self.network_resources['dhcp']
         base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
         mask_bits = CONF.network.tenant_network_mask_bits
         for subnet_cidr in base_cidr.subnet(mask_bits):
             try:
-                if self.tempest_client:
-                    if self.network_resources:
-                        resp, resp_body = self.network_admin_client.\
-                            create_subnet(
-                                network_id=network_id, cidr=str(subnet_cidr),
-                                name=subnet_name,
-                                tenant_id=tenant_id,
-                                enable_dhcp=self.network_resources['dhcp'],
-                                ip_version=4)
-                    else:
-                        resp, resp_body = self.network_admin_client.\
-                            create_subnet(network_id=network_id,
-                                          cidr=str(subnet_cidr),
-                                          name=subnet_name,
-                                          tenant_id=tenant_id,
-                                          ip_version=4)
+                if self.network_resources:
+                    _, resp_body = self.network_admin_client.\
+                        create_subnet(
+                            network_id=network_id, cidr=str(subnet_cidr),
+                            name=subnet_name,
+                            tenant_id=tenant_id,
+                            enable_dhcp=self.network_resources['dhcp'],
+                            ip_version=4)
                 else:
-                    body['subnet']['cidr'] = str(subnet_cidr)
-                    resp_body = self.network_admin_client.create_subnet(body)
+                    _, resp_body = self.network_admin_client.\
+                        create_subnet(network_id=network_id,
+                                      cidr=str(subnet_cidr),
+                                      name=subnet_name,
+                                      tenant_id=tenant_id,
+                                      ip_version=4)
                 break
-            except (n_exc.BadRequest, exceptions.BadRequest) as e:
+            except exceptions.BadRequest as e:
                 if 'overlaps with another subnet' not in str(e):
                     raise
         else:
@@ -278,25 +211,15 @@
     def _create_router(self, router_name, tenant_id):
         external_net_id = dict(
             network_id=CONF.network.public_network_id)
-        if self.tempest_client:
-            resp, resp_body = self.network_admin_client.create_router(
-                router_name,
-                external_gateway_info=external_net_id,
-                tenant_id=tenant_id)
-        else:
-            body = {'router': {'name': router_name, 'tenant_id': tenant_id,
-                               'external_gateway_info': external_net_id,
-                               'admin_state_up': True}}
-            resp_body = self.network_admin_client.create_router(body)
+        _, resp_body = self.network_admin_client.create_router(
+            router_name,
+            external_gateway_info=external_net_id,
+            tenant_id=tenant_id)
         return resp_body['router']
 
     def _add_router_interface(self, router_id, subnet_id):
-        if self.tempest_client:
-            self.network_admin_client.add_router_interface_with_subnet_id(
-                router_id, subnet_id)
-        else:
-            body = {'subnet_id': subnet_id}
-            self.network_admin_client.add_interface_router(router_id, body)
+        self.network_admin_client.add_router_interface_with_subnet_id(
+            router_id, subnet_id)
 
     def get_primary_network(self):
         return self.isolated_net_resources.get('primary')[0]
@@ -380,12 +303,8 @@
 
     def _cleanup_default_secgroup(self, tenant):
         net_client = self.network_admin_client
-        if self.tempest_client:
-            resp, resp_body = net_client.list_security_groups(tenant_id=tenant,
-                                                              name="default")
-        else:
-            resp_body = net_client.list_security_groups(tenant_id=tenant,
-                                                        name="default")
+        _, resp_body = net_client.list_security_groups(tenant_id=tenant,
+                                                       name="default")
         secgroups_to_delete = resp_body['security_groups']
         for secgroup in secgroups_to_delete:
             try:
@@ -404,12 +323,8 @@
             if (not self.network_resources or
                 self.network_resources.get('router')):
                 try:
-                    if self.tempest_client:
-                        net_client.remove_router_interface_with_subnet_id(
-                            router['id'], subnet['id'])
-                    else:
-                        body = {'subnet_id': subnet['id']}
-                        net_client.remove_interface_router(router['id'], body)
+                    net_client.remove_router_interface_with_subnet_id(
+                        router['id'], subnet['id'])
                 except exceptions.NotFound:
                     LOG.warn('router with name: %s not found for delete' %
                              router['name'])
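
A minimal sketch (hypothetical stub client, not part of the patch) of the dict-based
pattern the credential helpers above now rely on: tempest's own clients return a
(response, body) pair with plain-dict bodies, so the former attribute access
(tenant.id) becomes key access (tenant['id'])::

    # Hypothetical stand-in for identity_admin_client.create_tenant(); real
    # tempest clients return (resp, body) with dict bodies in the same shape.
    def create_tenant(name, description):
        resp = {'status': '200'}
        body = {'id': 't-123', 'name': name, 'description': description}
        return resp, body

    _, tenant = create_tenant('demo', 'demo tenant')
    # key access replaces the official clients' attribute access
    print(tenant['id'], tenant['name'])
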
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index e584cbf..00fe8d2 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -17,11 +17,11 @@
 import collections
 import json
 import re
-import string
 import time
 
 import jsonschema
 from lxml import etree
+import six
 
 from tempest.common import http
 from tempest.common.utils import misc as misc_utils
@@ -40,6 +40,19 @@
 HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
 
 
+# convert a structure into a string safely
+def safe_body(body, maxlen=2048):
+    try:
+        text = six.text_type(body)
+    except UnicodeDecodeError:
+        # if this isn't actually text, return marker that
+        return "<BinaryData: removed>"
+    if len(text) > maxlen:
+        return text[:maxlen]
+    else:
+        return text
+
+
 class RestClient(object):
 
     TYPE = "json"
@@ -258,6 +271,31 @@
             self.LOG.debug('Starting Request (%s): %s %s' %
                            (caller_name, method, req_url))
 
+    def _log_request_full(self, method, req_url, resp,
+                          secs="", req_headers=None,
+                          req_body=None, resp_body=None,
+                          caller_name=None, extra=None):
+        if req_headers is None:
+            req_headers = {}
+        if 'X-Auth-Token' in req_headers:
+            req_headers['X-Auth-Token'] = '<omitted>'
+        log_fmt = """Request (%s): %s %s %s%s
+    Request - Headers: %s
+        Body: %s
+    Response - Headers: %s
+        Body: %s"""
+
+        self.LOG.debug(
+            log_fmt % (
+                caller_name,
+                resp['status'],
+                method,
+                req_url,
+                secs,
+                str(req_headers),
+                safe_body(req_body),
+                str(resp),
+                safe_body(resp_body)),
+            extra=extra)
+
     def _log_request(self, method, req_url, resp,
                      secs="", req_headers=None,
                      req_body=None, resp_body=None):
@@ -281,32 +319,10 @@
                 secs),
             extra=extra)
 
-        # We intentionally duplicate the info content because in a parallel
-        # world this is important to match
-        trace_regex = CONF.debug.trace_requests
-        if trace_regex and re.search(trace_regex, caller_name):
-            if 'X-Auth-Token' in req_headers:
-                req_headers['X-Auth-Token'] = '<omitted>'
-            log_fmt = """Request (%s): %s %s %s%s
-    Request - Headers: %s
-        Body: %s
-    Response - Headers: %s
-        Body: %s"""
-
-            self.LOG.debug(
-                log_fmt % (
-                    caller_name,
-                    resp['status'],
-                    method,
-                    req_url,
-                    secs,
-                    str(req_headers),
-                    filter(lambda x: x in string.printable,
-                           str(req_body)[:2048]),
-                    str(resp),
-                    filter(lambda x: x in string.printable,
-                           str(resp_body)[:2048])),
-                extra=extra)
+        # We also log everything at DEBUG; if you want to filter this
+        # out, don't run at debug.
+        self._log_request_full(method, req_url, resp, secs, req_headers,
+                               req_body, resp_body, caller_name, extra)
 
     def _parse_resp(self, body):
         if self._get_type() is "json":
diff --git a/tempest/config.py b/tempest/config.py
index d3449a7..174a895 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -470,13 +470,13 @@
                 )
 ]
 
-queuing_group = cfg.OptGroup(name='queuing',
-                             title='Queuing Service')
+messaging_group = cfg.OptGroup(name='messaging',
+                               title='Messaging Service')
 
-QueuingGroup = [
+MessagingGroup = [
     cfg.StrOpt('catalog_type',
-               default='queuing',
-               help='Catalog type of the Queuing service.'),
+               default='messaging',
+               help='Catalog type of the Messaging service.'),
     cfg.IntOpt('max_queues_per_page',
                default=20,
                help='The maximum number of queue records per page when '
@@ -1034,7 +1034,7 @@
     register_opt_group(cfg.CONF, network_group, NetworkGroup)
     register_opt_group(cfg.CONF, network_feature_group,
                        NetworkFeaturesGroup)
-    register_opt_group(cfg.CONF, queuing_group, QueuingGroup)
+    register_opt_group(cfg.CONF, messaging_group, MessagingGroup)
     register_opt_group(cfg.CONF, volume_group, VolumeGroup)
     register_opt_group(cfg.CONF, volume_feature_group,
                        VolumeFeaturesGroup)
@@ -1091,7 +1091,7 @@
             'object-storage-feature-enabled']
         self.database = cfg.CONF.database
         self.orchestration = cfg.CONF.orchestration
-        self.queuing = cfg.CONF.queuing
+        self.messaging = cfg.CONF.messaging
         self.telemetry = cfg.CONF.telemetry
         self.dashboard = cfg.CONF.dashboard
         self.data_processing = cfg.CONF.data_processing
@@ -1141,8 +1141,10 @@
         # to remove an issue with the config file up to date checker.
         if parse_conf:
             config_files.append(path)
-
-        cfg.CONF([], project='tempest', default_config_files=config_files)
+        if os.path.isfile(path):
+            cfg.CONF([], project='tempest', default_config_files=config_files)
+        else:
+            cfg.CONF([], project='tempest')
         logging.setup('tempest')
         LOG = logging.getLogger('tempest')
         LOG.info("Using tempest config file %s" % path)
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index abc60cb..55cc89b 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -106,20 +106,6 @@
                             "T107: service tag should not be in path")
 
 
-def no_official_client_manager_in_api_tests(physical_line, filename):
-    """Check that the OfficialClientManager isn't used in the api tests
-
-    The api tests should not use the official clients.
-
-    T108: Can not use OfficialClientManager in the API tests
-    """
-    if 'tempest/api' in filename:
-        if 'OfficialClientManager' in physical_line:
-            return (physical_line.find('OfficialClientManager'),
-                    'T108: OfficialClientManager can not be used in the api '
-                    'tests')
-
-
 def no_mutable_default_args(logical_line):
     """Check that mutable object isn't used as default argument
 
@@ -136,5 +122,4 @@
     register(no_setupclass_for_unit_tests)
     register(no_vi_headers)
     register(service_tags_not_in_module_path)
-    register(no_official_client_manager_in_api_tests)
     register(no_mutable_default_args)
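
For context, a hypothetical example of the local-check shape used in
tempest/hacking/checks.py: a function over physical_line/filename that returns an
(offset, message) tuple and is wired up through the factory at the bottom of the
module. The T1xx code and the rule itself are invented for illustration::

    def no_official_clients_in_unit_tests(physical_line, filename):
        """T1xx: example rule only, not an actual tempest check."""
        if 'tempest/tests' in filename:
            if 'OfficialClientManager' in physical_line:
                return (physical_line.find('OfficialClientManager'),
                        'T1xx: OfficialClientManager can not be used in '
                        'unit tests')


    def factory(register):
        register(no_official_clients_in_unit_tests)
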
diff --git a/tempest/manager.py b/tempest/manager.py
index 75aee96..538b619 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -63,6 +63,5 @@
                 'Credentials must be specified')
         auth_provider_class = self.get_auth_provider_class(credentials)
         return auth_provider_class(
-            client_type=getattr(self, 'client_type', None),
             interface=getattr(self, 'interface', None),
             credentials=credentials)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 48eff84..79207cd 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -18,14 +18,9 @@
 import os
 import subprocess
 
-from cinderclient import exceptions as cinder_exceptions
-import glanceclient
 import netaddr
-from neutronclient.common import exceptions as exc
-from novaclient import exceptions as nova_exceptions
 import six
 
-from tempest.api.network import common as net_common
 from tempest import auth
 from tempest import clients
 from tempest.common import debug
@@ -51,23 +46,14 @@
 
 
 class ScenarioTest(tempest.test.BaseTestCase):
-    """Replaces the OfficialClientTest base class.
-
-    Uses tempest own clients as opposed to OfficialClients.
-
-    Common differences:
-    - replace resource.attribute with resource['attribute']
-    - replace resouce.delete with delete_callable(resource['id'])
-    - replace local waiters with common / rest_client waiters
-    """
+    """Base class for scenario tests. Uses tempest own clients. """
 
     @classmethod
-    def setUpClass(cls):
-        super(ScenarioTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ScenarioTest, cls).resource_setup()
         # Using tempest client for isolated credentials as well
         cls.isolated_creds = isolated_creds.IsolatedCreds(
-            cls.__name__, tempest_client=True,
-            network_resources=cls.network_resources)
+            cls.__name__, network_resources=cls.network_resources)
         cls.manager = clients.Manager(
             credentials=cls.credentials()
         )
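
A hypothetical subclass sketch (not from the patch) of the
setUpClass -> resource_setup migration: scenario tests now do class-level setup in
resource_setup() and must chain to the parent so the isolated credentials and the
clients.Manager above are built first; the client attribute chosen here is only an
example::

    from tempest.scenario import manager


    class TestMyScenario(manager.ScenarioTest):

        @classmethod
        def resource_setup(cls):
            super(TestMyScenario, cls).resource_setup()
            # pull whichever tempest clients the scenario needs off cls.manager
            cls.servers_client = cls.manager.servers_client

        def test_boot_server(self):
            # scenario steps would go here
            pass
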
@@ -457,10 +443,21 @@
         if wait:
             self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    def ping_ip_address(self, ip_address, should_succeed=True):
+        cmd = ['ping', '-c1', '-w1', ip_address]
 
-# TODO(yfried): change this class name to NetworkScenarioTest once client
-# migration is complete
-class NeutronScenarioTest(ScenarioTest):
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.communicate()
+            return (proc.returncode == 0) == should_succeed
+
+        return tempest.test.call_until_true(
+            ping, CONF.compute.ping_timeout, 1)
+
+
+class NetworkScenarioTest(ScenarioTest):
     """Base class for network scenario tests.
     This class provide helpers for network scenario tests, using the neutron
     API. Helpers from ancestor which use the nova network API are overridden
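
A stand-alone sketch (illustrative only) of the single ping attempt wrapped by the
ping_ip_address() helper promoted to ScenarioTest above; the real helper retries it
via tempest.test.call_until_true for CONF.compute.ping_timeout seconds::

    import subprocess


    def ping_once(ip_address, should_succeed=True):
        # one `ping -c1 -w1`, compared against the expected outcome
        proc = subprocess.Popen(['ping', '-c1', '-w1', ip_address],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        return (proc.returncode == 0) == should_succeed

    print(ping_once('127.0.0.1'))                        # usually True
    print(ping_once('127.0.0.1', should_succeed=False))  # usually False
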
@@ -473,22 +470,12 @@
 
     @classmethod
     def check_preconditions(cls):
-        if CONF.service_available.neutron:
-            cls.enabled = True
-            # verify that neutron_available is telling the truth
-            try:
-                cls.network_client.list_networks()
-            except exc.EndpointNotFound:
-                cls.enabled = False
-                raise
-        else:
-            cls.enabled = False
-            msg = 'Neutron not available'
-            raise cls.skipException(msg)
+        if not CONF.service_available.neutron:
+            raise cls.skipException('Neutron not available')
 
     @classmethod
-    def setUpClass(cls):
-        super(NeutronScenarioTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(NetworkScenarioTest, cls).resource_setup()
         cls.tenant_id = cls.manager.identity_client.tenant_id
         cls.check_preconditions()
 
@@ -602,7 +589,7 @@
 
     def _get_network_by_name(self, network_name):
         net = self._list_networks(name=network_name)
-        return net_common.AttributeDict(net[0])
+        return net_resources.AttributeDict(net[0])
 
     def _create_floating_ip(self, thing, external_network_id, port_id=None,
                             client=None):
@@ -635,19 +622,6 @@
         self.assertIsNone(floating_ip.port_id)
         return floating_ip
 
-    def _ping_ip_address(self, ip_address, should_succeed=True):
-        cmd = ['ping', '-c1', '-w1', ip_address]
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-            return (proc.returncode == 0) == should_succeed
-
-        return tempest.test.call_until_true(
-            ping, CONF.compute.ping_timeout, 1)
-
     def _check_vm_connectivity(self, ip_address,
                                username=None,
                                private_key=None,
@@ -667,8 +641,8 @@
             msg = "Timed out waiting for %s to become reachable" % ip_address
         else:
             msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self._ping_ip_address(ip_address,
-                                              should_succeed=should_connect),
+        self.assertTrue(self.ping_ip_address(ip_address,
+                                             should_succeed=should_connect),
                         msg=msg)
         if should_connect:
             # no need to check ssh for negative connectivity
@@ -809,9 +783,6 @@
         ]
         msg = "No default security group for tenant %s." % (tenant_id)
         self.assertTrue(len(sgs) > 0, msg)
-        if len(sgs) > 1:
-            msg = "Found %d default security groups" % len(sgs)
-            raise exc.NeutronClientNoUniqueMatch(msg=msg)
         return net_resources.DeletableSecurityGroup(client=client,
                                                     **sgs[0])
 
@@ -1005,511 +976,6 @@
         return network, subnet, router
 
 
-class OfficialClientTest(tempest.test.BaseTestCase):
-    """
-    Official Client test base class for scenario testing.
-
-    Official Client tests are tests that have the following characteristics:
-
-     * Test basic operations of an API, typically in an order that
-       a regular user would perform those operations
-     * Test only the correct inputs and action paths -- no fuzz or
-       random input data is sent, only valid inputs.
-     * Use only the default client tool for calling an API
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        super(OfficialClientTest, cls).setUpClass()
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
-            cls.__name__, tempest_client=False,
-            network_resources=cls.network_resources)
-
-        cls.manager = clients.OfficialClientManager(
-            credentials=cls.credentials())
-        cls.compute_client = cls.manager.compute_client
-        cls.image_client = cls.manager.image_client
-        cls.baremetal_client = cls.manager.baremetal_client
-        cls.identity_client = cls.manager.identity_client
-        cls.network_client = cls.manager.network_client
-        cls.volume_client = cls.manager.volume_client
-        cls.object_storage_client = cls.manager.object_storage_client
-        cls.orchestration_client = cls.manager.orchestration_client
-        cls.data_processing_client = cls.manager.data_processing_client
-        cls.ceilometer_client = cls.manager.ceilometer_client
-
-    @classmethod
-    def _get_credentials(cls, get_creds, ctype):
-        if CONF.compute.allow_tenant_isolation:
-            creds = get_creds()
-        else:
-            creds = auth.get_default_credentials(ctype)
-        return creds
-
-    @classmethod
-    def credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_primary_creds,
-                                    'user')
-
-    @classmethod
-    def alt_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_alt_creds,
-                                    'alt_user')
-
-    @classmethod
-    def admin_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_admin_creds,
-                                    'identity_admin')
-
-    def setUp(self):
-        super(OfficialClientTest, self).setUp()
-        self.cleanup_waits = []
-        # NOTE(mtreinish) This is safe to do in setUp instead of setUp class
-        # because scenario tests in the same test class should not share
-        # resources. If resources were shared between test cases then it
-        # should be a single scenario test instead of multiples.
-
-        # NOTE(yfried): this list is cleaned at the end of test_methods and
-        # not at the end of the class
-        self.addCleanup(self._wait_for_cleanups)
-
-    @staticmethod
-    def not_found_exception(exception):
-        """
-        @return: True if exception is of NotFound type
-        """
-        NOT_FOUND_LIST = ['NotFound', 'HTTPNotFound']
-        return (exception.__class__.__name__ in NOT_FOUND_LIST
-                or
-                hasattr(exception, 'status_code') and
-                exception.status_code == 404)
-
-    def delete_wrapper(self, thing):
-        """Ignores NotFound exceptions for delete operations.
-
-        @param thing: object with delete() method.
-            OpenStack resources are assumed to have a delete() method which
-            destroys the resource
-        """
-
-        try:
-            thing.delete()
-        except Exception as e:
-            # If the resource is already missing, mission accomplished.
-            if not self.not_found_exception(e):
-                raise
-
-    def _wait_for_cleanups(self):
-        """To handle async delete actions, a list of waits is added
-        which will be iterated over as the last step of clearing the
-        cleanup queue. That way all the delete calls are made up front
-        and the tests won't succeed unless the deletes are eventually
-        successful. This is the same basic approach used in the api tests to
-        limit cleanup execution time except here it is multi-resource,
-        because of the nature of the scenario tests.
-        """
-        for wait in self.cleanup_waits:
-            self.delete_timeout(**wait)
-
-    def addCleanup_with_wait(self, things, thing_id,
-                             error_status='ERROR',
-                             exc_type=nova_exceptions.NotFound,
-                             cleanup_callable=None, cleanup_args=None,
-                             cleanup_kwargs=None):
-        """Adds wait for ansyc resource deletion at the end of cleanups
-
-        @param things: type of the resource to delete
-        @param thing_id:
-        @param error_status: see manager.delete_timeout()
-        @param exc_type: see manager.delete_timeout()
-        @param cleanup_callable: method to load pass to self.addCleanup with
-            the following *cleanup_args, **cleanup_kwargs.
-            usually a delete method. if not used, will try to use:
-            things.delete(thing_id)
-        """
-        if cleanup_args is None:
-            cleanup_args = []
-        if cleanup_kwargs is None:
-            cleanup_kwargs = {}
-        if cleanup_callable is None:
-            LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
-                      " default".format(rclass=things, id=thing_id))
-            self.addCleanup(things.delete, thing_id)
-        else:
-            self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
-        wait_dict = {
-            'things': things,
-            'thing_id': thing_id,
-            'error_status': error_status,
-            'not_found_exception': exc_type,
-        }
-        self.cleanup_waits.append(wait_dict)
-
-    def status_timeout(self, things, thing_id, expected_status,
-                       error_status='ERROR',
-                       not_found_exception=nova_exceptions.NotFound):
-        """
-        Given a thing and an expected status, do a loop, sleeping
-        for a configurable amount of time, checking for the
-        expected status to show. At any time, if the returned
-        status of the thing is ERROR, fail out.
-        """
-        self._status_timeout(things, thing_id,
-                             expected_status=expected_status,
-                             error_status=error_status,
-                             not_found_exception=not_found_exception)
-
-    def delete_timeout(self, things, thing_id,
-                       error_status='ERROR',
-                       not_found_exception=nova_exceptions.NotFound):
-        """
-        Given a thing, do a loop, sleeping
-        for a configurable amount of time, checking for the
-        deleted status to show. At any time, if the returned
-        status of the thing is ERROR, fail out.
-        """
-        self._status_timeout(things,
-                             thing_id,
-                             allow_notfound=True,
-                             error_status=error_status,
-                             not_found_exception=not_found_exception)
-
-    def _status_timeout(self,
-                        things,
-                        thing_id,
-                        expected_status=None,
-                        allow_notfound=False,
-                        error_status='ERROR',
-                        not_found_exception=nova_exceptions.NotFound):
-
-        log_status = expected_status if expected_status else ''
-        if allow_notfound:
-            log_status += ' or NotFound' if log_status != '' else 'NotFound'
-
-        def check_status():
-            # python-novaclient has resources available to its client
-            # that all implement a get() method taking an identifier
-            # for the singular resource to retrieve.
-            try:
-                thing = things.get(thing_id)
-            except not_found_exception:
-                if allow_notfound:
-                    return True
-                raise
-            except Exception as e:
-                if allow_notfound and self.not_found_exception(e):
-                    return True
-                raise
-
-            new_status = thing.status
-
-            # Some components are reporting error status in lower case
-            # so case sensitive comparisons can really mess things
-            # up.
-            if new_status.lower() == error_status.lower():
-                message = ("%s failed to get to expected status (%s). "
-                           "In %s state.") % (thing, expected_status,
-                                              new_status)
-                raise exceptions.BuildErrorException(message,
-                                                     server_id=thing_id)
-            elif new_status == expected_status and expected_status is not None:
-                return True  # All good.
-            LOG.debug("Waiting for %s to get to %s status. "
-                      "Currently in %s status",
-                      thing, log_status, new_status)
-        if not tempest.test.call_until_true(
-            check_status,
-            CONF.compute.build_timeout,
-            CONF.compute.build_interval):
-            message = ("Timed out waiting for thing %s "
-                       "to become %s") % (thing_id, log_status)
-            raise exceptions.TimeoutException(message)
-
-    def _create_loginable_secgroup_rule_nova(self, client=None,
-                                             secgroup_id=None):
-        if client is None:
-            client = self.compute_client
-        if secgroup_id is None:
-            sgs = client.security_groups.list()
-            for sg in sgs:
-                if sg.name == 'default':
-                    secgroup_id = sg.id
-
-        # These rules are intended to permit inbound ssh and icmp
-        # traffic from all sources, so no group_id is provided.
-        # Setting a group_id would only permit traffic from ports
-        # belonging to the same security group.
-        rulesets = [
-            {
-                # ssh
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ssh -6
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '::/0',
-            },
-            {
-                # ping
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping6
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '::/0',
-            }
-        ]
-        rules = list()
-        for ruleset in rulesets:
-            sg_rule = client.security_group_rules.create(secgroup_id,
-                                                         **ruleset)
-            self.addCleanup(self.delete_wrapper, sg_rule)
-            rules.append(sg_rule)
-        return rules
-
-    def _create_security_group_nova(self, client=None,
-                                    namestart='secgroup-smoke-'):
-        if client is None:
-            client = self.compute_client
-        # Create security group
-        sg_name = data_utils.rand_name(namestart)
-        sg_desc = sg_name + " description"
-        secgroup = client.security_groups.create(sg_name, sg_desc)
-        self.assertEqual(secgroup.name, sg_name)
-        self.assertEqual(secgroup.description, sg_desc)
-        self.addCleanup(self.delete_wrapper, secgroup)
-
-        # Add rules to the security group
-        self._create_loginable_secgroup_rule_nova(client, secgroup.id)
-
-        return secgroup
-
-    def rebuild_server(self, server, client=None, image=None,
-                       preserve_ephemeral=False, wait=True,
-                       rebuild_kwargs=None):
-        if client is None:
-            client = self.compute_client
-        if image is None:
-            image = CONF.compute.image_ref
-        rebuild_kwargs = rebuild_kwargs or {}
-
-        LOG.debug("Rebuilding server (name: %s, image: %s, preserve eph: %s)",
-                  server.name, image, preserve_ephemeral)
-        server.rebuild(image, preserve_ephemeral=preserve_ephemeral,
-                       **rebuild_kwargs)
-        if wait:
-            self.status_timeout(client.servers, server.id, 'ACTIVE')
-
-    def create_server(self, client=None, name=None, image=None, flavor=None,
-                      wait_on_boot=True, wait_on_delete=True,
-                      create_kwargs=None):
-        """Creates VM instance.
-
-        @param client: compute client to create the instance
-        @param image: image from which to create the instance
-        @param wait_on_boot: wait for status ACTIVE before continue
-        @param wait_on_delete: force synchronous delete on cleanup
-        @param create_kwargs: additional details for instance creation
-        @return: client.server object
-        """
-        if client is None:
-            client = self.compute_client
-        if name is None:
-            name = data_utils.rand_name('scenario-server-')
-        if image is None:
-            image = CONF.compute.image_ref
-        if flavor is None:
-            flavor = CONF.compute.flavor_ref
-        if create_kwargs is None:
-            create_kwargs = {}
-
-        fixed_network_name = CONF.compute.fixed_network_name
-        if 'nics' not in create_kwargs and fixed_network_name:
-            networks = client.networks.list()
-            # If several networks found, set the NetID on which to connect the
-            # server to avoid the following error "Multiple possible networks
-            # found, use a Network ID to be more specific."
-            # See Tempest #1250866
-            if len(networks) > 1:
-                for network in networks:
-                    if network.label == fixed_network_name:
-                        create_kwargs['nics'] = [{'net-id': network.id}]
-                        break
-                # If we didn't find the network we were looking for :
-                else:
-                    msg = ("The network on which the NIC of the server must "
-                           "be connected can not be found : "
-                           "fixed_network_name=%s. Starting instance without "
-                           "specifying a network.") % fixed_network_name
-                    LOG.info(msg)
-
-        LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
-                  name, image, flavor)
-        server = client.servers.create(name, image, flavor, **create_kwargs)
-        self.assertEqual(server.name, name)
-        if wait_on_delete:
-            self.addCleanup(self.delete_timeout,
-                            self.compute_client.servers,
-                            server.id)
-        self.addCleanup_with_wait(self.compute_client.servers, server.id,
-                                  cleanup_callable=self.delete_wrapper,
-                                  cleanup_args=[server])
-        if wait_on_boot:
-            self.status_timeout(client.servers, server.id, 'ACTIVE')
-        # The instance retrieved on creation is missing network
-        # details, necessitating retrieval after it becomes active to
-        # ensure correct details.
-        server = client.servers.get(server.id)
-        LOG.debug("Created server: %s", server)
-        return server
-
-    def create_volume(self, client=None, size=1, name=None,
-                      snapshot_id=None, imageRef=None, volume_type=None,
-                      wait_on_delete=True):
-        if client is None:
-            client = self.volume_client
-        if name is None:
-            name = data_utils.rand_name('scenario-volume-')
-        LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
-        volume = client.volumes.create(size=size, display_name=name,
-                                       snapshot_id=snapshot_id,
-                                       imageRef=imageRef,
-                                       volume_type=volume_type)
-        if wait_on_delete:
-            self.addCleanup(self.delete_timeout,
-                            self.volume_client.volumes,
-                            volume.id)
-        self.addCleanup_with_wait(self.volume_client.volumes, volume.id,
-                                  exc_type=cinder_exceptions.NotFound)
-        self.assertEqual(name, volume.display_name)
-        self.status_timeout(client.volumes, volume.id, 'available')
-        LOG.debug("Created volume: %s", volume)
-        return volume
-
-    def create_server_snapshot(self, server, compute_client=None,
-                               image_client=None, name=None):
-        if compute_client is None:
-            compute_client = self.compute_client
-        if image_client is None:
-            image_client = self.image_client
-        if name is None:
-            name = data_utils.rand_name('scenario-snapshot-')
-        LOG.debug("Creating a snapshot image for server: %s", server.name)
-        image_id = compute_client.servers.create_image(server, name)
-        self.addCleanup_with_wait(self.image_client.images, image_id,
-                                  exc_type=glanceclient.exc.HTTPNotFound)
-        self.status_timeout(image_client.images, image_id, 'active')
-        snapshot_image = image_client.images.get(image_id)
-        self.assertEqual(name, snapshot_image.name)
-        LOG.debug("Created snapshot image %s for server %s",
-                  snapshot_image.name, server.name)
-        return snapshot_image
-
-    def create_keypair(self, client=None, name=None):
-        if client is None:
-            client = self.compute_client
-        if name is None:
-            name = data_utils.rand_name('scenario-keypair-')
-        keypair = client.keypairs.create(name)
-        self.assertEqual(keypair.name, name)
-        self.addCleanup(self.delete_wrapper, keypair)
-        return keypair
-
-    def get_remote_client(self, server_or_ip, username=None, private_key=None):
-        if isinstance(server_or_ip, six.string_types):
-            ip = server_or_ip
-        else:
-            network_name_for_ssh = CONF.compute.network_for_ssh
-            ip = server_or_ip.networks[network_name_for_ssh][0]
-        if username is None:
-            username = CONF.scenario.ssh_user
-        if private_key is None:
-            private_key = self.keypair.private_key
-        linux_client = remote_client.RemoteClient(ip, username,
-                                                  pkey=private_key)
-        try:
-            linux_client.validate_authentication()
-        except exceptions.SSHTimeout:
-            LOG.exception('ssh connection to %s failed' % ip)
-            debug.log_net_debug()
-            raise
-
-        return linux_client
-
-    def _log_console_output(self, servers=None):
-        if not CONF.compute_feature_enabled.console_output:
-            LOG.debug('Console output not supported, cannot log')
-            return
-        if not servers:
-            servers = self.compute_client.servers.list()
-        for server in servers:
-            LOG.debug('Console output for %s', server.id)
-            LOG.debug(server.get_console_output())
-
-    def wait_for_volume_status(self, status):
-        volume_id = self.volume.id
-        self.status_timeout(
-            self.volume_client.volumes, volume_id, status)
-
-    def _image_create(self, name, fmt, path, properties=None):
-        if properties is None:
-            properties = {}
-        name = data_utils.rand_name('%s-' % name)
-        image_file = open(path, 'rb')
-        self.addCleanup(image_file.close)
-        params = {
-            'name': name,
-            'container_format': fmt,
-            'disk_format': fmt,
-            'is_public': 'False',
-        }
-        params.update(properties)
-        image = self.image_client.images.create(**params)
-        self.addCleanup(self.image_client.images.delete, image)
-        self.assertEqual("queued", image.status)
-        image.update(data=image_file)
-        return image.id
-
-    def glance_image_create(self):
-        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
-        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
-        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
-        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
-        img_container_format = CONF.scenario.img_container_format
-        img_disk_format = CONF.scenario.img_disk_format
-        LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
-                  "ami: %s, ari: %s, aki: %s" %
-                  (img_path, img_container_format, img_disk_format,
-                   ami_img_path, ari_img_path, aki_img_path))
-        try:
-            self.image = self._image_create('scenario-img',
-                                            img_container_format,
-                                            img_path,
-                                            properties={'disk_format':
-                                                        img_disk_format})
-        except IOError:
-            LOG.debug("A qcow2 image was not found. Try to get a uec image.")
-            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
-            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
-            properties = {
-                'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
-            }
-            self.image = self._image_create('scenario-ami', 'ami',
-                                            path=ami_img_path,
-                                            properties=properties)
-        LOG.debug("image:%s" % self.image)
-
-
 # power/provision states as of icehouse
 class BaremetalPowerStates(object):
     """Possible power states of an Ironic node."""
@@ -1536,8 +1002,8 @@
 
 class BaremetalScenarioTest(ScenarioTest):
     @classmethod
-    def setUpClass(cls):
-        super(BaremetalScenarioTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaremetalScenarioTest, cls).resource_setup()
 
         if (not CONF.service_available.ironic or
            not CONF.baremetal.driver_enabled):
@@ -1582,13 +1048,12 @@
 
     def wait_node(self, instance_id):
         """Waits for a node to be associated with instance_id."""
-        from ironicclient import exc as ironic_exceptions
 
         def _get_node():
             node = None
             try:
                 node = self.get_node(instance_id=instance_id)
-            except ironic_exceptions.HTTPNotFound:
+            except exceptions.NotFound:
                 pass
             return node is not None
 
@@ -1669,8 +1134,8 @@
     """
 
     @classmethod
-    def setUpClass(cls):
-        super(EncryptionScenarioTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(EncryptionScenarioTest, cls).resource_setup()
         cls.admin_volume_types_client = cls.admin_manager.volume_types_client
 
     def _wait_for_volume_status(self, status):
@@ -1710,578 +1175,14 @@
             control_location=control_location)
 
 
-class NetworkScenarioTest(OfficialClientTest):
-    """
-    Base class for network scenario tests
-    """
-
-    @classmethod
-    def check_preconditions(cls):
-        if (CONF.service_available.neutron):
-            cls.enabled = True
-            # verify that neutron_available is telling the truth
-            try:
-                cls.network_client.list_networks()
-            except exc.EndpointNotFound:
-                cls.enabled = False
-                raise
-        else:
-            cls.enabled = False
-            msg = 'Neutron not available'
-            raise cls.skipException(msg)
-
-    @classmethod
-    def setUpClass(cls):
-        super(NetworkScenarioTest, cls).setUpClass()
-        cls.tenant_id = cls.manager.identity_client.tenant_id
-
-    def _create_network(self, tenant_id, namestart='network-smoke-'):
-        name = data_utils.rand_name(namestart)
-        body = dict(
-            network=dict(
-                name=name,
-                tenant_id=tenant_id,
-            ),
-        )
-        result = self.network_client.create_network(body=body)
-        network = net_common.DeletableNetwork(client=self.network_client,
-                                              **result['network'])
-        self.assertEqual(network.name, name)
-        self.addCleanup(self.delete_wrapper, network)
-        return network
-
-    def _list_networks(self, **kwargs):
-        nets = self.network_client.list_networks(**kwargs)
-        return nets['networks']
-
-    def _list_subnets(self, **kwargs):
-        subnets = self.network_client.list_subnets(**kwargs)
-        return subnets['subnets']
-
-    def _list_routers(self, **kwargs):
-        routers = self.network_client.list_routers(**kwargs)
-        return routers['routers']
-
-    def _list_ports(self, **kwargs):
-        ports = self.network_client.list_ports(**kwargs)
-        return ports['ports']
-
-    def _get_tenant_own_network_num(self, tenant_id):
-        nets = self._list_networks(tenant_id=tenant_id)
-        return len(nets)
-
-    def _get_tenant_own_subnet_num(self, tenant_id):
-        subnets = self._list_subnets(tenant_id=tenant_id)
-        return len(subnets)
-
-    def _get_tenant_own_port_num(self, tenant_id):
-        ports = self._list_ports(tenant_id=tenant_id)
-        return len(ports)
-
-    def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
-        """
-        Create a subnet for the given network within the cidr block
-        configured for tenant networks.
-        """
-
-        def cidr_in_use(cidr, tenant_id):
-            """
-            :return True if subnet with cidr already exist in tenant
-                False else
-            """
-            cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
-            return len(cidr_in_use) != 0
-
-        tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-        result = None
-        # Repeatedly attempt subnet creation with sequential cidr
-        # blocks until an unallocated block is found.
-        for subnet_cidr in tenant_cidr.subnet(
-            CONF.network.tenant_network_mask_bits):
-            str_cidr = str(subnet_cidr)
-            if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
-                continue
-
-            body = dict(
-                subnet=dict(
-                    name=data_utils.rand_name(namestart),
-                    ip_version=4,
-                    network_id=network.id,
-                    tenant_id=network.tenant_id,
-                    cidr=str_cidr,
-                ),
-            )
-            body['subnet'].update(kwargs)
-            try:
-                result = self.network_client.create_subnet(body=body)
-                break
-            except exc.NeutronClientException as e:
-                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                if not is_overlapping_cidr:
-                    raise
-        self.assertIsNotNone(result, 'Unable to allocate tenant network')
-        subnet = net_common.DeletableSubnet(client=self.network_client,
-                                            **result['subnet'])
-        self.assertEqual(subnet.cidr, str_cidr)
-        self.addCleanup(self.delete_wrapper, subnet)
-        return subnet
-
-    def _create_port(self, network, namestart='port-quotatest-'):
-        name = data_utils.rand_name(namestart)
-        body = dict(
-            port=dict(name=name,
-                      network_id=network.id,
-                      tenant_id=network.tenant_id))
-        result = self.network_client.create_port(body=body)
-        self.assertIsNotNone(result, 'Unable to allocate port')
-        port = net_common.DeletablePort(client=self.network_client,
-                                        **result['port'])
-        self.addCleanup(self.delete_wrapper, port)
-        return port
-
-    def _get_server_port_id(self, server, ip_addr=None):
-        ports = self._list_ports(device_id=server.id, fixed_ip=ip_addr)
-        self.assertEqual(len(ports), 1,
-                         "Unable to determine which port to target.")
-        return ports[0]['id']
-
-    def _get_network_by_name(self, network_name):
-        net = self._list_networks(name=network_name)
-        return net_common.AttributeDict(net[0])
-
-    def _create_floating_ip(self, thing, external_network_id, port_id=None):
-        if not port_id:
-            port_id = self._get_server_port_id(thing)
-        body = dict(
-            floatingip=dict(
-                floating_network_id=external_network_id,
-                port_id=port_id,
-                tenant_id=thing.tenant_id,
-            )
-        )
-        result = self.network_client.create_floatingip(body=body)
-        floating_ip = net_common.DeletableFloatingIp(
-            client=self.network_client,
-            **result['floatingip'])
-        self.addCleanup(self.delete_wrapper, floating_ip)
-        return floating_ip
-
-    def _associate_floating_ip(self, floating_ip, server):
-        port_id = self._get_server_port_id(server)
-        floating_ip.update(port_id=port_id)
-        self.assertEqual(port_id, floating_ip.port_id)
-        return floating_ip
-
-    def _disassociate_floating_ip(self, floating_ip):
-        """
-        :param floating_ip: type DeletableFloatingIp
-        """
-        floating_ip.update(port_id=None)
-        self.assertIsNone(floating_ip.port_id)
-        return floating_ip
-
-    def _ping_ip_address(self, ip_address, should_succeed=True):
-        cmd = ['ping', '-c1', '-w1', ip_address]
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-            return (proc.returncode == 0) == should_succeed
-
-        return tempest.test.call_until_true(
-            ping, CONF.compute.ping_timeout, 1)
-
-    def _create_pool(self, lb_method, protocol, subnet_id):
-        """Wrapper utility that returns a test pool."""
-        name = data_utils.rand_name('pool-')
-        body = {
-            "pool": {
-                "protocol": protocol,
-                "name": name,
-                "subnet_id": subnet_id,
-                "lb_method": lb_method
-            }
-        }
-        resp = self.network_client.create_pool(body=body)
-        pool = net_common.DeletablePool(client=self.network_client,
-                                        **resp['pool'])
-        self.assertEqual(pool['name'], name)
-        self.addCleanup(self.delete_wrapper, pool)
-        return pool
-
-    def _create_member(self, address, protocol_port, pool_id):
-        """Wrapper utility that returns a test member."""
-        body = {
-            "member": {
-                "protocol_port": protocol_port,
-                "pool_id": pool_id,
-                "address": address
-            }
-        }
-        resp = self.network_client.create_member(body)
-        member = net_common.DeletableMember(client=self.network_client,
-                                            **resp['member'])
-        self.addCleanup(self.delete_wrapper, member)
-        return member
-
-    def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
-        """Wrapper utility that returns a test vip."""
-        name = data_utils.rand_name('vip-')
-        body = {
-            "vip": {
-                "protocol": protocol,
-                "name": name,
-                "subnet_id": subnet_id,
-                "pool_id": pool_id,
-                "protocol_port": protocol_port
-            }
-        }
-        resp = self.network_client.create_vip(body)
-        vip = net_common.DeletableVip(client=self.network_client,
-                                      **resp['vip'])
-        self.assertEqual(vip['name'], name)
-        self.addCleanup(self.delete_wrapper, vip)
-        return vip
-
-    def _check_vm_connectivity(self, ip_address,
-                               username=None,
-                               private_key=None,
-                               should_connect=True):
-        """
-        :param ip_address: server to test against
-        :param username: server's ssh username
-        :param private_key: server's ssh private key to be used
-        :param should_connect: True/False indicates positive/negative test
-            positive - attempt ping and ssh
-            negative - attempt ping and fail if succeed
-
-        :raises: AssertError if the result of the connectivity check does
-            not match the value of the should_connect param
-        """
-        if should_connect:
-            msg = "Timed out waiting for %s to become reachable" % ip_address
-        else:
-            msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self._ping_ip_address(ip_address,
-                                              should_succeed=should_connect),
-                        msg=msg)
-        if should_connect:
-            # no need to check ssh for negative connectivity
-            self.get_remote_client(ip_address, username, private_key)
-
-    def _check_public_network_connectivity(self, ip_address, username,
-                                           private_key, should_connect=True,
-                                           msg=None, servers=None):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        LOG.debug('checking network connections to IP %s with user: %s' %
-                  (ip_address, username))
-        try:
-            self._check_vm_connectivity(ip_address,
-                                        username,
-                                        private_key,
-                                        should_connect=should_connect)
-        except Exception as e:
-            ex_msg = 'Public network connectivity check failed'
-            if msg:
-                ex_msg += ": " + msg
-            LOG.exception(ex_msg)
-            self._log_console_output(servers)
-            # network debug is called as part of ssh init
-            if not isinstance(e, exceptions.SSHTimeout):
-                debug.log_net_debug()
-            raise
-
-    def _check_tenant_network_connectivity(self, server,
-                                           username,
-                                           private_key,
-                                           should_connect=True,
-                                           servers_for_debug=None):
-        if not CONF.network.tenant_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for net_name, ip_addresses in server.networks.iteritems():
-                for ip_address in ip_addresses:
-                    self._check_vm_connectivity(ip_address,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
-        except Exception as e:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
-            # network debug is called as part of ssh init
-            if not isinstance(e, exceptions.SSHTimeout):
-                debug.log_net_debug()
-            raise
-
-    def _check_remote_connectivity(self, source, dest, should_succeed=True):
-        """
-        check ping server via source ssh connection
-
-        :param source: RemoteClient: an ssh connection from which to ping
-        :param dest: and IP to ping against
-        :param should_succeed: boolean should ping succeed or not
-        :returns: boolean -- should_succeed == ping
-        :returns: ping is false if ping failed
-        """
-        def ping_remote():
-            try:
-                source.ping_host(dest)
-            except exceptions.SSHExecCommandFailed:
-                LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
-                         % (dest, source.ssh_client.host))
-                return not should_succeed
-            return should_succeed
-
-        return tempest.test.call_until_true(ping_remote,
-                                            CONF.compute.ping_timeout,
-                                            1)
-
-    def _create_security_group_neutron(self, tenant_id, client=None,
-                                       namestart='secgroup-smoke-'):
-        if client is None:
-            client = self.network_client
-        secgroup = self._create_empty_security_group(namestart=namestart,
-                                                     client=client,
-                                                     tenant_id=tenant_id)
-
-        # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule_neutron(secgroup=secgroup)
-        for rule in rules:
-            self.assertEqual(tenant_id, rule.tenant_id)
-            self.assertEqual(secgroup.id, rule.security_group_id)
-        return secgroup
-
-    def _create_empty_security_group(self, tenant_id, client=None,
-                                     namestart='secgroup-smoke-'):
-        """Create a security group without rules.
-
-        Default rules will be created:
-         - IPv4 egress to any
-         - IPv6 egress to any
-
-        :param tenant_id: secgroup will be created in this tenant
-        :returns: DeletableSecurityGroup -- containing the secgroup created
-        """
-        if client is None:
-            client = self.network_client
-        sg_name = data_utils.rand_name(namestart)
-        sg_desc = sg_name + " description"
-        sg_dict = dict(name=sg_name,
-                       description=sg_desc)
-        sg_dict['tenant_id'] = tenant_id
-        body = dict(security_group=sg_dict)
-        result = client.create_security_group(body=body)
-        secgroup = net_common.DeletableSecurityGroup(
-            client=client,
-            **result['security_group']
-        )
-        self.assertEqual(secgroup.name, sg_name)
-        self.assertEqual(tenant_id, secgroup.tenant_id)
-        self.assertEqual(secgroup.description, sg_desc)
-        self.addCleanup(self.delete_wrapper, secgroup)
-        return secgroup
-
-    def _default_security_group(self, tenant_id, client=None):
-        """Get default secgroup for given tenant_id.
-
-        :returns: DeletableSecurityGroup -- default secgroup for given tenant
-        """
-        if client is None:
-            client = self.network_client
-        sgs = [
-            sg for sg in client.list_security_groups().values()[0]
-            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
-        ]
-        msg = "No default security group for tenant %s." % (tenant_id)
-        self.assertTrue(len(sgs) > 0, msg)
-        if len(sgs) > 1:
-            msg = "Found %d default security groups" % len(sgs)
-            raise exc.NeutronClientNoUniqueMatch(msg=msg)
-        return net_common.DeletableSecurityGroup(client=client,
-                                                 **sgs[0])
-
-    def _create_security_group_rule(self, client=None, secgroup=None,
-                                    tenant_id=None, **kwargs):
-        """Create a rule from a dictionary of rule parameters.
-
-        Create a rule in a secgroup. if secgroup not defined will search for
-        default secgroup in tenant_id.
-
-        :param secgroup: type DeletableSecurityGroup.
-        :param secgroup_id: search for secgroup by id
-            default -- choose default secgroup for given tenant_id
-        :param tenant_id: if secgroup not passed -- the tenant in which to
-            search for default secgroup
-        :param kwargs: a dictionary containing rule parameters:
-            for example, to allow incoming ssh:
-            rule = {
-                    direction: 'ingress'
-                    protocol:'tcp',
-                    port_range_min: 22,
-                    port_range_max: 22
-                    }
-        """
-        if client is None:
-            client = self.network_client
-        if secgroup is None:
-            secgroup = self._default_security_group(tenant_id)
-
-        ruleset = dict(security_group_id=secgroup.id,
-                       tenant_id=secgroup.tenant_id,
-                       )
-        ruleset.update(kwargs)
-
-        body = dict(security_group_rule=dict(ruleset))
-        sg_rule = client.create_security_group_rule(body=body)
-        sg_rule = net_common.DeletableSecurityGroupRule(
-            client=client,
-            **sg_rule['security_group_rule']
-        )
-        self.addCleanup(self.delete_wrapper, sg_rule)
-        self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
-        self.assertEqual(secgroup.id, sg_rule.security_group_id)
-
-        return sg_rule
-
-    def _create_loginable_secgroup_rule_neutron(self, client=None,
-                                                secgroup=None):
-        """These rules are intended to permit inbound ssh and icmp
-        traffic from all sources, so no group_id is provided.
-        Setting a group_id would only permit traffic from ports
-        belonging to the same security group.
-        """
-
-        if client is None:
-            client = self.network_client
-        rules = []
-        rulesets = [
-            dict(
-                # ssh
-                protocol='tcp',
-                port_range_min=22,
-                port_range_max=22,
-            ),
-            dict(
-                # ping
-                protocol='icmp',
-            )
-        ]
-        for ruleset in rulesets:
-            for r_direction in ['ingress', 'egress']:
-                ruleset['direction'] = r_direction
-                try:
-                    sg_rule = self._create_security_group_rule(
-                        client=client, secgroup=secgroup, **ruleset)
-                except exc.NeutronClientException as ex:
-                    # if rule already exist - skip rule and continue
-                    if not (ex.status_code is 409 and 'Security group rule'
-                            ' already exists' in ex.message):
-                        raise ex
-                else:
-                    self.assertEqual(r_direction, sg_rule.direction)
-                    rules.append(sg_rule)
-
-        return rules
-
-    def _ssh_to_server(self, server, private_key):
-        ssh_login = CONF.compute.image_ssh_user
-        return self.get_remote_client(server,
-                                      username=ssh_login,
-                                      private_key=private_key)
-
-    def _show_quota_network(self, tenant_id):
-        quota = self.network_client.show_quota(tenant_id)
-        return quota['quota']['network']
-
-    def _show_quota_subnet(self, tenant_id):
-        quota = self.network_client.show_quota(tenant_id)
-        return quota['quota']['subnet']
-
-    def _show_quota_port(self, tenant_id):
-        quota = self.network_client.show_quota(tenant_id)
-        return quota['quota']['port']
-
-    def _get_router(self, tenant_id):
-        """Retrieve a router for the given tenant id.
-
-        If a public router has been configured, it will be returned.
-
-        If a public router has not been configured, but a public
-        network has, a tenant router will be created and returned that
-        routes traffic to the public network.
-        """
-        router_id = CONF.network.public_router_id
-        network_id = CONF.network.public_network_id
-        if router_id:
-            result = self.network_client.show_router(router_id)
-            return net_common.AttributeDict(**result['router'])
-        elif network_id:
-            router = self._create_router(tenant_id)
-            router.add_gateway(network_id)
-            return router
-        else:
-            raise Exception("Neither of 'public_router_id' or "
-                            "'public_network_id' has been defined.")
-
-    def _create_router(self, tenant_id, namestart='router-smoke-'):
-        name = data_utils.rand_name(namestart)
-        body = dict(
-            router=dict(
-                name=name,
-                admin_state_up=True,
-                tenant_id=tenant_id,
-            ),
-        )
-        result = self.network_client.create_router(body=body)
-        router = net_common.DeletableRouter(client=self.network_client,
-                                            **result['router'])
-        self.assertEqual(router.name, name)
-        self.addCleanup(self.delete_wrapper, router)
-        return router
-
-    def create_networks(self, tenant_id=None):
-        """Create a network with a subnet connected to a router.
-
-        The baremetal driver is a special case since all nodes are
-        on the same shared network.
-
-        :returns: network, subnet, router
-        """
-        if CONF.baremetal.driver_enabled:
-            # NOTE(Shrews): This exception is for environments where tenant
-            # credential isolation is available, but network separation is
-            # not (the current baremetal case). Likely can be removed when
-            # test account mgmt is reworked:
-            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
-            network = self._get_network_by_name(
-                CONF.compute.fixed_network_name)
-            router = None
-            subnet = None
-        else:
-            if tenant_id is None:
-                tenant_id = self.tenant_id
-            network = self._create_network(tenant_id)
-            router = self._get_router(tenant_id)
-            subnet = self._create_subnet(network)
-            subnet.add_to_router(router.id)
-        return network, subnet, router
-
-
 class OrchestrationScenarioTest(ScenarioTest):
     """
     Base class for orchestration scenario tests
     """
 
     @classmethod
-    def setUpClass(cls):
-        super(OrchestrationScenarioTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(OrchestrationScenarioTest, cls).resource_setup()
         if not CONF.service_available.heat:
             raise cls.skipException("Heat support is required")
 
@@ -2315,19 +1216,6 @@
         return next((o['output_value'] for o in stack['outputs']
                     if o['output_key'] == output_key), None)
 
-    def _ping_ip_address(self, ip_address, should_succeed=True):
-        cmd = ['ping', '-c1', '-w1', ip_address]
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-            return (proc.returncode == 0) == should_succeed
-
-        return tempest.test.call_until_true(
-            ping, CONF.orchestration.build_timeout, 1)
-
 
 class SwiftScenarioTest(ScenarioTest):
     """
@@ -2338,9 +1226,9 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(SwiftScenarioTest, cls).setUpClass()
+        super(SwiftScenarioTest, cls).resource_setup()
         if not CONF.service_available.swift:
             skip_msg = ("%s skipped as swift is not available" %
                         cls.__name__)
@@ -2350,38 +1238,38 @@
         cls.container_client = cls.manager.container_client
         cls.object_client = cls.manager.object_client
 
-    def _get_swift_stat(self):
+    def get_swift_stat(self):
         """get swift status for our user account."""
         self.account_client.list_account_containers()
         LOG.debug('Swift status information obtained successfully')
 
-    def _create_container(self, container_name=None):
+    def create_container(self, container_name=None):
         name = container_name or data_utils.rand_name(
             'swift-scenario-container')
         self.container_client.create_container(name)
         # look for the container to assure it is created
-        self._list_and_check_container_objects(name)
+        self.list_and_check_container_objects(name)
         LOG.debug('Container %s created' % (name))
         return name
 
-    def _delete_container(self, container_name):
+    def delete_container(self, container_name):
         self.container_client.delete_container(container_name)
         LOG.debug('Container %s deleted' % (container_name))
 
-    def _upload_object_to_container(self, container_name, obj_name=None):
+    def upload_object_to_container(self, container_name, obj_name=None):
         obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
         obj_data = data_utils.arbitrary_string()
         self.object_client.create_object(container_name, obj_name, obj_data)
         return obj_name, obj_data
 
-    def _delete_object(self, container_name, filename):
+    def delete_object(self, container_name, filename):
         self.object_client.delete_object(container_name, filename)
-        self._list_and_check_container_objects(container_name,
-                                               not_present_obj=[filename])
+        self.list_and_check_container_objects(container_name,
+                                              not_present_obj=[filename])
 
-    def _list_and_check_container_objects(self, container_name,
-                                          present_obj=None,
-                                          not_present_obj=None):
+    def list_and_check_container_objects(self, container_name,
+                                         present_obj=None,
+                                         not_present_obj=None):
         """
         List objects for a given container and assert which are present and
         which are not.
@@ -2399,7 +1287,7 @@
             for obj in not_present_obj:
                 self.assertNotIn(obj, object_list)
 
-    def _change_container_acl(self, container_name, acl):
+    def change_container_acl(self, container_name, acl):
         metadata_param = {'metadata_prefix': 'x-container-',
                           'metadata': {'read': acl}}
         self.container_client.update_container_metadata(container_name,
@@ -2407,6 +1295,6 @@
         resp, _ = self.container_client.list_container_metadata(container_name)
         self.assertEqual(resp['x-container-read'], acl)
 
-    def _download_and_verify(self, container_name, obj_name, expected_data):
+    def download_and_verify(self, container_name, obj_name, expected_data):
         _, obj = self.object_client.get_object(container_name, obj_name)
         self.assertEqual(obj, expected_data)
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index 4e85429..abda1f8 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -24,6 +24,7 @@
 
 class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
 
+    @test.skip_because(bug="1374175")
     def setUp(self):
         super(CfnInitScenarioTest, self).setUp()
         if not CONF.orchestration.image_ref:
@@ -83,10 +84,11 @@
         server_ip =\
             server['addresses'][CONF.compute.network_for_ssh][0]['addr']
 
-        if not self._ping_ip_address(server_ip):
+        if not self.ping_ip_address(server_ip):
             self._log_console_output(servers=[server])
             self.fail(
-                "Timed out waiting for %s to become reachable" % server_ip)
+                "(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
+                "for %s to become reachable" % server_ip)
 
         try:
             self.client.wait_for_resource_status(
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 3ad5c69..75769ce 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -33,8 +33,8 @@
     Deletes aggregate
     """
     @classmethod
-    def setUpClass(cls):
-        super(TestAggregatesBasicOps, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestAggregatesBasicOps, cls).resource_setup()
         cls.aggregates_client = cls.manager.aggregates_client
         cls.hosts_client = cls.manager.hosts_client
 
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 72cc8b0..f218fb2 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -34,9 +34,9 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(TestDashboardBasicOps, cls).setUpClass()
+        super(TestDashboardBasicOps, cls).resource_setup()
 
         if not CONF.service_available.horizon:
             raise cls.skipException("Horizon support is required")
@@ -69,7 +69,6 @@
         response = self.opener.open(CONF.dashboard.dashboard_url)
         self.assertIn('Overview', response.read())
 
-    @test.skip_because(bug="1345955")
     @test.services('dashboard')
     def test_basic_scenario(self):
         self.check_login_page()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index a7ea70f..b111939 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -38,9 +38,12 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
+        if CONF.scenario.large_ops_number < 1:
+            raise cls.skipException("large_ops_number not set to multiple "
+                                    "instances")
         cls.set_network_resources()
-        super(TestLargeOpsScenario, cls).setUpClass()
+        super(TestLargeOpsScenario, cls).resource_setup()
 
     def _wait_for_server_status(self, status):
         for server in self.servers:
@@ -75,8 +78,6 @@
         self._wait_for_server_status('ACTIVE')
 
     def _large_ops_scenario(self):
-        if CONF.scenario.large_ops_number < 1:
-            return
         self.glance_image_create()
         self.nova_boot()
 
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 5e83ff9..9e404c8 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -18,17 +18,17 @@
 import time
 import urllib2
 
-from tempest.api.network import common as net_common
 from tempest.common import commands
 from tempest import config
 from tempest import exceptions
 from tempest.scenario import manager
+from tempest.services.network import resources as net_resources
 from tempest import test
 
 config = config.CONF
 
 
-class TestLoadBalancerBasic(manager.NeutronScenarioTest):
+class TestLoadBalancerBasic(manager.NetworkScenarioTest):
 
     """
     This test checks basic load balancing.
@@ -38,9 +38,8 @@
     2. SSH to the instance and start two servers
     3. Create a load balancer with two members and with ROUND_ROBIN algorithm
        associate the VIP with a floating ip
-    4. Send 10 requests to the floating ip and check that they are shared
-       between the two servers and that both of them get equal portions
-    of the requests
+    4. Send NUM requests to the floating ip and check that they are shared
+       between the two servers.
     """
 
     @classmethod
@@ -58,8 +57,8 @@
             raise cls.skipException(msg)
 
     @classmethod
-    def setUpClass(cls):
-        super(TestLoadBalancerBasic, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestLoadBalancerBasic, cls).resource_setup()
         cls.check_preconditions()
         cls.servers_keypairs = {}
         cls.members = []
@@ -67,6 +66,7 @@
         cls.server_ips = {}
         cls.port1 = 80
         cls.port2 = 88
+        cls.num = 50
 
     def setUp(self):
         super(TestLoadBalancerBasic, self).setUp()
@@ -89,7 +89,7 @@
 
         if tenant_net:
             tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
-            self.subnet = net_common.DeletableSubnet(
+            self.subnet = net_resources.DeletableSubnet(
                 client=self.network_client,
                 **tenant_subnet)
             self.network = tenant_net
@@ -101,7 +101,7 @@
             # should instead pull a subnet id from config, which is set by
             # devstack/admin/etc.
             subnet = self._list_subnets(network_id=self.network['id'])[0]
-            self.subnet = net_common.AttributeDict(subnet)
+            self.subnet = net_resources.AttributeDict(subnet)
 
     def _create_security_group_for_test(self):
         self.security_group = self._create_security_group(
@@ -287,26 +287,21 @@
 
     def _check_load_balancing(self):
         """
-        1. Send 10 requests on the floating ip associated with the VIP
-        2. Check that the requests are shared between
-           the two servers and that both of them get equal portions
-           of the requests
+        1. Send NUM requests on the floating ip associated with the VIP
+        2. Check that the requests are shared between the two servers
         """
 
         self._check_connection(self.vip_ip)
-        self._send_requests(self.vip_ip, set(["server1", "server2"]))
+        self._send_requests(self.vip_ip, ["server1", "server2"])
 
-    def _send_requests(self, vip_ip, expected, num_req=10):
-        count = 0
-        while count < num_req:
-            resp = []
-            for i in range(len(self.members)):
-                resp.append(
-                    urllib2.urlopen(
-                        "http://{0}/".format(vip_ip)).read())
-            count += 1
-            self.assertEqual(expected,
-                             set(resp))
+    def _send_requests(self, vip_ip, servers):
+        counters = dict.fromkeys(servers, 0)
+        for i in range(self.num):
+            server = urllib2.urlopen("http://{0}/".format(vip_ip)).read()
+            counters[server] += 1
+        # Assert that each member of the pool served at least one request
+        for member, counter in counters.iteritems():
+            self.assertGreater(counter, 0, 'Member %s never balanced' % member)
 
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
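
The rewritten _send_requests above drops the old strict equal-share assertion: it fires self.num requests at the VIP, tallies which backend answered each one, and only requires that every pool member served at least one. A standalone sketch of that counting idea, assuming each HTTP response body names the backend that produced it (function and variable names are illustrative)::

    import urllib2
    from collections import Counter

    def count_backend_hits(vip_ip, num_requests=50):
        """Tally how many responses each backend served."""
        hits = Counter()
        for _ in range(num_requests):
            body = urllib2.urlopen("http://{0}/".format(vip_ip)).read()
            hits[body.strip()] += 1
        return hits

    # hits = count_backend_hits("203.0.113.10")
    # Every member should have served at least one request:
    # assert all(hits[name] > 0 for name in ("server1", "server2"))
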
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 84e1048..58a028f 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -25,7 +25,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
+class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
 
     """
     This test case checks VM connectivity after some advanced
@@ -50,10 +50,10 @@
             raise cls.skipException(msg)
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         # Create no network resources for these tests.
         cls.set_network_resources()
-        super(TestNetworkAdvancedServerOps, cls).setUpClass()
+        super(TestNetworkAdvancedServerOps, cls).resource_setup()
 
     def _setup_network_and_servers(self):
         self.keypair = self.create_keypair()
@@ -92,6 +92,7 @@
         self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
         self._check_network_connectivity()
 
+    @test.skip_because(bug="1323658")
     @test.services('compute', 'network')
     def test_server_connectivity_stop_start(self):
         self._setup_network_and_servers()
@@ -139,6 +140,7 @@
         self.servers_client.resume_server(self.server['id'])
         self._wait_server_status_and_check_network_connectivity()
 
+    @test.skip_because(bug="1323658")
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize is not available.')
     @test.services('compute', 'network')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 10dfb66..de60745 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -34,7 +34,7 @@
                                            ['floating_ip', 'server'])
 
 
-class TestNetworkBasicOps(manager.NeutronScenarioTest):
+class TestNetworkBasicOps(manager.NetworkScenarioTest):
 
     """
     This smoke test suite assumes that Nova has been configured to
@@ -88,10 +88,10 @@
             raise cls.skipException(msg)
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         # Create no network resources for these tests.
         cls.set_network_resources()
-        super(TestNetworkBasicOps, cls).setUpClass()
+        super(TestNetworkBasicOps, cls).resource_setup()
         for ext in ['router', 'security-group']:
             if not test.is_extension_enabled(ext, 'network'):
                 msg = "%s extension not enabled." % ext
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 20505eb..188dea8 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -26,7 +26,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
+class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
 
     """
     This test suite assumes that Nova has been configured to
@@ -138,10 +138,10 @@
             raise cls.skipException(msg)
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         # Create no network resources for these tests.
         cls.set_network_resources()
-        super(TestSecurityGroupsBasicOps, cls).setUpClass()
+        super(TestSecurityGroupsBasicOps, cls).resource_setup()
         # TODO(mnewby) Consider looking up entities as needed instead
         # of storing them as collections on the class.
         cls.floating_ips = {}
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 463f5aa..c53e22b 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -35,9 +35,9 @@
     """
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(TestServerAdvancedOps, cls).setUpClass()
+        super(TestServerAdvancedOps, cls).resource_setup()
 
         if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index b38b1a3..eb636f7 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -35,8 +35,7 @@
      * Create a security group to control network access in instance
      * Add simple permissive rules to the security group
      * Launch an instance
-     * Pause/unpause the instance
-     * Suspend/resume the instance
+     * Perform SSH to the instance
      * Terminate the instance
     """
 
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index f2c3dcd..8ea2814 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -51,8 +51,8 @@
     """
 
     @classmethod
-    def setUpClass(cls):
-        super(TestStampPattern, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestStampPattern, cls).resource_setup()
 
         if not CONF.volume_feature_enabled.snapshot:
             raise cls.skipException("Cinder volume snapshots are disabled")
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index ad74ec4..9e0fee0 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -41,13 +41,13 @@
 
     @test.services('object_storage')
     def test_swift_basic_ops(self):
-        self._get_swift_stat()
-        container_name = self._create_container()
-        obj_name, obj_data = self._upload_object_to_container(container_name)
-        self._list_and_check_container_objects(container_name, [obj_name])
-        self._download_and_verify(container_name, obj_name, obj_data)
-        self._delete_object(container_name, obj_name)
-        self._delete_container(container_name)
+        self.get_swift_stat()
+        container_name = self.create_container()
+        obj_name, obj_data = self.upload_object_to_container(container_name)
+        self.list_and_check_container_objects(container_name, [obj_name])
+        self.download_and_verify(container_name, obj_name, obj_data)
+        self.delete_object(container_name, obj_name)
+        self.delete_container(container_name)
 
     @test.services('object_storage')
     def test_swift_acl_anonymous_download(self):
@@ -58,15 +58,15 @@
         4. Check if the object can be download by anonymous user
         5. Delete the object and container
         """
-        container_name = self._create_container()
-        obj_name, _ = self._upload_object_to_container(container_name)
+        container_name = self.create_container()
+        obj_name, _ = self.upload_object_to_container(container_name)
         obj_url = '%s/%s/%s' % (self.object_client.base_url,
                                 container_name, obj_name)
         http_client = http.ClosingHttp()
         resp, _ = http_client.request(obj_url, 'GET')
         self.assertEqual(resp.status, 401)
-        self._change_container_acl(container_name, '.r:*')
+        self.change_container_acl(container_name, '.r:*')
         resp, _ = http_client.request(obj_url, 'GET')
         self.assertEqual(resp.status, 200)
-        self._delete_object(container_name, obj_name)
-        self._delete_container(container_name)
+        self.delete_object(container_name, obj_name)
+        self.delete_container(container_name)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index fdda423..a20db5c 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -36,8 +36,8 @@
      * Check written content in the instance booted from snapshot
     """
     @classmethod
-    def setUpClass(cls):
-        super(TestVolumeBootPattern, cls).setUpClass()
+    def resource_setup(cls):
+        super(TestVolumeBootPattern, cls).resource_setup()
 
         if not CONF.volume_feature_enabled.snapshot:
             raise cls.skipException("Cinder volume snapshots are disabled")
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index e2adb34..c20f20c 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -40,33 +40,33 @@
         self.non_ssh_image_pattern = \
             CONF.input_scenario.non_ssh_image_regex
         # Setup clients
-        ocm = clients.OfficialClientManager(
-            auth.get_default_credentials('user'))
-        self.client = ocm.compute_client
+        os = clients.Manager()
+        self.images_client = os.images_client
+        self.flavors_client = os.flavors_client
 
     def ssh_user(self, image_id):
-        _image = self.client.images.get(image_id)
+        _, _image = self.images_client.get_image(image_id)
         for regex, user in self.ssh_users:
             # First match wins
-            if re.match(regex, _image.name) is not None:
+            if re.match(regex, _image['name']) is not None:
                 return user
         else:
             return self.default_ssh_user
 
     def _is_sshable_image(self, image):
         return not re.search(pattern=self.non_ssh_image_pattern,
-                             string=str(image.name))
+                             string=str(image['name']))
 
     def is_sshable_image(self, image_id):
-        _image = self.client.images.get(image_id)
+        _, _image = self.images_client.get_image(image_id)
         return self._is_sshable_image(_image)
 
     def _is_flavor_enough(self, flavor, image):
-        return image.minDisk <= flavor.disk
+        return image['minDisk'] <= flavor['disk']
 
     def is_flavor_enough(self, flavor_id, image_id):
-        _image = self.client.images.get(image_id)
-        _flavor = self.client.flavors.get(flavor_id)
+        _, _image = self.images_client.get_image(image_id)
+        _, _flavor = self.flavors_client.get_flavor_details(flavor_id)
         return self._is_flavor_enough(_flavor, _image)
 
 
@@ -81,7 +81,7 @@
     load_tests = testscenarios.load_tests_apply_scenarios
 
 
-    class TestInputScenario(manager.OfficialClientTest):
+    class TestInputScenario(manager.ScenarioTest):
 
         scenario_utils = utils.InputScenarioUtils()
         scenario_flavor = scenario_utils.scenario_flavors
@@ -91,17 +91,18 @@
 
         def test_create_server_metadata(self):
             name = rand_name('instance')
-            _ = self.compute_client.servers.create(name=name,
-                                                   flavor=self.flavor_ref,
-                                                   image=self.image_ref)
+            self.servers_client.create_server(name=name,
+                                              flavor_ref=self.flavor_ref,
+                                              image_ref=self.image_ref)
     """
     validchars = "-_.{ascii}{digit}".format(ascii=string.ascii_letters,
                                             digit=string.digits)
 
     def __init__(self):
-        ocm = clients.OfficialClientManager(
+        os = clients.Manager(
             auth.get_default_credentials('user', fill_in=False))
-        self.client = ocm.compute_client
+        self.images_client = os.images_client
+        self.flavors_client = os.flavors_client
         self.image_pattern = CONF.input_scenario.image_regex
         self.flavor_pattern = CONF.input_scenario.flavor_regex
 
@@ -118,10 +119,11 @@
         if not CONF.service_available.glance:
             return []
         if not hasattr(self, '_scenario_images'):
-            images = self.client.images.list(detailed=False)
+            _, images = self.images_client.list_images()
             self._scenario_images = [
-                (self._normalize_name(i.name), dict(image_ref=i.id))
-                for i in images if re.search(self.image_pattern, str(i.name))
+                (self._normalize_name(i['name']), dict(image_ref=i['id']))
+                for i in images if re.search(self.image_pattern,
+                                             str(i['name']))
             ]
         return self._scenario_images
 
@@ -131,10 +133,11 @@
         :return: a scenario with name and uuid of flavors
         """
         if not hasattr(self, '_scenario_flavors'):
-            flavors = self.client.flavors.list(detailed=False)
+            _, flavors = self.flavors_client.list_flavors()
             self._scenario_flavors = [
-                (self._normalize_name(f.name), dict(flavor_ref=f.id))
-                for f in flavors if re.search(self.flavor_pattern, str(f.name))
+                (self._normalize_name(f['name']), dict(flavor_ref=f['id']))
+                for f in flavors if re.search(self.flavor_pattern,
+                                              str(f['name']))
             ]
         return self._scenario_flavors
 
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 9877391..4af8331 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -76,7 +76,7 @@
     def get_image(self, image_id):
         """Returns the details of a single image."""
         resp, body = self.get("images/%s" % str(image_id))
-        self.expected_success(200, resp)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         self.validate_response(schema.get_image, resp, body)
         return resp, body['image']
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
index 6d29837..7743f9c 100644
--- a/tempest/services/compute/json/security_group_default_rules_client.py
+++ b/tempest/services/compute/json/security_group_default_rules_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.response.compute.v2 import \
+    security_group_default_rule as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -46,8 +48,9 @@
         post_body = json.dumps({'security_group_default_rule': post_body})
         url = 'os-security-group-default-rules'
         resp, body = self.post(url, post_body)
-        self.expected_success(200, resp.status)
         body = json.loads(body)
+        self.validate_response(schema.create_get_security_group_default_rule,
+                               resp, body)
         return resp, body['security_group_default_rule']
 
     def delete_security_group_default_rule(self,
@@ -55,20 +58,23 @@
         """Deletes the provided Security Group default rule."""
         resp, body = self.delete('os-security-group-default-rules/%s' % str(
             security_group_default_rule_id))
-        self.expected_success(204, resp.status)
+        self.validate_response(schema.delete_security_group_default_rule,
+                               resp, body)
         return resp, body
 
     def list_security_group_default_rules(self):
         """List all Security Group default rules."""
         resp, body = self.get('os-security-group-default-rules')
-        self.expected_success(200, resp.status)
         body = json.loads(body)
+        self.validate_response(schema.list_security_group_default_rules,
+                               resp, body)
         return resp, body['security_group_default_rules']
 
     def get_security_group_default_rule(self, security_group_default_rule_id):
         """Return the details of provided Security Group default rule."""
         resp, body = self.get('os-security-group-default-rules/%s' % str(
             security_group_default_rule_id))
-        self.expected_success(200, resp.status)
         body = json.loads(body)
+        self.validate_response(schema.create_get_security_group_default_rule,
+                               resp, body)
         return resp, body['security_group_default_rule']
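
With this change the default-rules client validates whole responses against JSON schemas via validate_response() rather than only asserting a status code. The imported schema module is not part of this hunk; a rough sketch of the shape such tempest response schemas usually take (keys and required fields here are assumptions, not copied from the real file)::

    # Assumed layout of tempest/api_schema/response/compute/v2/
    # security_group_default_rule.py
    create_get_security_group_default_rule = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {
                'security_group_default_rule': {'type': 'object'}
            },
            'required': ['security_group_default_rule']
        }
    }

    delete_security_group_default_rule = {
        'status_code': [204]
    }
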
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 6b15404..94acf36 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -127,7 +127,7 @@
     def get_image(self, image_id):
         """Returns the details of a single image."""
         resp, body = self.get("images/%s" % str(image_id))
-        self.expected_success(200, resp)
+        self.expected_success(200, resp.status)
         body = self._parse_image(etree.fromstring(body))
         return resp, body
 
diff --git a/tempest/services/queuing/__init__.py b/tempest/services/messaging/__init__.py
similarity index 100%
rename from tempest/services/queuing/__init__.py
rename to tempest/services/messaging/__init__.py
diff --git a/tempest/services/queuing/json/__init__.py b/tempest/services/messaging/json/__init__.py
similarity index 100%
rename from tempest/services/queuing/json/__init__.py
rename to tempest/services/messaging/json/__init__.py
diff --git a/tempest/services/queuing/json/queuing_client.py b/tempest/services/messaging/json/messaging_client.py
similarity index 95%
rename from tempest/services/queuing/json/queuing_client.py
rename to tempest/services/messaging/json/messaging_client.py
index 14960ad..3e82399 100644
--- a/tempest/services/queuing/json/queuing_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -16,7 +16,7 @@
 import json
 import urllib
 
-from tempest.api_schema.response.queuing.v1 import queues as queues_schema
+from tempest.api_schema.response.messaging.v1 import queues as queues_schema
 from tempest.common import rest_client
 from tempest.common.utils import data_utils
 from tempest import config
@@ -25,11 +25,11 @@
 CONF = config.CONF
 
 
-class QueuingClientJSON(rest_client.RestClient):
+class MessagingClientJSON(rest_client.RestClient):
 
     def __init__(self, auth_provider):
-        super(QueuingClientJSON, self).__init__(auth_provider)
-        self.service = CONF.queuing.catalog_type
+        super(MessagingClientJSON, self).__init__(auth_provider)
+        self.service = CONF.messaging.catalog_type
         self.version = '1'
         self.uri_prefix = 'v{0}'.format(self.version)
 
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index eca57c0..4dc588f 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -58,8 +58,6 @@
         """Delete an account."""
         url = ''
         if params:
-            if 'bulk-delete' in params:
-                url += 'bulk-delete&'
             url = '?%s%s' % (url, urllib.urlencode(params))
 
         resp, body = self.delete(url, headers={}, body=data)
@@ -74,13 +72,19 @@
         return resp, body
 
     def create_account_metadata(self, metadata,
-                                metadata_prefix='X-Account-Meta-'):
+                                metadata_prefix='X-Account-Meta-',
+                                data=None, params=None):
         """Creates an account metadata entry."""
         headers = {}
-        for key in metadata:
-            headers[metadata_prefix + key] = metadata[key]
+        if metadata:
+            for key in metadata:
+                headers[metadata_prefix + key] = metadata[key]
 
-        resp, body = self.post('', headers=headers, body=None)
+        url = ''
+        if params:
+            url = '?%s%s' % (url, urllib.urlencode(params))
+
+        resp, body = self.post(url, headers=headers, body=data)
         return resp, body
 
     def delete_account_metadata(self, metadata,
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index f50ba2f..1f8065b 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -24,15 +24,16 @@
 LOG = logging.getLogger(__name__)
 
 
-class SnapshotsClientJSON(rest_client.RestClient):
-    """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientJSON(rest_client.RestClient):
+    """Base Client class to send CRUD Volume API requests."""
 
     def __init__(self, auth_provider):
-        super(SnapshotsClientJSON, self).__init__(auth_provider)
+        super(BaseSnapshotsClientJSON, self).__init__(auth_provider)
 
         self.service = CONF.volume.catalog_type
         self.build_interval = CONF.volume.build_interval
         self.build_timeout = CONF.volume.build_timeout
+        self.create_resp = 200
 
     def list_snapshots(self, params=None):
         """List all the snapshot."""
@@ -77,7 +78,7 @@
         post_body = json.dumps({'snapshot': post_body})
         resp, body = self.post('snapshots', post_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.expected_success(self.create_resp, resp.status)
         return resp, body['snapshot']
 
     def update_snapshot(self, snapshot_id, **kwargs):
@@ -203,3 +204,7 @@
         resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
         self.expected_success(202, resp.status)
         return resp, body
+
+
+class SnapshotsClientJSON(BaseSnapshotsClientJSON):
+    """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/services/volume/v2/json/snapshots_client.py b/tempest/services/volume/v2/json/snapshots_client.py
new file mode 100644
index 0000000..553176b
--- /dev/null
+++ b/tempest/services/volume/v2/json/snapshots_client.py
@@ -0,0 +1,23 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.volume.json import snapshots_client
+
+
+class SnapshotsV2ClientJSON(snapshots_client.BaseSnapshotsClientJSON):
+    """Client class to send CRUD Volume V2 API requests."""
+
+    def __init__(self, auth_provider):
+        super(SnapshotsV2ClientJSON, self).__init__(auth_provider)
+
+        self.api_version = "v2"
+        self.create_resp = 202
diff --git a/tempest/services/volume/v2/xml/snapshots_client.py b/tempest/services/volume/v2/xml/snapshots_client.py
new file mode 100644
index 0000000..b29d86c
--- /dev/null
+++ b/tempest/services/volume/v2/xml/snapshots_client.py
@@ -0,0 +1,23 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.volume.xml import snapshots_client
+
+
+class SnapshotsV2ClientXML(snapshots_client.BaseSnapshotsClientXML):
+    """Client class to send CRUD Volume V2 API requests."""
+
+    def __init__(self, auth_provider):
+        super(SnapshotsV2ClientXML, self).__init__(auth_provider)
+
+        self.api_version = "v2"
+        self.create_resp = 202
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 7636707..ce98eea 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -26,16 +26,17 @@
 LOG = logging.getLogger(__name__)
 
 
-class SnapshotsClientXML(rest_client.RestClient):
-    """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientXML(rest_client.RestClient):
+    """Base Client class to send CRUD Volume API requests."""
     TYPE = "xml"
 
     def __init__(self, auth_provider):
-        super(SnapshotsClientXML, self).__init__(auth_provider)
+        super(BaseSnapshotsClientXML, self).__init__(auth_provider)
 
         self.service = CONF.volume.catalog_type
         self.build_interval = CONF.volume.build_interval
         self.build_timeout = CONF.volume.build_timeout
+        self.create_resp = 200
 
     def list_snapshots(self, params=None):
         """List all snapshot."""
@@ -90,7 +91,7 @@
         resp, body = self.post('snapshots',
                                str(common.Document(snapshot)))
         body = common.xml_to_json(etree.fromstring(body))
-        self.expected_success(200, resp.status)
+        self.expected_success(self.create_resp, resp.status)
         return resp, body
 
     def update_snapshot(self, snapshot_id, **kwargs):
@@ -243,3 +244,7 @@
             body = common.xml_to_json(etree.fromstring(body))
         self.expected_success(202, resp.status)
         return resp, body
+
+
+class SnapshotsClientXML(BaseSnapshotsClientXML):
+    """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 9c13013..37ad18e 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -100,14 +100,6 @@
         self.assertFalse(checks.service_tags_not_in_module_path(
             "@test.services('compute')", './tempest/api/image/fake_test.py'))
 
-    def test_no_official_client_manager_in_api_tests(self):
-        self.assertTrue(checks.no_official_client_manager_in_api_tests(
-            "cls.official_client = clients.OfficialClientManager(credentials)",
-            "tempest/api/compute/base.py"))
-        self.assertFalse(checks.no_official_client_manager_in_api_tests(
-            "cls.official_client = clients.OfficialClientManager(credentials)",
-            "tempest/scenario/fake_test.py"))
-
     def test_no_mutable_default_args(self):
         self.assertEqual(1, len(list(checks.no_mutable_default_args(
             " def function1(para={}):"))))
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index 48c523e..27c45c2 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -12,12 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import keystoneclient.v2_0.client as keystoneclient
 import mock
-import neutronclient.v2_0.client as neutronclient
 from oslo.config import cfg
 
-from tempest import clients
 from tempest.common import http
 from tempest.common import isolated_creds
 from tempest import config
@@ -52,24 +49,6 @@
         self.assertTrue(isinstance(iso_creds.network_admin_client,
                                    json_network_client.NetworkClientJSON))
 
-    def test_official_client(self):
-        self.useFixture(mockpatch.PatchObject(keystoneclient.Client,
-                                              'authenticate'))
-        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
-                                              '_get_image_client'))
-        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
-                                              '_get_object_storage_client'))
-        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
-                                              '_get_orchestration_client'))
-        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
-                                              '_get_ceilometer_client'))
-        iso_creds = isolated_creds.IsolatedCreds('test class',
-                                                 tempest_client=False)
-        self.assertTrue(isinstance(iso_creds.identity_admin_client,
-                                   keystoneclient.Client))
-        self.assertTrue(isinstance(iso_creds.network_admin_client,
-                                   neutronclient.Client))
-
     def test_tempest_client_xml(self):
         iso_creds = isolated_creds.IsolatedCreds('test class', interface='xml')
         self.assertEqual(iso_creds.interface, 'xml')
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index f94d880..3496dce 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -195,8 +195,8 @@
     """Recommended to use as base class for boto related test."""
 
     @classmethod
-    def setUpClass(cls):
-        super(BotoTestCase, cls).setUpClass()
+    def resource_setup(cls):
+        super(BotoTestCase, cls).resource_setup()
         cls.conclusion = decision_maker()
         cls.os = cls.get_client_manager()
         # The trash contains cleanup functions and paramaters in tuples
@@ -245,7 +245,7 @@
             raise self.failureException, "BotoServerError not raised"
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         """Calls the callables added by addResourceCleanUp,
         when you overwrite this function don't forget to call this too.
         """
@@ -264,7 +264,7 @@
             finally:
                 del cls._resource_trash_bin[key]
         cls.clear_isolated_creds()
-        super(BotoTestCase, cls).tearDownClass()
+        super(BotoTestCase, cls).resource_cleanup()
         # NOTE(afazekas): let the super called even on exceptions
         # The real exceptions already logged, if the super throws another,
         # does not causes hidden issues
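
BotoTestCase keeps its trash-bin cleanup mechanism but now drains it from resource_cleanup instead of tearDownClass. A condensed sketch of that register-then-drain pattern, based on the docstring above (the sequence-counter bookkeeping is an assumption, not shown in this hunk)::

    class TrashBinExample(object):
        _resource_trash_bin = {}
        _sequence = 0

        @classmethod
        def addResourceCleanUp(cls, function, *args, **kwargs):
            """Register a callable to run during resource_cleanup."""
            cls._sequence += 1
            cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
            return cls._sequence

        @classmethod
        def resource_cleanup(cls):
            # Drain the bin in reverse registration order, always removing
            # the entry even if the callable raises.
            for key in reversed(sorted(cls._resource_trash_bin)):
                function, args, kwargs = cls._resource_trash_bin[key]
                try:
                    function(*args, **kwargs)
                finally:
                    del cls._resource_trash_bin[key]
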
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index c0d3f7a..f3f11fd 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -30,8 +30,8 @@
 class InstanceRunTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(InstanceRunTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(InstanceRunTest, cls).resource_setup()
         if not cls.conclusion['A_I_IMAGES_READY']:
             raise cls.skipException("".join(("EC2 ", cls.__name__,
                                     ": requires ami/aki/ari manifest")))
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
index 698e3e1..c3e1e2a 100644
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ b/tempest/thirdparty/boto/test_ec2_keys.py
@@ -26,8 +26,8 @@
 class EC2KeysTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(EC2KeysTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(EC2KeysTest, cls).resource_setup()
         cls.client = cls.os.ec2api_client
         cls.ec = cls.ec2_error_code
 
diff --git a/tempest/thirdparty/boto/test_ec2_network.py b/tempest/thirdparty/boto/test_ec2_network.py
index 792dde3..a75fb7b 100644
--- a/tempest/thirdparty/boto/test_ec2_network.py
+++ b/tempest/thirdparty/boto/test_ec2_network.py
@@ -20,8 +20,8 @@
 class EC2NetworkTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(EC2NetworkTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(EC2NetworkTest, cls).resource_setup()
         cls.client = cls.os.ec2api_client
 
     # Note(afazekas): these tests for things duable without an instance
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
index 7d9bdab..fb3d32b 100644
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ b/tempest/thirdparty/boto/test_ec2_security_groups.py
@@ -20,8 +20,8 @@
 class EC2SecurityGroupTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(EC2SecurityGroupTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(EC2SecurityGroupTest, cls).resource_setup()
         cls.client = cls.os.ec2api_client
 
     def test_create_authorize_security_group(self):
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
index b50c6b0..9cee8a4 100644
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -29,8 +29,8 @@
 class EC2VolumesTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(EC2VolumesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(EC2VolumesTest, cls).resource_setup()
 
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
index 3a8dc89..342fc0e 100644
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ b/tempest/thirdparty/boto/test_s3_buckets.py
@@ -14,18 +14,16 @@
 #    under the License.
 
 from tempest.common.utils import data_utils
-from tempest import test
 from tempest.thirdparty.boto import test as boto_test
 
 
 class S3BucketsTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(S3BucketsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(S3BucketsTest, cls).resource_setup()
         cls.client = cls.os.s3_client
 
-    @test.skip_because(bug="1076965")
     def test_create_and_get_delete_bucket(self):
         # S3 Create, get and delete bucket
         bucket_name = data_utils.rand_name("s3bucket-")
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 389e25c..f5dec95 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -26,8 +26,8 @@
 class S3ImagesTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(S3ImagesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(S3ImagesTest, cls).resource_setup()
         if not cls.conclusion['A_I_IMAGES_READY']:
             raise cls.skipException("".join(("EC2 ", cls.__name__,
                                     ": requires ami/aki/ari manifest")))
diff --git a/tempest/thirdparty/boto/test_s3_objects.py b/tempest/thirdparty/boto/test_s3_objects.py
index db3c1cf..43774c2 100644
--- a/tempest/thirdparty/boto/test_s3_objects.py
+++ b/tempest/thirdparty/boto/test_s3_objects.py
@@ -24,8 +24,8 @@
 class S3BucketsTest(boto_test.BotoTestCase):
 
     @classmethod
-    def setUpClass(cls):
-        super(S3BucketsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(S3BucketsTest, cls).resource_setup()
         cls.client = cls.os.s3_client
 
     def test_create_get_delete_object(self):
diff --git a/test-requirements.txt b/test-requirements.txt
index cd8154b..ba70259 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,10 +1,13 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
 hacking>=0.9.2,<0.10
 # needed for doc build
 sphinx>=1.1.2,!=1.2.0,<1.3
 python-subunit>=0.0.18
-oslosphinx
+oslosphinx>=2.2.0  # Apache-2.0
 mox>=0.5.3
 mock>=1.0
 coverage>=3.6
-oslotest
-stevedore>=0.14
+oslotest>=1.1.0  # Apache-2.0
+stevedore>=1.0.0  # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 492c4f6..cab59a8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,8 @@
          OS_TEST_PATH=./tempest/test_discover
 usedevelop = True
 install_command = pip install -U {opts} {packages}
+whitelist_externals = bash
+
 
 [testenv:py26]
 setenv = OS_TEST_PATH=./tempest/tests
@@ -17,6 +19,11 @@
 setenv = OS_TEST_PATH=./tempest/tests
 commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
 
+[testenv:py34]
+setenv = OS_TEST_PATH=./tempest/tests
+         PYTHONHASHSEED=0
+commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+
 [testenv:py27]
 setenv = OS_TEST_PATH=./tempest/tests
 commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'