Merge "Add a NotLockingAccounts credentials provider"
diff --git a/HACKING.rst b/HACKING.rst
index 8652971..025bf74 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -12,6 +12,7 @@
 - [T104] Scenario tests require a services decorator
 - [T105] Unit tests cannot use setUpClass
 - [T106] vim configuration should not be kept in source files.
+- [N322] Method's default argument shouldn't be mutable
 
 Test Data/Configuration
 -----------------------
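The new N322 check flags mutable default arguments, the same issue this change fixes in create_test_server_group() in tempest/api/compute/base.py below. A minimal sketch of the pattern the check targets (illustrative function names, not tempest code):

    # Flagged by N322: the default list is created once at definition
    # time and shared by every call that omits the argument.
    def create_group(name="", policy=[]):
        policy.append('affinity')   # mutates the shared default list
        return name, policy

    # Preferred form: default to None and build a fresh list per call.
    def create_group_fixed(name="", policy=None):
        if policy is None:
            policy = ['affinity']
        return name, policy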
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index ef56ab3..3b0b834 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -747,10 +747,10 @@
 
 # The cidr block to allocate tenant ipv6 subnets from (string
 # value)
-#tenant_network_v6_cidr=2003::/64
+#tenant_network_v6_cidr=2003::/48
 
 # The mask bits for tenant ipv6 subnets (integer value)
-#tenant_network_v6_mask_bits=96
+#tenant_network_v6_mask_bits=64
 
 # Whether tenant network connectivity should be evaluated
 # directly (boolean value)
@@ -843,6 +843,15 @@
 # expected to be enabled (list value)
 #discoverable_apis=all
 
+# Execute (old style) container-sync tests (boolean value)
+#container_sync=true
+
+# Execute object-versioning tests (boolean value)
+#object_versioning=true
+
+# Execute discoverability tests (boolean value)
+#discoverability=true
+
 
 [orchestration]
 
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index ab6aed3..b9b9b55 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -106,3 +106,17 @@
         body = self.client.get_node_supported_boot_devices(self.node['uuid'])
         self.assertIn('supported_boot_devices', body)
         self.assertTrue(isinstance(body['supported_boot_devices'], list))
+
+    @test.attr(type='smoke')
+    def test_get_console(self):
+        _, body = self.client.get_console(self.node['uuid'])
+        con_info = ['console_enabled', 'console_info']
+        for key in con_info:
+            self.assertIn(key, body)
+
+    @test.attr(type='smoke')
+    def test_set_console_mode(self):
+        self.client.set_console_mode(self.node['uuid'], True)
+
+        _, body = self.client.get_console(self.node['uuid'])
+        self.assertEqual(True, body['console_enabled'])
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 4afda03..0b29dde 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -44,7 +44,6 @@
 
     # TODO(afazekas): Add dedicated tenant to the skiped quota tests
     # it can be moved into the setUpClass as well
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_when_cpu_quota_is_full(self):
         # Disallow server creation when tenant's vcpu quota is full
@@ -58,9 +57,9 @@
 
         self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                         cores=default_vcpu_quota)
-        self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+                          self.create_test_server)
 
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_when_memory_quota_is_full(self):
         # Disallow server creation when tenant's memory quota is full
@@ -74,9 +73,9 @@
 
         self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                         ram=default_mem_quota)
-        self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+                          self.create_test_server)
 
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_when_instances_quota_is_full(self):
         # Once instances quota limit is reached, disallow server creation
@@ -89,7 +88,8 @@
                                          instances=instances_quota)
         self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                         instances=default_instances_quota)
-        self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+                          self.create_test_server)
 
     @test.skip_because(bug="1186354",
                        condition=CONF.service_available.neutron)
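Dropping the skip_because(bug="1298131") decorators here (and in the v3 copies of these tests below) relies on assertRaises accepting a tuple of exception classes, so the tests pass whether the quota failure surfaces as Unauthorized (403) or OverLimit (413). The tuple form is standard unittest behaviour and matches like an except clause; a small self-contained illustration:

    import unittest


    class TupleAssertRaisesExample(unittest.TestCase):
        def test_any_listed_exception_satisfies_the_assertion(self):
            def over_quota():
                # Stand-in for create_test_server() against a full quota;
                # the real call may raise either Unauthorized or OverLimit.
                raise LookupError("quota exceeded")

            # A tuple matches any of its members, like "except (A, B):".
            self.assertRaises((LookupError, ValueError), over_quota)


    if __name__ == '__main__':
        unittest.main()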
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index f4d010e..4d17557 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -54,7 +54,6 @@
             flavor_id = data_utils.rand_int_id(start=1000)
         return flavor_id
 
-    @test.skip_because(bug="1298131")
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     @test.attr(type=['negative', 'gate'])
@@ -70,12 +69,11 @@
                                                              ram, vcpus, disk,
                                                              flavor_id)
         self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
-        self.assertRaises(exceptions.Unauthorized,
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                           self.client.resize,
                           self.servers[0]['id'],
                           flavor_ref['id'])
 
-    @test.skip_because(bug="1298131")
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     @test.attr(type=['negative', 'gate'])
@@ -91,7 +89,7 @@
                                                              ram, vcpus, disk,
                                                              flavor_id)
         self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
-        self.assertRaises(exceptions.Unauthorized,
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                           self.client.resize,
                           self.servers[0]['id'],
                           flavor_ref['id'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 343a39a..47d1254 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -271,10 +271,10 @@
         return resp, body
 
     @classmethod
-    def create_test_server_group(cls, name="", policy=[]):
+    def create_test_server_group(cls, name="", policy=None):
         if not name:
             name = data_utils.rand_name(cls.__name__ + "-Server-Group")
-        if not policy:
+        if policy is None:
             policy = ['affinity']
         resp, body = cls.servers_client.create_server_group(name, policy)
         cls.server_groups.append(body['id'])
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 187c0d4..1c5d4a3 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -47,6 +47,7 @@
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ImagesOneServerTestJSON, cls).setUpClass()
         cls.client = cls.images_client
@@ -59,12 +60,8 @@
                         % cls.__name__)
             raise cls.skipException(skip_msg)
 
-        try:
-            resp, server = cls.create_test_server(wait_until='ACTIVE')
-            cls.server_id = server['id']
-        except Exception:
-            cls.tearDownClass()
-            raise
+        resp, server = cls.create_test_server(wait_until='ACTIVE')
+        cls.server_id = server['id']
 
     def _get_default_flavor_disk_size(self, flavor_id):
         resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
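Several setUpClass methods in this change swap their local try/except blocks for the test.safe_setup decorator. A hedged sketch of what such a decorator can look like, as an illustration of the idea rather than the exact tempest implementation: it guarantees a partially built class fixture is torn down before the setup error propagates.

    import functools


    def safe_setup(f):
        """Wrap a setUpClass body so failures still run tearDownClass.

        Assumes it is applied below @classmethod (so it receives the
        plain function) and that the class defines tearDownClass().
        """
        @functools.wraps(f)
        def wrapper(cls):
            try:
                f(cls)
            except Exception:
                # Release whatever the partial setup already created,
                # then re-raise the original error.
                cls.tearDownClass()
                raise
        return wrapper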
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 4e84e08..51d9b85 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -55,6 +55,7 @@
         self.__class__.server_id = self.rebuild_server(self.server_id)
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ImagesOneServerNegativeTestJSON, cls).setUpClass()
         cls.client = cls.images_client
@@ -67,12 +68,8 @@
                         % cls.__name__)
             raise cls.skipException(skip_msg)
 
-        try:
-            resp, server = cls.create_test_server(wait_until='ACTIVE')
-            cls.server_id = server['id']
-        except Exception:
-            cls.tearDownClass()
-            raise
+        resp, server = cls.create_test_server(wait_until='ACTIVE')
+        cls.server_id = server['id']
 
         cls.image_ids = []
 
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 68794b1..9f1cfc8 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -32,6 +32,7 @@
 class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ListImageFiltersTestJSON, cls).setUpClass()
         if not CONF.service_available.glance:
@@ -69,33 +70,28 @@
             return
 
         # Create instances and snapshots via nova
-        try:
-            resp, cls.server1 = cls.create_test_server()
-            resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
-            # NOTE(sdague) this is faster than doing the sync wait_util on both
-            cls.servers_client.wait_for_server_status(cls.server1['id'],
-                                                      'ACTIVE')
+        resp, cls.server1 = cls.create_test_server()
+        resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
+        # NOTE(sdague) this is faster than doing the sync wait_until on both
+        cls.servers_client.wait_for_server_status(cls.server1['id'],
+                                                  'ACTIVE')
 
-            # Create images to be used in the filter tests
-            resp, cls.snapshot1 = cls.create_image_from_server(
-                cls.server1['id'], wait_until='ACTIVE')
-            cls.snapshot1_id = cls.snapshot1['id']
+        # Create images to be used in the filter tests
+        resp, cls.snapshot1 = cls.create_image_from_server(
+            cls.server1['id'], wait_until='ACTIVE')
+        cls.snapshot1_id = cls.snapshot1['id']
 
-            # Servers have a hidden property for when they are being imaged
-            # Performing back-to-back create image calls on a single
-            # server will sometimes cause failures
-            resp, cls.snapshot3 = cls.create_image_from_server(
-                cls.server2['id'], wait_until='ACTIVE')
-            cls.snapshot3_id = cls.snapshot3['id']
+        # Servers have a hidden property for when they are being imaged
+        # Performing back-to-back create image calls on a single
+        # server will sometimes cause failures
+        resp, cls.snapshot3 = cls.create_image_from_server(
+            cls.server2['id'], wait_until='ACTIVE')
+        cls.snapshot3_id = cls.snapshot3['id']
 
-            # Wait for the server to be active after the image upload
-            resp, cls.snapshot2 = cls.create_image_from_server(
-                cls.server1['id'], wait_until='ACTIVE')
-            cls.snapshot2_id = cls.snapshot2['id']
-        except Exception:
-            LOG.exception('setUpClass failed')
-            cls.tearDownClass()
-            raise
+        # Wait for the server to be active after the image upload
+        resp, cls.snapshot2 = cls.create_image_from_server(
+            cls.server1['id'], wait_until='ACTIVE')
+        cls.snapshot2_id = cls.snapshot2['id']
 
     @test.attr(type='gate')
     def test_list_images_filter_by_status(self):
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 792b523..b9ec29e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -404,13 +404,6 @@
                           nonexistent_server)
 
     @test.attr(type=['negative', 'gate'])
-    def test_force_delete_server_invalid_state(self):
-        # we can only force-delete a server in 'soft-delete' state
-        self.assertRaises(exceptions.Conflict,
-                          self.client.force_delete_server,
-                          self.server_id)
-
-    @test.attr(type=['negative', 'gate'])
     def test_restore_nonexistent_server_id(self):
         # restore-delete a non existent server
         nonexistent_server = data_utils.rand_uuid()
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index eeff3ce..d7eb7ad 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -45,17 +45,17 @@
         expected_quota_set = self.default_quota_set | set(['id'])
         resp, quota_set = self.client.get_quota_set(self.tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(sorted(expected_quota_set),
-                         sorted(quota_set.keys()))
         self.assertEqual(quota_set['id'], self.tenant_id)
+        for quota in expected_quota_set:
+            self.assertIn(quota, quota_set.keys())
 
         # get the quota set using user id
         resp, quota_set = self.client.get_quota_set(self.tenant_id,
                                                     self.user_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(sorted(expected_quota_set),
-                         sorted(quota_set.keys()))
         self.assertEqual(quota_set['id'], self.tenant_id)
+        for quota in expected_quota_set:
+            self.assertIn(quota, quota_set.keys())
 
     @test.attr(type='smoke')
     def test_get_default_quotas(self):
@@ -63,9 +63,9 @@
         expected_quota_set = self.default_quota_set | set(['id'])
         resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(sorted(expected_quota_set),
-                         sorted(quota_set.keys()))
         self.assertEqual(quota_set['id'], self.tenant_id)
+        for quota in expected_quota_set:
+            self.assertIn(quota, quota_set.keys())
 
     @test.attr(type='smoke')
     def test_compare_tenant_quotas_with_default_quotas(self):
diff --git a/tempest/api/compute/v3/admin/test_quotas_negative.py b/tempest/api/compute/v3/admin/test_quotas_negative.py
index 7739f09..e115e7b 100644
--- a/tempest/api/compute/v3/admin/test_quotas_negative.py
+++ b/tempest/api/compute/v3/admin/test_quotas_negative.py
@@ -34,7 +34,6 @@
 
     # TODO(afazekas): Add dedicated tenant to the skiped quota tests
     # it can be moved into the setUpClass as well
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_when_cpu_quota_is_full(self):
         # Disallow server creation when tenant's vcpu quota is full
@@ -48,9 +47,9 @@
 
         self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                         cores=default_vcpu_quota)
-        self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+                          self.create_test_server)
 
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_when_memory_quota_is_full(self):
         # Disallow server creation when tenant's memory quota is full
@@ -64,7 +63,8 @@
 
         self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                         ram=default_mem_quota)
-        self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+                          self.create_test_server)
 
     @test.attr(type=['negative', 'gate'])
     def test_update_quota_normal_user(self):
@@ -73,7 +73,6 @@
                           self.demo_tenant_id,
                           ram=0)
 
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_when_instances_quota_is_full(self):
         # Once instances quota limit is reached, disallow server creation
@@ -86,4 +85,5 @@
                                          instances=instances_quota)
         self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                         instances=default_instances_quota)
-        self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+                          self.create_test_server)
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index 5eb6395..070dc2b 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -54,7 +54,6 @@
             flavor_id = data_utils.rand_int_id(start=1000)
         return flavor_id
 
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_ram(self):
         flavor_name = data_utils.rand_name("flavor-")
@@ -68,12 +67,11 @@
                                                              ram, vcpus, disk,
                                                              flavor_id)
         self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
-        self.assertRaises(exceptions.Unauthorized,
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                           self.client.resize,
                           self.servers[0]['id'],
                           flavor_ref['id'])
 
-    @test.skip_because(bug="1298131")
     @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_vcpus(self):
         flavor_name = data_utils.rand_name("flavor-")
@@ -87,7 +85,7 @@
                                                              ram, vcpus, disk,
                                                              flavor_id)
         self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
-        self.assertRaises(exceptions.Unauthorized,
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                           self.client.resize,
                           self.servers[0]['id'],
                           flavor_ref['id'])
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 795437b..edf91a7 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -47,6 +47,7 @@
         super(ImagesOneServerV3Test, self).tearDown()
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ImagesOneServerV3Test, cls).setUpClass()
         cls.client = cls.images_client
@@ -54,12 +55,8 @@
             skip_msg = ("%s skipped as glance is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
-        try:
-            resp, server = cls.create_test_server(wait_until='ACTIVE')
-            cls.server_id = server['id']
-        except Exception:
-            cls.tearDownClass()
-            raise
+        resp, server = cls.create_test_server(wait_until='ACTIVE')
+        cls.server_id = server['id']
 
     def _get_default_flavor_disk_size(self, flavor_id):
         resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index eed81c6..544a5a5 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -55,6 +55,7 @@
         self.__class__.server_id = self.rebuild_server(self.server_id)
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ImagesOneServerNegativeV3Test, cls).setUpClass()
         cls.client = cls.images_client
@@ -62,12 +63,8 @@
             skip_msg = ("%s skipped as glance is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
-        try:
-            resp, server = cls.create_test_server(wait_until='ACTIVE')
-            cls.server_id = server['id']
-        except Exception:
-            cls.tearDownClass()
-            raise
+        resp, server = cls.create_test_server(wait_until='ACTIVE')
+        cls.server_id = server['id']
 
         cls.image_ids = []
 
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index b68c84a..6e0f431 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -25,7 +25,6 @@
     """Base test case class for all Database API tests."""
 
     _interface = 'json'
-    force_tenant_isolation = False
 
     @classmethod
     def setUpClass(cls):
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index d40e0f3..433eaed 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -50,18 +50,16 @@
         super(CredentialsTestJSON, cls).tearDownClass()
 
     def _delete_credential(self, cred_id):
-        resp, body = self.creds_client.delete_credential(cred_id)
-        self.assertEqual(resp['status'], '204')
+        self.creds_client.delete_credential(cred_id)
 
     @test.attr(type='smoke')
     def test_credentials_create_get_update_delete(self):
         keys = [data_utils.rand_name('Access-'),
                 data_utils.rand_name('Secret-')]
-        resp, cred = self.creds_client.create_credential(
+        _, cred = self.creds_client.create_credential(
             keys[0], keys[1], self.user_body['id'],
             self.projects[0])
         self.addCleanup(self._delete_credential, cred['id'])
-        self.assertEqual(resp['status'], '201')
         for value1 in self.creds_list[0]:
             self.assertIn(value1, cred)
         for value2 in self.creds_list[1]:
@@ -69,18 +67,16 @@
 
         new_keys = [data_utils.rand_name('NewAccess-'),
                     data_utils.rand_name('NewSecret-')]
-        resp, update_body = self.creds_client.update_credential(
+        _, update_body = self.creds_client.update_credential(
             cred['id'], access_key=new_keys[0], secret_key=new_keys[1],
             project_id=self.projects[1])
-        self.assertEqual(resp['status'], '200')
         self.assertEqual(cred['id'], update_body['id'])
         self.assertEqual(self.projects[1], update_body['project_id'])
         self.assertEqual(self.user_body['id'], update_body['user_id'])
         self.assertEqual(update_body['blob']['access'], new_keys[0])
         self.assertEqual(update_body['blob']['secret'], new_keys[1])
 
-        resp, get_body = self.creds_client.get_credential(cred['id'])
-        self.assertEqual(resp['status'], '200')
+        _, get_body = self.creds_client.get_credential(cred['id'])
         for value1 in self.creds_list[0]:
             self.assertEqual(update_body[value1],
                              get_body[value1])
@@ -94,16 +90,14 @@
         fetched_cred_ids = list()
 
         for i in range(2):
-            resp, cred = self.creds_client.create_credential(
+            _, cred = self.creds_client.create_credential(
                 data_utils.rand_name('Access-'),
                 data_utils.rand_name('Secret-'),
                 self.user_body['id'], self.projects[0])
-            self.assertEqual(resp['status'], '201')
             created_cred_ids.append(cred['id'])
             self.addCleanup(self._delete_credential, cred['id'])
 
-        resp, creds = self.creds_client.list_credentials()
-        self.assertEqual(resp['status'], '200')
+        _, creds = self.creds_client.list_credentials()
 
         for i in creds:
             fetched_cred_ids.append(i['id'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 6beb8f2..ad46af2 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -57,9 +57,8 @@
     @test.attr(type='gate')
     def test_list_endpoints(self):
         # Get a list of endpoints
-        resp, fetched_endpoints = self.client.list_endpoints()
+        _, fetched_endpoints = self.client.list_endpoints()
         # Asserting LIST endpoints
-        self.assertEqual(resp['status'], '200')
         missing_endpoints =\
             [e for e in self.setup_endpoints if e not in fetched_endpoints]
         self.assertEqual(0, len(missing_endpoints),
@@ -71,11 +70,10 @@
         region = data_utils.rand_name('region')
         url = data_utils.rand_url()
         interface = 'public'
-        resp, endpoint =\
+        _, endpoint =\
             self.client.create_endpoint(self.service_id, interface, url,
                                         region=region, enabled=True)
         # Asserting Create Endpoint response body
-        self.assertEqual(resp['status'], '201')
         self.assertIn('id', endpoint)
         self.assertEqual(region, endpoint['region'])
         self.assertEqual(url, endpoint['url'])
@@ -84,8 +82,7 @@
         fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
         self.assertIn(endpoint['id'], fetched_endpoints_id)
         # Deleting the endpoint created in this method
-        resp, body = self.client.delete_endpoint(endpoint['id'])
-        self.assertEqual(resp['status'], '204')
+        _, body = self.client.delete_endpoint(endpoint['id'])
         self.assertEqual(body, '')
         # Checking whether endpoint is deleted successfully
         resp, fetched_endpoints = self.client.list_endpoints()
@@ -116,12 +113,11 @@
         region2 = data_utils.rand_name('region')
         url2 = data_utils.rand_url()
         interface2 = 'internal'
-        resp, endpoint = \
+        _, endpoint = \
             self.client.update_endpoint(endpoint_for_update['id'],
                                         service_id=service2['id'],
                                         interface=interface2, url=url2,
                                         region=region2, enabled=False)
-        self.assertEqual(resp['status'], '200')
         # Asserting if the attributes of endpoint are updated
         self.assertEqual(service2['id'], endpoint['service_id'])
         self.assertEqual(interface2, endpoint['interface'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 0e79440..65c5230 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -22,8 +22,7 @@
     _interface = 'json'
 
     def _delete_policy(self, policy_id):
-        resp, _ = self.policy_client.delete_policy(policy_id)
-        self.assertEqual(204, resp.status)
+        self.policy_client.delete_policy(policy_id)
 
     @test.attr(type='smoke')
     def test_list_policies(self):
@@ -39,8 +38,7 @@
             self.addCleanup(self._delete_policy, policy['id'])
             policy_ids.append(policy['id'])
         # List and Verify Policies
-        resp, body = self.policy_client.list_policies()
-        self.assertEqual(resp['status'], '200')
+        _, body = self.policy_client.list_policies()
         for p in body:
             fetched_ids.append(p['id'])
         missing_pols = [p for p in policy_ids if p not in fetched_ids]
@@ -51,7 +49,7 @@
         # Test to update policy
         blob = data_utils.rand_name('BlobName-')
         policy_type = data_utils.rand_name('PolicyType-')
-        resp, policy = self.policy_client.create_policy(blob, policy_type)
+        _, policy = self.policy_client.create_policy(blob, policy_type)
         self.addCleanup(self._delete_policy, policy['id'])
         self.assertIn('id', policy)
         self.assertIn('type', policy)
@@ -59,15 +57,13 @@
         self.assertIsNotNone(policy['id'])
         self.assertEqual(blob, policy['blob'])
         self.assertEqual(policy_type, policy['type'])
-        resp, fetched_policy = self.policy_client.get_policy(policy['id'])
-        self.assertEqual(resp['status'], '200')
         # Update policy
         update_type = data_utils.rand_name('UpdatedPolicyType-')
-        resp, data = self.policy_client.update_policy(
+        _, data = self.policy_client.update_policy(
             policy['id'], type=update_type)
         self.assertIn('type', data)
         # Assertion for updated value with fetched value
-        resp, fetched_policy = self.policy_client.get_policy(policy['id'])
+        _, fetched_policy = self.policy_client.get_policy(policy['id'])
         self.assertIn('id', fetched_policy)
         self.assertIn('blob', fetched_policy)
         self.assertIn('type', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index c8b034f..8fc0e22 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -40,34 +40,30 @@
         super(RegionsTestJSON, cls).tearDownClass()
 
     def _delete_region(self, region_id):
-        resp, _ = self.client.delete_region(region_id)
-        self.assertEqual(204, resp.status)
+        self.client.delete_region(region_id)
         self.assertRaises(exceptions.NotFound,
                           self.client.get_region, region_id)
 
     @test.attr(type='gate')
     def test_create_update_get_delete_region(self):
         r_description = data_utils.rand_name('description-')
-        resp, region = self.client.create_region(
+        _, region = self.client.create_region(
             r_description, parent_region_id=self.setup_regions[0]['id'])
-        self.assertEqual(201, resp.status)
         self.addCleanup(self._delete_region, region['id'])
         self.assertEqual(r_description, region['description'])
         self.assertEqual(self.setup_regions[0]['id'],
                          region['parent_region_id'])
         # Update region with new description and parent ID
         r_alt_description = data_utils.rand_name('description-')
-        resp, region = self.client.update_region(
+        _, region = self.client.update_region(
             region['id'],
             description=r_alt_description,
             parent_region_id=self.setup_regions[1]['id'])
-        self.assertEqual(200, resp.status)
         self.assertEqual(r_alt_description, region['description'])
         self.assertEqual(self.setup_regions[1]['id'],
                          region['parent_region_id'])
         # Get the details of region
-        resp, region = self.client.get_region(region['id'])
-        self.assertEqual(200, resp.status)
+        _, region = self.client.get_region(region['id'])
         self.assertEqual(r_alt_description, region['description'])
         self.assertEqual(self.setup_regions[1]['id'],
                          region['parent_region_id'])
@@ -77,19 +73,17 @@
         # Create a region with a specific id
         r_region_id = data_utils.rand_uuid()
         r_description = data_utils.rand_name('description-')
-        resp, region = self.client.create_region(
+        _, region = self.client.create_region(
             r_description, unique_region_id=r_region_id)
         self.addCleanup(self._delete_region, region['id'])
         # Asserting Create Region with specific id response body
-        self.assertEqual(201, resp.status)
         self.assertEqual(r_region_id, region['id'])
         self.assertEqual(r_description, region['description'])
 
     @test.attr(type='gate')
     def test_list_regions(self):
         # Get a list of regions
-        resp, fetched_regions = self.client.list_regions()
-        self.assertEqual(200, resp.status)
+        _, fetched_regions = self.client.list_regions()
         missing_regions =\
             [e for e in self.setup_regions if e not in fetched_regions]
         # Asserting List Regions response
diff --git a/tempest/api/network/test_metering_extensions.py b/tempest/api/network/test_metering_extensions.py
index 5b8db43..0cc218b 100644
--- a/tempest/api/network/test_metering_extensions.py
+++ b/tempest/api/network/test_metering_extensions.py
@@ -35,6 +35,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(MeteringJSON, cls).setUpClass()
         if not test.is_extension_enabled('metering', 'network'):
@@ -42,17 +43,12 @@
             raise cls.skipException(msg)
         description = "metering label created by tempest"
         name = data_utils.rand_name("metering-label")
-        try:
-            cls.metering_label = cls.create_metering_label(name, description)
-            remote_ip_prefix = "10.0.0.0/24"
-            direction = "ingress"
-            cls.metering_label_rule = cls.create_metering_label_rule(
-                remote_ip_prefix, direction,
-                metering_label_id=cls.metering_label['id'])
-        except Exception:
-            LOG.exception('setUpClass failed')
-            cls.tearDownClass()
-            raise
+        cls.metering_label = cls.create_metering_label(name, description)
+        remote_ip_prefix = "10.0.0.0/24"
+        direction = "ingress"
+        cls.metering_label_rule = cls.create_metering_label_rule(
+            remote_ip_prefix, direction,
+            metering_label_id=cls.metering_label['id'])
 
     def _delete_metering_label(self, metering_label_id):
         # Deletes a label and verifies if it is deleted or not
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index f06d17c..26f6b8f 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -16,6 +16,7 @@
 import socket
 
 from tempest.api.network import base
+from tempest.common import custom_matchers
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import test
@@ -72,18 +73,12 @@
         _, body = self.client.show_port(self.port['id'])
         port = body['port']
         self.assertIn('id', port)
-        self.assertEqual(port['id'], self.port['id'])
-        self.assertEqual(self.port['admin_state_up'], port['admin_state_up'])
-        self.assertEqual(self.port['device_id'], port['device_id'])
-        self.assertEqual(self.port['device_owner'], port['device_owner'])
-        self.assertEqual(self.port['mac_address'], port['mac_address'])
-        self.assertEqual(self.port['name'], port['name'])
-        self.assertEqual(self.port['security_groups'],
-                         port['security_groups'])
-        self.assertEqual(self.port['network_id'], port['network_id'])
-        self.assertEqual(self.port['security_groups'],
-                         port['security_groups'])
-        self.assertEqual(port['fixed_ips'], [])
+        # TODO(Santosh): This is a temporary workaround to compare create_port
+        # and show_port dict elements. Remove this once the extra_dhcp_opts
+        # issue gets fixed in neutron (bug 1365341).
+        self.assertThat(self.port,
+                        custom_matchers.MatchesDictExceptForKeys
+                        (port, excluded_keys=['extra_dhcp_opts']))
 
     @test.attr(type='smoke')
     def test_show_port_fields(self):
@@ -134,6 +129,7 @@
         for port in ports:
             self.assertEqual(sorted(fields), sorted(port.keys()))
 
+    @test.skip_because(bug="1364166")
     @test.attr(type='smoke')
     def test_update_port_with_second_ip(self):
         # Create a network with two subnets
@@ -249,10 +245,10 @@
     _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(PortsIpV6TestJSON, cls).setUpClass()
         if not CONF.network_feature_enabled.ipv6:
-            cls.tearDownClass()
             skip_msg = "IPv6 Tests are disabled."
             raise cls.skipException(skip_msg)
 
@@ -274,6 +270,5 @@
         super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
 
 
-class PortsAdminExtendedAttrsIpV6TestXML(
-    PortsAdminExtendedAttrsIpV6TestJSON):
+class PortsAdminExtendedAttrsIpV6TestXML(PortsAdminExtendedAttrsIpV6TestJSON):
     _interface = 'xml'
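test_show_port now checks the create_port and show_port payloads with one custom matcher instead of a field-by-field list of assertEqual calls. A hedged sketch of a testtools-style matcher with the same shape (the real tempest.common.custom_matchers implementation may differ in detail):

    from testtools import matchers


    class MatchesDictExceptForKeys(object):
        """Match two dicts for equality while ignoring some keys."""

        def __init__(self, expected, excluded_keys=None):
            self.expected = expected
            self.excluded = set(excluded_keys or [])

        def __str__(self):
            return 'MatchesDictExceptForKeys(%r, excluded_keys=%r)' % (
                self.expected, sorted(self.excluded))

        def match(self, actual):
            expected = dict((k, v) for k, v in self.expected.items()
                            if k not in self.excluded)
            actual = dict((k, v) for k, v in actual.items()
                          if k not in self.excluded)
            if expected != actual:
                return matchers.Mismatch(
                    'Dicts differ (ignoring %s): %r != %r'
                    % (sorted(self.excluded), expected, actual))
            return None

    # Usage as in test_show_port:
    # self.assertThat(self.port, MatchesDictExceptForKeys(
    #     port, excluded_keys=['extra_dhcp_opts']))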
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index ccc0067..a143659 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -69,10 +69,11 @@
         cls.object_client_alt.auth_provider.clear_auth()
         cls.container_client_alt.auth_provider.clear_auth()
 
-        cls.data = base.DataGenerator(cls.identity_admin_client)
+        cls.data = SwiftDataGenerator(cls.identity_admin_client)
 
     @classmethod
     def tearDownClass(cls):
+        cls.data.teardown_all()
         cls.isolated_creds.clear_isolated_creds()
         super(BaseObjectTest, cls).tearDownClass()
 
@@ -116,3 +117,28 @@
         self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
                         target, method))
         self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+
+
+class SwiftDataGenerator(base.DataGenerator):
+
+    def setup_test_user(self, reseller=False):
+        super(SwiftDataGenerator, self).setup_test_user()
+        if reseller:
+            role_name = CONF.object_storage.reseller_admin_role
+        else:
+            role_name = CONF.object_storage.operator_role
+        role_id = self._get_role_id(role_name)
+        self._assign_role(role_id)
+
+    def _get_role_id(self, role_name):
+        try:
+            _, roles = self.client.list_roles()
+            return next(r['id'] for r in roles if r['name'] == role_name)
+        except StopIteration:
+            msg = "Role name '%s' is not found" % role_name
+            raise exceptions.NotFound(msg)
+
+    def _assign_role(self, role_id):
+        self.client.assign_user_role(self.tenant['id'],
+                                     self.user['id'],
+                                     role_id)
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 19e3068..c1eb897 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -18,7 +18,6 @@
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
-from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
@@ -33,32 +32,10 @@
         cls.container_name = data_utils.rand_name(name="TestContainer")
         cls.container_client.create_container(cls.container_name)
 
-        cls.data.setup_test_user()
+        cls.data.setup_test_user(reseller=True)
 
         cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
 
-        # Retrieve the ResellerAdmin role id
-        reseller_role_id = None
-        try:
-            _, roles = cls.os_admin.identity_client.list_roles()
-            reseller_role_id = next(r['id'] for r in roles if r['name']
-                                    == CONF.object_storage.reseller_admin_role)
-        except StopIteration:
-            msg = "No ResellerAdmin role found"
-            raise exceptions.NotFound(msg)
-
-        # Retrieve the ResellerAdmin user id
-        reseller_user_id = cls.data.test_credentials.user_id
-
-        # Retrieve the ResellerAdmin tenant id
-        reseller_tenant_id = cls.data.test_credentials.tenant_id
-
-        # Assign the newly created user the appropriate ResellerAdmin role
-        cls.os_admin.identity_client.assign_user_role(
-            reseller_tenant_id,
-            reseller_user_id,
-            reseller_role_id)
-
         # Retrieve a ResellerAdmin auth data and use it to set a quota
         # on the client's account
         cls.reselleradmin_auth_data = \
@@ -97,7 +74,6 @@
     def tearDownClass(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        cls.data.teardown_all()
         super(AccountQuotasTest, cls).tearDownClass()
 
     @test.attr(type="smoke")
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 6afd381..7324c2e 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -33,32 +33,10 @@
         cls.container_name = data_utils.rand_name(name="TestContainer")
         cls.container_client.create_container(cls.container_name)
 
-        cls.data.setup_test_user()
+        cls.data.setup_test_user(reseller=True)
 
         cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
 
-        # Retrieve the ResellerAdmin role id
-        reseller_role_id = None
-        try:
-            _, roles = cls.os_admin.identity_client.list_roles()
-            reseller_role_id = next(r['id'] for r in roles if r['name']
-                                    == CONF.object_storage.reseller_admin_role)
-        except StopIteration:
-            msg = "No ResellerAdmin role found"
-            raise exceptions.NotFound(msg)
-
-        # Retrieve the ResellerAdmin tenant id
-        reseller_user_id = cls.data.test_credentials.user_id
-
-        # Retrieve the ResellerAdmin tenant id
-        reseller_tenant_id = cls.data.test_credentials.tenant_id
-
-        # Assign the newly created user the appropriate ResellerAdmin role
-        cls.os_admin.identity_client.assign_user_role(
-            reseller_tenant_id,
-            reseller_user_id,
-            reseller_role_id)
-
         # Retrieve a ResellerAdmin auth data and use it to set a quota
         # on the client's account
         cls.reselleradmin_auth_data = \
@@ -96,7 +74,6 @@
     def tearDownClass(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        cls.data.teardown_all()
         super(AccountQuotasNegativeTest, cls).tearDownClass()
 
     @test.attr(type=["negative", "smoke"])
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index d615374..69cba1e 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -14,15 +14,14 @@
 #    under the License.
 
 import random
-
 from six import moves
+import testtools
 
 from tempest.api.object_storage import base
 from tempest import clients
 from tempest.common import custom_matchers
 from tempest.common.utils import data_utils
 from tempest import config
-from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
@@ -45,7 +44,6 @@
     @classmethod
     def tearDownClass(cls):
         cls.delete_containers(cls.containers)
-        cls.data.teardown_all()
         super(AccountTest, cls).tearDownClass()
 
     @test.attr(type='smoke')
@@ -66,35 +64,7 @@
         # the base user of this instance.
         self.data.setup_test_user()
 
-        os_test_user = clients.Manager(
-            self.data.test_credentials)
-
-        # Retrieve the id of an operator role of object storage
-        test_role_id = None
-        swift_role = CONF.object_storage.operator_role
-        try:
-            _, roles = self.os_admin.identity_client.list_roles()
-            test_role_id = next(r['id'] for r in roles if r['name']
-                                == swift_role)
-        except StopIteration:
-            msg = "%s role found" % swift_role
-            raise exceptions.NotFound(msg)
-
-        # Retrieve the test_user id
-        _, users = self.os_admin.identity_client.get_users()
-        test_user_id = next(usr['id'] for usr in users if usr['name']
-                            == self.data.test_user)
-
-        # Retrieve the test_tenant id
-        _, tenants = self.os_admin.identity_client.list_tenants()
-        test_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
-                              == self.data.test_tenant)
-
-        # Assign the newly created user the appropriate operator role
-        self.os_admin.identity_client.assign_user_role(
-            test_tenant_id,
-            test_user_id,
-            test_role_id)
+        os_test_user = clients.Manager(self.data.test_credentials)
 
         resp, container_list = \
             os_test_user.account_client.list_account_containers()
@@ -148,6 +118,9 @@
         self.assertEqual(container_list.find(".//bytes").tag, 'bytes')
 
     @test.attr(type='smoke')
+    @testtools.skipIf(
+        not CONF.object_storage_feature_enabled.discoverability,
+        'Discoverability function is disabled')
     def test_list_extensions(self):
         resp, extensions = self.account_client.list_extensions()
 
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index 490672d..e4c46e2 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -47,5 +47,3 @@
         self.assertRaises(exceptions.Unauthorized,
                           self.custom_account_client.list_account_containers,
                           params=params)
-        # delete the user which was created
-        self.data.teardown_all()
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index fc51504..a7d45be 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -27,11 +27,6 @@
         test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
 
-    @classmethod
-    def tearDownClass(cls):
-        cls.data.teardown_all()
-        super(ObjectTestACLs, cls).tearDownClass()
-
     def setUp(self):
         super(ObjectTestACLs, self).setUp()
         self.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index ca53876..1a21ecc 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -29,11 +29,6 @@
         test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
 
-    @classmethod
-    def tearDownClass(cls):
-        cls.data.teardown_all()
-        super(ObjectACLsNegativeTest, cls).tearDownClass()
-
     def setUp(self):
         super(ObjectACLsNegativeTest, self).setUp()
         self.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 581c6d9..28bde24 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -48,7 +48,6 @@
     def tearDownClass(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        cls.data.teardown_all()
         super(StaticWebTest, cls).tearDownClass()
 
     @test.requires_ext(extension='staticweb', service='object')
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 5f46d01..3e6d58c 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -13,6 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
 import time
 import urlparse
 
@@ -68,6 +69,9 @@
 
     @test.attr(type='slow')
     @test.skip_because(bug='1317133')
+    @testtools.skipIf(
+        not CONF.object_storage_feature_enabled.container_sync,
+        'Old-style container sync function is disabled')
     def test_container_synchronization(self):
         # container to container synchronization
         # to allow/accept sync requests to/from other accounts
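The container_sync, object_versioning and discoverability switches added to etc/tempest.conf.sample earlier in this change are plain boolean feature flags consulted by skip guards like the one above. A hedged sketch of how such flags are commonly declared with oslo.config (the group and option names below use underscores for brevity; the real registration lives in tempest/config.py and may differ):

    from oslo.config import cfg

    object_storage_features = cfg.OptGroup(
        name='object_storage_feature_enabled',
        title='Enabled object-storage features')

    ObjectStoreFeaturesGroup = [
        cfg.BoolOpt('container_sync', default=True,
                    help='Execute (old style) container-sync tests'),
        cfg.BoolOpt('object_versioning', default=True,
                    help='Execute object-versioning tests'),
        cfg.BoolOpt('discoverability', default=True,
                    help='Execute discoverability tests'),
    ]

    CONF = cfg.CONF
    CONF.register_group(object_storage_features)
    CONF.register_opts(ObjectStoreFeaturesGroup,
                       group=object_storage_features)

    # Tests then guard on the flag, as the skip decorator above does:
    # @testtools.skipIf(
    #     not CONF.object_storage_feature_enabled.container_sync, ...)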
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index d1541b9..ad7e068 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -15,7 +15,6 @@
 # under the License.
 
 from tempest.api.object_storage import base
-from tempest import clients
 from tempest.common import custom_matchers
 from tempest import test
 
@@ -25,11 +24,6 @@
     @classmethod
     def setUpClass(cls):
         super(CrossdomainTest, cls).setUpClass()
-        # creates a test user. The test user will set its base_url to the Swift
-        # endpoint and test the healthcheck feature.
-        cls.data.setup_test_user()
-
-        cls.os_test_user = clients.Manager(cls.data.test_credentials)
 
         cls.xml_start = '<?xml version="1.0"?>\n' \
                         '<!DOCTYPE cross-domain-policy SYSTEM ' \
@@ -38,29 +32,16 @@
 
         cls.xml_end = "</cross-domain-policy>"
 
-    @classmethod
-    def tearDownClass(cls):
-        cls.data.teardown_all()
-        super(CrossdomainTest, cls).tearDownClass()
-
     def setUp(self):
         super(CrossdomainTest, self).setUp()
 
-        client = self.os_test_user.account_client
         # Turning http://.../v1/foobar into http://.../
-        client.skip_path()
-
-    def tearDown(self):
-        # clear the base_url for subsequent requests
-        self.os_test_user.account_client.reset_path()
-
-        super(CrossdomainTest, self).tearDown()
+        self.account_client.skip_path()
 
     @test.attr('gate')
     @test.requires_ext(extension='crossdomain', service='object')
     def test_get_crossdomain_policy(self):
-        resp, body = self.os_test_user.account_client.get("crossdomain.xml",
-                                                          {})
+        resp, body = self.account_client.get("crossdomain.xml", {})
 
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertTrue(body.startswith(self.xml_start) and
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index dc5585e..a0fb708 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -59,7 +59,6 @@
     def tearDownClass(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
         cls.delete_containers(cls.containers)
-        cls.data.teardown_all()
         super(ObjectFormPostTest, cls).tearDownClass()
 
     def get_multipart_form(self, expires=600):
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 878bf6d..103bc8e 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -59,7 +59,6 @@
     def tearDownClass(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
         cls.delete_containers(cls.containers)
-        cls.data.teardown_all()
         super(ObjectFormPostNegativeTest, cls).tearDownClass()
 
     def get_multipart_form(self, expires=600):
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index 264a18a..f5ebce7 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -59,8 +59,6 @@
 
         cls.delete_containers(cls.containers)
 
-        # delete the user setup created
-        cls.data.teardown_all()
         super(ObjectTempUrlTest, cls).tearDownClass()
 
     def setUp(self):
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 7d26433..fda6861 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -53,8 +53,6 @@
 
         cls.delete_containers(cls.containers)
 
-        # delete the user setup created
-        cls.data.teardown_all()
         super(ObjectTempUrlNegativeTest, cls).tearDownClass()
 
     def setUp(self):
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 8d2ff9b..971449d 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -13,10 +13,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.api.object_storage import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import test
 
+CONF = config.CONF
+
 
 class ContainerTest(base.BaseObjectTest):
     @classmethod
@@ -41,6 +46,9 @@
         self.assertEqual(header_value, versioned)
 
     @test.attr(type='smoke')
+    @testtools.skipIf(
+        not CONF.object_storage_feature_enabled.object_versioning,
+        'Object-versioning is disabled')
     def test_versioned_container(self):
         # create container
         vers_container_name = data_utils.rand_name(name='TestVersionContainer')
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index d0fb825..0b22de5 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -64,8 +64,10 @@
         return admin_client
 
     @classmethod
-    def create_stack(cls, stack_name, template_data, parameters={},
+    def create_stack(cls, stack_name, template_data, parameters=None,
                      environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         resp, body = cls.client.create_stack(
             stack_name,
             template=template_data,
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index cbe62a1..d7c2a0d 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -66,9 +66,9 @@
         params = {'format': 'json'}
         _, container_list = \
             self.account_client.list_account_containers(params=params)
-        self.assertEqual(2, len(container_list))
-        for cont in container_list:
-            self.assertTrue(cont['name'].startswith(self.stack_name))
+        created_containers = [cont for cont in container_list
+                              if cont['name'].startswith(self.stack_name)]
+        self.assertEqual(2, len(created_containers))
 
     @test.services('object_storage')
     def test_acl(self):
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 3cd0827..43f48ff 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -148,11 +148,11 @@
     _api_version = 1
 
 
-class BaseVolumeV1AdminTest(BaseVolumeV1Test):
+class BaseVolumeAdminTest(BaseVolumeTest):
     """Base test case class for all Volume Admin API tests."""
     @classmethod
     def setUpClass(cls):
-        super(BaseVolumeV1AdminTest, cls).setUpClass()
+        super(BaseVolumeAdminTest, cls).setUpClass()
         cls.adm_user = CONF.identity.admin_username
         cls.adm_pass = CONF.identity.admin_password
         cls.adm_tenant = CONF.identity.admin_tenant_name
@@ -160,11 +160,62 @@
             msg = ("Missing Volume Admin API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
+
         if CONF.compute.allow_tenant_isolation:
             cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
                                          interface=cls._interface)
         else:
             cls.os_adm = clients.AdminManager(interface=cls._interface)
+
+        cls.qos_specs = []
+
         cls.client = cls.os_adm.volume_types_client
         cls.hosts_client = cls.os_adm.volume_hosts_client
         cls.quotas_client = cls.os_adm.volume_quotas_client
+        cls.volume_types_client = cls.os_adm.volume_types_client
+
+        if cls._api_version == 1:
+            if not CONF.volume_feature_enabled.api_v1:
+                msg = "Volume API v1 is disabled"
+                raise cls.skipException(msg)
+            cls.volume_qos_client = cls.os_adm.volume_qos_client
+        elif cls._api_version == 2:
+            if not CONF.volume_feature_enabled.api_v2:
+                msg = "Volume API v2 is disabled"
+                raise cls.skipException(msg)
+            cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.clear_qos_specs()
+        super(BaseVolumeAdminTest, cls).tearDownClass()
+
+    @classmethod
+    def create_test_qos_specs(cls, name=None, consumer=None, **kwargs):
+        """create a test Qos-Specs."""
+        name = name or data_utils.rand_name(cls.__name__ + '-QoS')
+        consumer = consumer or 'front-end'
+        _, qos_specs = cls.volume_qos_client.create_qos(name, consumer,
+                                                        **kwargs)
+        cls.qos_specs.append(qos_specs['id'])
+        return qos_specs
+
+    @classmethod
+    def clear_qos_specs(cls):
+        for qos_id in cls.qos_specs:
+            try:
+                cls.volume_qos_client.delete_qos(qos_id)
+            except exceptions.NotFound:
+                # The qos_specs may have already been deleted which is OK.
+                pass
+
+        for qos_id in cls.qos_specs:
+            try:
+                cls.volume_qos_client.wait_for_resource_deletion(qos_id)
+            except exceptions.NotFound:
+                # The qos_specs may have already been deleted which is OK.
+                pass
+
+
+class BaseVolumeV1AdminTest(BaseVolumeAdminTest):
+    _api_version = 1
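
BaseVolumeAdminTest now selects the QoS client from cls._api_version, so version-specific admin base classes stay one line long, as the V1 alias above shows. A V2 counterpart, were one wanted, would presumably be equally thin (this sketch is illustrative and not part of the change):

    class BaseVolumeV2AdminTest(BaseVolumeAdminTest):
        # hypothetical example only; not added by this patch
        _api_version = 2
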
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
new file mode 100644
index 0000000..8b6ba49
--- /dev/null
+++ b/tempest/api/volume/test_qos.py
@@ -0,0 +1,176 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.volume import base
+from tempest.common.utils import data_utils as utils
+from tempest import test
+
+
+class QosSpecsV2TestJSON(base.BaseVolumeAdminTest):
+    """Test the Cinder QoS-specs.
+
+    Tests for create, list, delete, show, associate,
+    disassociate, and set/unset key V2 APIs.
+    """
+
+    @classmethod
+    @test.safe_setup
+    def setUpClass(cls):
+        super(QosSpecsV2TestJSON, cls).setUpClass()
+        # The admin qos client is set up by BaseVolumeAdminTest;
+        # create a shared test qos-specs for the tests in this class
+        cls.qos_name = utils.rand_name(cls.__name__ + '-QoS')
+        cls.qos_consumer = 'front-end'
+
+        cls.created_qos = cls.create_test_qos_specs(cls.qos_name,
+                                                    cls.qos_consumer,
+                                                    read_iops_sec='2000')
+
+    def _create_delete_test_qos_with_given_consumer(self, consumer):
+        name = utils.rand_name('qos')
+        qos = {'name': name, 'consumer': consumer}
+        body = self.create_test_qos_specs(name, consumer)
+        for key in ['name', 'consumer']:
+            self.assertEqual(qos[key], body[key])
+
+        self.volume_qos_client.delete_qos(body['id'])
+        self.volume_qos_client.wait_for_resource_deletion(body['id'])
+
+        # validate the deletion
+        _, list_qos = self.volume_qos_client.list_qos()
+        self.assertNotIn(body, list_qos)
+
+    def _create_test_volume_type(self):
+        vol_type_name = utils.rand_name("volume-type")
+        _, vol_type = self.volume_types_client.create_volume_type(
+            vol_type_name)
+        self.addCleanup(self.volume_types_client.delete_volume_type,
+                        vol_type['id'])
+        return vol_type
+
+    def _test_associate_qos(self, vol_type_id):
+        self.volume_qos_client.associate_qos(
+            self.created_qos['id'], vol_type_id)
+
+    def _test_get_association_qos(self):
+        _, body = self.volume_qos_client.get_association_qos(
+            self.created_qos['id'])
+
+        associations = []
+        for association in body:
+            associations.append(association['id'])
+
+        return associations
+
+    def test_create_delete_qos_with_front_end_consumer(self):
+        """Tests the creation and deletion of QoS specs
+
+        With consumer as front end
+        """
+        self._create_delete_test_qos_with_given_consumer('front-end')
+
+    def test_create_delete_qos_with_back_end_consumer(self):
+        """Tests the creation and deletion of QoS specs
+
+        With consumer as back-end
+        """
+        self._create_delete_test_qos_with_given_consumer('back-end')
+
+    @test.attr(type='smoke')
+    def test_create_delete_qos_with_both_consumer(self):
+        """Tests the creation and deletion of QoS specs
+
+        With consumer as both front end and back end
+        """
+        self._create_delete_test_qos_with_given_consumer('both')
+
+    @test.attr(type='smoke')
+    def test_get_qos(self):
+        """Tests the detail of a given qos-specs"""
+        _, body = self.volume_qos_client.get_qos(self.created_qos['id'])
+        self.assertEqual(self.qos_name, body['name'])
+        self.assertEqual(self.qos_consumer, body['consumer'])
+
+    @test.attr(type='smoke')
+    def test_list_qos(self):
+        """Tests the list of all qos-specs"""
+        _, body = self.volume_qos_client.list_qos()
+        self.assertIn(self.created_qos, body)
+
+    @test.attr(type='smoke')
+    def test_set_unset_qos_key(self):
+        """Test the addition of a specs key to qos-specs"""
+        args = {'iops_bytes': '500'}
+        _, body = self.volume_qos_client.set_qos_key(self.created_qos['id'],
+                                                     iops_bytes='500')
+        self.assertEqual(args, body)
+        _, body = self.volume_qos_client.get_qos(self.created_qos['id'])
+        self.assertEqual(args['iops_bytes'], body['specs']['iops_bytes'])
+
+        # test the deletion of a specs key from qos-specs
+        keys = ['iops_bytes']
+        self.volume_qos_client.unset_qos_key(self.created_qos['id'], keys)
+        operation = 'qos-key-unset'
+        self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
+                                                       operation, keys)
+        _, body = self.volume_qos_client.get_qos(self.created_qos['id'])
+        self.assertNotIn(keys[0], body['specs'])
+
+    @test.attr(type='smoke')
+    def test_associate_disassociate_qos(self):
+        """Test the following operations :
+
+        1. associate_qos
+        2. get_association_qos
+        3. disassociate_qos
+        4. disassociate_all_qos
+        """
+
+        # create three test volume-types
+        vol_type = []
+        for _ in range(0, 3):
+            vol_type.append(self._create_test_volume_type())
+
+        # associate the qos-specs with volume-types
+        for i in range(0, 3):
+            self._test_associate_qos(vol_type[i]['id'])
+
+        # get the association of the qos-specs
+        associations = self._test_get_association_qos()
+
+        for i in range(0, 3):
+            self.assertIn(vol_type[i]['id'], associations)
+
+        # disassociate a volume-type with qos-specs
+        self.volume_qos_client.disassociate_qos(
+            self.created_qos['id'], vol_type[0]['id'])
+        operation = 'disassociate'
+        self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
+                                                       operation,
+                                                       vol_type[0]['id'])
+        associations = self._test_get_association_qos()
+        self.assertNotIn(vol_type[0]['id'], associations)
+
+        # disassociate all volume-types from qos-specs
+        self.volume_qos_client.disassociate_all_qos(
+            self.created_qos['id'])
+        operation = 'disassociate-all'
+        self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
+                                                       operation)
+        associations = self._test_get_association_qos()
+        self.assertEmpty(associations)
+
+
+class QosSpecsV1TestJSON(QosSpecsV2TestJSON):
+    _api_version = 1
diff --git a/tempest/api_schema/response/compute/v2/hypervisors.py b/tempest/api_schema/response/compute/v2/hypervisors.py
index 1878881..cbb7698 100644
--- a/tempest/api_schema/response/compute/v2/hypervisors.py
+++ b/tempest/api_schema/response/compute/v2/hypervisors.py
@@ -26,11 +26,7 @@
         'items': {
             'type': 'object',
             'properties': {
-                # NOTE: Now the type of 'id' is integer,
-                # but here allows 'string' also because we
-                # will be able to change it to 'uuid' in
-                # the future.
-                'id': {'type': ['integer', 'string']},
+                'uuid': {'type': 'string'},
                 'name': {'type': 'string'}
             }
         }
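
The schema fragment above can be exercised directly with the jsonschema library; the response bodies below are made up for illustration:

    import jsonschema

    hypervisor_server = {
        'type': 'object',
        'properties': {
            'uuid': {'type': 'string'},
            'name': {'type': 'string'}
        }
    }

    # a string uuid validates under the updated fragment
    jsonschema.validate({'uuid': 'c5f1-example', 'name': 'instance-0001'},
                        hypervisor_server)

    # an integer in that field, which the old 'id' property tolerated, now fails
    try:
        jsonschema.validate({'uuid': 12345, 'name': 'instance-0001'},
                            hypervisor_server)
    except jsonschema.ValidationError:
        pass
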
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/compute/test_nova.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_nova.py
rename to tempest/cli/simple_read_only/compute/test_nova.py
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/compute/test_nova_manage.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_nova_manage.py
rename to tempest/cli/simple_read_only/compute/test_nova_manage.py
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/data_processing/test_sahara.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_sahara.py
rename to tempest/cli/simple_read_only/data_processing/test_sahara.py
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/identity/test_keystone.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_keystone.py
rename to tempest/cli/simple_read_only/identity/test_keystone.py
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/image/test_glance.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_glance.py
rename to tempest/cli/simple_read_only/image/test_glance.py
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_neutron.py
rename to tempest/cli/simple_read_only/network/test_neutron.py
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/object_storage/test_swift.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_swift.py
rename to tempest/cli/simple_read_only/object_storage/test_swift.py
diff --git a/tempest/cli/simple_read_only/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_heat.py
rename to tempest/cli/simple_read_only/orchestration/test_heat.py
diff --git a/tempest/cli/simple_read_only/test_ceilometer.py b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
similarity index 93%
rename from tempest/cli/simple_read_only/test_ceilometer.py
rename to tempest/cli/simple_read_only/telemetry/test_ceilometer.py
index b622dd4..1d2822d 100644
--- a/tempest/cli/simple_read_only/test_ceilometer.py
+++ b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
@@ -39,19 +39,15 @@
             raise cls.skipException(msg)
         super(SimpleReadOnlyCeilometerClientTest, cls).setUpClass()
 
-    @test.services('telemetry')
     def test_ceilometer_meter_list(self):
         self.ceilometer('meter-list')
 
     @test.attr(type='slow')
-    @test.services('telemetry')
     def test_ceilometer_resource_list(self):
         self.ceilometer('resource-list')
 
-    @test.services('telemetry')
     def test_ceilometermeter_alarm_list(self):
         self.ceilometer('alarm-list')
 
-    @test.services('telemetry')
     def test_ceilometer_version(self):
         self.ceilometer('', flags='--version')
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_cinder.py
rename to tempest/cli/simple_read_only/volume/test_cinder.py
diff --git a/tempest/clients.py b/tempest/clients.py
index 2b8b6fb..eab496e 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -180,12 +180,14 @@
 from tempest.services.volume.json.backups_client import BackupsClientJSON
 from tempest.services.volume.json.extensions_client import \
     ExtensionsClientJSON as VolumeExtensionClientJSON
+from tempest.services.volume.json.qos_client import QosSpecsClientJSON
 from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
 from tempest.services.volume.json.volumes_client import VolumesClientJSON
 from tempest.services.volume.v2.json.availability_zone_client import \
     VolumeV2AvailabilityZoneClientJSON
 from tempest.services.volume.v2.json.extensions_client import \
     ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
+from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
 from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
 from tempest.services.volume.v2.xml.availability_zone_client import \
     VolumeV2AvailabilityZoneClientXML
@@ -428,6 +430,13 @@
         self.security_group_default_rules_client = (
             SecurityGroupDefaultRulesClientJSON(self.auth_provider))
         self.networks_client = NetworksClientJSON(self.auth_provider)
+        # NOTE: XML clients are not implemented for QoS specs yet, so the
+        # qos clients are set here for both API versions. Once XML clients
+        # are implemented, they will move to their respective if/else blocks.
+        # Bug : 1312553
+        self.volume_qos_client = QosSpecsClientJSON(self.auth_provider)
+        self.volume_qos_v2_client = QosSpecsV2ClientJSON(
+            self.auth_provider)
 
 
 class AltManager(Manager):
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 6761a69..87b7cd7 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -20,6 +20,7 @@
 """
 
 import argparse
+import datetime
 import logging
 import os
 import sys
@@ -30,12 +31,14 @@
 import tempest.auth
 from tempest import config
 from tempest import exceptions
+from tempest.openstack.common import timeutils
 from tempest.services.compute.json import flavors_client
 from tempest.services.compute.json import servers_client
 from tempest.services.identity.json import identity_client
 from tempest.services.image.v2.json import image_client
 from tempest.services.object_storage import container_client
 from tempest.services.object_storage import object_client
+from tempest.services.telemetry.json import telemetry_client
 from tempest.services.volume.json import volumes_client
 
 OPTS = {}
@@ -44,6 +47,8 @@
 
 LOG = None
 
+JAVELIN_START = datetime.datetime.utcnow()
+
 
 class OSClient(object):
     _creds = None
@@ -62,6 +67,7 @@
         self.containers = container_client.ContainerClient(_auth)
         self.images = image_client.ImageClientV2JSON(_auth)
         self.flavors = flavors_client.FlavorsClientJSON(_auth)
+        self.telemetry = telemetry_client.TelemetryClientJSON(_auth)
         self.volumes = volumes_client.VolumesClientJSON(_auth)
 
 
@@ -104,6 +110,13 @@
         else:
             LOG.warn("Tenant '%s' already exists in this environment" % tenant)
 
+
+def destroy_tenants(tenants):
+    admin = keystone_admin()
+    for tenant in tenants:
+        tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
+        r, body = admin.identity.delete_tenant(tenant_id)
+
 ##############
 #
 # USERS
@@ -168,6 +181,13 @@
                 enabled=True)
 
 
+def destroy_users(users):
+    admin = keystone_admin()
+    for user in users:
+        user_id = admin.identity.get_user_by_name(user['name'])['id']
+        r, body = admin.identity.delete_user(user_id)
+
+
 def collect_users(users):
     global USERS
     LOG.info("Collecting users")
@@ -196,6 +216,7 @@
         # TODO(sdague): Volumes not yet working, bring it back once the
         # code is self testing.
         # self.check_volumes()
+        self.check_telemetry()
 
     def check_users(self):
         """Check that the users we expect to exist, do.
@@ -252,6 +273,26 @@
                                 "Server %s is not pingable at %s" % (
                                     server['name'], addr))
 
+    def check_telemetry(self):
+        """Check that ceilometer provides a sane sample.
+
+        Confirm that there is at least one sample and that it has the
+        expected metadata.
+
+        If in check mode confirm that the oldest sample available is from
+        before the upgrade.
+        """
+        LOG.info("checking telemetry")
+        for server in self.res['servers']:
+            client = client_for_user(server['owner'])
+            response, body = client.telemetry.list_samples(
+                'instance',
+                query=('metadata.display_name', 'eq', server['name'])
+            )
+            self.assertEqual(response.status, 200)
+            self.assertTrue(len(body) >= 1, 'expecting at least one sample')
+            self._confirm_telemetry_sample(server, body[-1])
+
     def check_volumes(self):
         """Check that the volumes are still there and attached."""
         if not self.res.get('volumes'):
@@ -270,6 +311,26 @@
             self.assertEqual(volume['id'], attachment['volume_id'])
             self.assertEqual(server_id, attachment['server_id'])
 
+    def _confirm_telemetry_sample(self, server, sample):
+        """Check this sample matches the expected resource metadata."""
+        # Confirm display_name
+        self.assertEqual(server['name'],
+                         sample['resource_metadata']['display_name'])
+        # Confirm instance_type of flavor
+        flavor = sample['resource_metadata'].get(
+            'flavor.name',
+            sample['resource_metadata'].get('instance_type')
+        )
+        self.assertEqual(server['flavor'], flavor)
+        # Confirm the oldest sample was created before upgrade.
+        if OPTS.mode == 'check':
+            oldest_timestamp = timeutils.normalize_time(
+                timeutils.parse_isotime(sample['timestamp']))
+            self.assertTrue(
+                oldest_timestamp < JAVELIN_START,
+                'timestamp should come before start of second javelin run'
+            )
+
 
 #######################
 #
@@ -296,6 +357,15 @@
             obj['container'], obj['name'],
             _file_contents(obj['file']))
 
+
+def destroy_objects(objects):
+    for obj in objects:
+        client = client_for_user(obj['owner'])
+        r, body = client.objects.delete_object(obj['container'], obj['name'])
+        if not (200 <= int(r['status']) < 299):
+            raise ValueError("unable to destroy object: [%s] %s" % (r, body))
+
+
 #######################
 #
 # IMAGES
@@ -449,6 +519,13 @@
         client.volumes.create_volume(volume['name'], volume['size'])
 
 
+def destroy_volumes(volumes):
+    for volume in volumes:
+        client = client_for_user(volume['owner'])
+        volume_id = _get_volume_by_name(client, volume['name'])['id']
+        r, body = client.volumes.delete_volume(volume_id)
+
+
 def attach_volumes(volumes):
     for volume in volumes:
         client = client_for_user(volume['owner'])
@@ -484,18 +561,12 @@
 def destroy_resources():
     LOG.info("Destroying Resources")
     # Destroy in inverse order of create
-
-    # Future
-    # detach_volumes
-    # destroy_volumes
-
     destroy_servers(RES['servers'])
     destroy_images(RES['images'])
-    # destroy_objects
-
-    # destroy_users
-    # destroy_tenants
-
+    destroy_objects(RES['objects'])
+    destroy_volumes(RES['volumes'])
+    destroy_users(RES['users'])
+    destroy_tenants(RES['tenants'])
     LOG.warn("Destroy mode incomplete")
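
check_telemetry() relies on JAVELIN_START being captured at import time so that, in check mode, the oldest sample can be shown to predate the second run. A standalone sketch of that timestamp comparison, using the timeutils module imported above and an illustrative sample timestamp:

    import datetime

    from tempest.openstack.common import timeutils

    JAVELIN_START = datetime.datetime.utcnow()

    # an ISO 8601 timestamp of the kind a ceilometer sample carries (made up)
    oldest = timeutils.normalize_time(
        timeutils.parse_isotime('2014-07-01T12:00:00'))
    assert oldest < JAVELIN_START
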
 
 
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index 7348a7d..39e3a67 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -13,7 +13,6 @@
 #    under the License.
 
 import re
-from unittest import util
 
 from testtools import helpers
 
@@ -204,24 +203,35 @@
         self.intersect = set(self.expected) & set(self.actual)
         self.symmetric_diff = set(self.expected) ^ set(self.actual)
 
+    def _format_dict(self, dict_to_format):
+        # Ensure the error string dict is printed in a set order
+        # NOTE(mtreinish): needed to ensure a deterministic error msg for
+        # testing. Otherwise the error message will be dependent on the
+        # dict ordering.
+        dict_string = "{"
+        for key in sorted(dict_to_format):
+            dict_string += "'%s': %s, " % (key, dict_to_format[key])
+        dict_string = dict_string[:-2] + '}'
+        return dict_string
+
     def describe(self):
         msg = ""
         if self.symmetric_diff:
             only_expected = helpers.dict_subtract(self.expected, self.actual)
             only_actual = helpers.dict_subtract(self.actual, self.expected)
             if only_expected:
-                msg += "Only in expected:\n  %s\n" % \
-                       util.safe_repr(only_expected)
+                msg += "Only in expected:\n  %s\n" % self._format_dict(
+                    only_expected)
             if only_actual:
-                msg += "Only in actual:\n  %s\n" % \
-                       util.safe_repr(only_actual)
+                msg += "Only in actual:\n  %s\n" % self._format_dict(
+                    only_actual)
         diff_set = set(o for o in self.intersect if
                        self.expected[o] != self.actual[o])
         if diff_set:
             msg += "Differences:\n"
-        for o in diff_set:
-            msg += "  %s: expected %s, actual %s\n" % (
-                o, self.expected[o], self.actual[o])
+            for o in diff_set:
+                msg += "  %s: expected %s, actual %s\n" % (
+                    o, self.expected[o], self.actual[o])
         return msg
 
     def get_details(self):
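
Since dict iteration order is arbitrary here, formatting the keys in sorted order keeps mismatch messages reproducible across runs. A standalone copy of the helper shows the effect:

    def _format_dict(dict_to_format):
        # same logic as the matcher helper above
        dict_string = "{"
        for key in sorted(dict_to_format):
            dict_string += "'%s': %s, " % (key, dict_to_format[key])
        dict_string = dict_string[:-2] + '}'
        return dict_string

    print(_format_dict({'b': 2, 'a': 1}))   # {'a': 1, 'b': 2} -- stable ordering
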
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index ff92b67..e584cbf 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -248,8 +248,10 @@
                 return resp[i]
         return ""
 
-    def _log_request_start(self, method, req_url, req_headers={},
+    def _log_request_start(self, method, req_url, req_headers=None,
                            req_body=None):
+        if req_headers is None:
+            req_headers = {}
         caller_name = misc_utils.find_test_caller()
         trace_regex = CONF.debug.trace_requests
         if trace_regex and re.search(trace_regex, caller_name):
@@ -257,8 +259,10 @@
                            (caller_name, method, req_url))
 
     def _log_request(self, method, req_url, resp,
-                     secs="", req_headers={},
+                     secs="", req_headers=None,
                      req_body=None, resp_body=None):
+        if req_headers is None:
+            req_headers = {}
         # if we have the request id, put it in the right part of the log
         extra = dict(request_id=self._get_request_id(resp))
         # NOTE(sdague): while we still have 6 callers to this function
@@ -548,7 +552,13 @@
             if self.is_resource_deleted(id):
                 return
             if int(time.time()) - start_time >= self.build_timeout:
-                raise exceptions.TimeoutException
+                message = ('Failed to delete resource %(id)s within the '
+                           'required time (%(timeout)s s).' %
+                           {'id': id, 'timeout': self.build_timeout})
+                caller = misc_utils.find_test_caller()
+                if caller:
+                    message = '(%s) %s' % (caller, message)
+                raise exceptions.TimeoutException(message)
             time.sleep(self.build_interval)
 
     def is_resource_deleted(self, id):
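
The hunk above only enriches the timeout message; the surrounding loop is the usual poll-until-deleted pattern. A generic standalone sketch of that pattern (function and exception names are illustrative, not Tempest API):

    import time


    def wait_for_deletion(is_deleted, res_id, build_timeout=60, build_interval=1):
        """Poll is_deleted(res_id) until it returns True or the timeout expires."""
        start_time = int(time.time())
        while True:
            if is_deleted(res_id):
                return
            if int(time.time()) - start_time >= build_timeout:
                raise RuntimeError('Failed to delete resource %s within the '
                                   'required time (%s s).' % (res_id, build_timeout))
            time.sleep(build_interval)


    # trivial usage: a resource that is already gone returns immediately
    wait_for_deletion(lambda _id: True, 'fake-id')
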
diff --git a/tempest/config.py b/tempest/config.py
index 93d4874..d3449a7 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -419,10 +419,10 @@
                default=28,
                help="The mask bits for tenant ipv4 subnets"),
     cfg.StrOpt('tenant_network_v6_cidr',
-               default="2003::/64",
+               default="2003::/48",
                help="The cidr block to allocate tenant ipv6 subnets from"),
     cfg.IntOpt('tenant_network_v6_mask_bits',
-               default=96,
+               default=64,
                help="The mask bits for tenant ipv6 subnets"),
     cfg.BoolOpt('tenant_networks_reachable',
                 default=False,
@@ -622,6 +622,15 @@
                 help="A list of the enabled optional discoverable apis. "
                      "A single entry, all, indicates that all of these "
                      "features are expected to be enabled"),
+    cfg.BoolOpt('container_sync',
+                default=True,
+                help="Execute (old style) container-sync tests"),
+    cfg.BoolOpt('object_versioning',
+                default=True,
+                help="Execute object-versioning tests"),
+    cfg.BoolOpt('discoverability',
+                default=True,
+                help="Execute discoverability tests"),
 ]
 
 database_group = cfg.OptGroup(name='database',
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index cef010e..abc60cb 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -27,6 +27,7 @@
 SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
 SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
 VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
+mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
 
 
 def import_no_clients_in_api(physical_line, filename):
@@ -119,6 +120,16 @@
                     'tests')
 
 
+def no_mutable_default_args(logical_line):
+    """Check that mutable object isn't used as default argument
+
+    N322: Method's default argument shouldn't be mutable
+    """
+    msg = "N322: Method's default argument shouldn't be mutable!"
+    if mutable_default_args.match(logical_line):
+        yield (0, msg)
+
+
 def factory(register):
     register(import_no_clients_in_api)
     register(scenario_tests_need_service_tags)
@@ -126,3 +137,4 @@
     register(no_vi_headers)
     register(service_tags_not_in_module_path)
     register(no_official_client_manager_in_api_tests)
+    register(no_mutable_default_args)
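
A quick demonstration of what the N322 regex does and does not flag (assertions only, runnable as-is):

    import re

    mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")

    # flagged by N322
    assert mutable_default_args.match("def create_stack(cls, name, parameters={}):")
    assert mutable_default_args.match("    def foo(self, args=[]):")

    # not flagged
    assert not mutable_default_args.match("def create_stack(cls, name, parameters=None):")
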
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 0f14c94..cabefc8 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -39,6 +39,7 @@
 from tempest import exceptions
 from tempest.openstack.common import log
 from tempest.openstack.common import timeutils
+from tempest.services.network import resources as net_resources
 import tempest.test
 
 CONF = config.CONF
@@ -88,6 +89,16 @@
         cls.servers_client = cls.manager.servers_client
         cls.volumes_client = cls.manager.volumes_client
         cls.snapshots_client = cls.manager.snapshots_client
+        cls.interface_client = cls.manager.interfaces_client
+        # Neutron network client
+        cls.network_client = cls.manager.network_client
+
+    @classmethod
+    def tearDownClass(cls):
+        # Isolated creds also manages network resources, which should
+        # be cleaned up at the end of the test case
+        cls.isolated_creds.clear_isolated_creds()
+        super(ScenarioTest, cls).tearDownClass()
 
     @classmethod
     def _get_credentials(cls, get_creds, ctype):
@@ -103,6 +114,11 @@
                                     'user')
 
     @classmethod
+    def alt_credentials(cls):
+        return cls._get_credentials(cls.isolated_creds.get_alt_creds,
+                                    'alt_user')
+
+    @classmethod
     def admin_credentials(cls):
         return cls._get_credentials(cls.isolated_creds.get_admin_creds,
                                     'identity_admin')
@@ -121,23 +137,24 @@
         # not at the end of the class
         self.addCleanup(self._wait_for_cleanups)
 
-    def delete_wrapper(self, delete_thing, thing_id):
+    def delete_wrapper(self, delete_thing, *args, **kwargs):
         """Ignores NotFound exceptions for delete operations.
 
-        @param delete_thing: delete method of a resource
-        @param thing_id: id of the resource to be deleted
+        @param delete_thing: delete method of a resource. method will be
+            executed as delete_thing(*args, **kwargs)
+
         """
         try:
             # Tempest clients return dicts, so there is no common delete
             # method available. Using a callable instead
-            delete_thing(thing_id)
+            delete_thing(*args, **kwargs)
         except exceptions.NotFound:
             # If the resource is already missing, mission accomplished.
             pass
 
     def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
-                             cleanup_callable, cleanup_args=[],
-                             cleanup_kwargs={}, ignore_error=True):
+                             cleanup_callable, cleanup_args=None,
+                             cleanup_kwargs=None, ignore_error=True):
         """Adds wait for ansyc resource deletion at the end of cleanups
 
         @param waiter_callable: callable to wait for the resource to delete
@@ -147,6 +164,10 @@
             the following *cleanup_args, **cleanup_kwargs.
             usually a delete method.
         """
+        if cleanup_args is None:
+            cleanup_args = []
+        if cleanup_kwargs is None:
+            cleanup_kwargs = {}
         self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
         wait_dict = {
             'waiter_callable': waiter_callable,
@@ -172,16 +193,18 @@
     # The create_[resource] functions only return body and discard the
     # resp part which is not used in scenario tests
 
-    def create_keypair(self):
+    def create_keypair(self, client=None):
+        if not client:
+            client = self.keypairs_client
         name = data_utils.rand_name(self.__class__.__name__)
         # We don't need to create a keypair by pubkey in scenario
-        resp, body = self.keypairs_client.create_keypair(name)
-        self.addCleanup(self.keypairs_client.delete_keypair, name)
+        resp, body = client.create_keypair(name)
+        self.addCleanup(client.delete_keypair, name)
         return body
 
     def create_server(self, name=None, image=None, flavor=None,
                       wait_on_boot=True, wait_on_delete=True,
-                      create_kwargs={}):
+                      create_kwargs=None):
         """Creates VM instance.
 
         @param image: image from which to create the instance
@@ -196,6 +219,8 @@
             image = CONF.compute.image_ref
         if flavor is None:
             flavor = CONF.compute.flavor_ref
+        if create_kwargs is None:
+            create_kwargs = {}
 
         fixed_network_name = CONF.compute.fixed_network_name
         if 'nics' not in create_kwargs and fixed_network_name:
@@ -262,7 +287,7 @@
         _, volume = self.volumes_client.get_volume(volume['id'])
         return volume
 
-    def _create_loginable_secgroup_rule_nova(self, secgroup_id=None):
+    def _create_loginable_secgroup_rule(self, secgroup_id=None):
         _client = self.security_groups_client
         if secgroup_id is None:
             _, sgs = _client.list_security_groups()
@@ -300,7 +325,7 @@
             rules.append(sg_rule)
         return rules
 
-    def _create_security_group_nova(self):
+    def _create_security_group(self):
         # Create security group
         sg_name = data_utils.rand_name(self.__class__.__name__)
         sg_desc = sg_name + " description"
@@ -313,7 +338,7 @@
                         secgroup['id'])
 
         # Add rules to the security group
-        self._create_loginable_secgroup_rule_nova(secgroup['id'])
+        self._create_loginable_secgroup_rule(secgroup['id'])
 
         return secgroup
 
@@ -338,7 +363,9 @@
 
         return linux_client
 
-    def _image_create(self, name, fmt, path, properties={}):
+    def _image_create(self, name, fmt, path, properties=None):
+        if properties is None:
+            properties = {}
         name = data_utils.rand_name('%s-' % name)
         image_file = open(path, 'rb')
         self.addCleanup(image_file.close)
@@ -385,6 +412,9 @@
         LOG.debug("image:%s" % self.image)
 
     def _log_console_output(self, servers=None):
+        if not CONF.compute_feature_enabled.console_output:
+            LOG.debug('Console output not supported, cannot log')
+            return
         if not servers:
             _, servers = self.servers_client.list_servers()
             servers = servers['servers']
@@ -417,6 +447,518 @@
         return snapshot_image
 
 
+# TODO(yfried): change this class name to NetworkScenarioTest once client
+# migration is complete
+class NeutronScenarioTest(ScenarioTest):
+    """Base class for network scenario tests.
+    This class provides helpers for network scenario tests that use the
+    neutron API. Helpers inherited from the ancestor class which use the
+    nova network API are overridden with their neutron equivalents.
+
+    This class also enforces the use of Neutron instead of nova-network.
+    Subclassed tests will be skipped if Neutron is not enabled.
+
+    """
+
+    @classmethod
+    def check_preconditions(cls):
+        if CONF.service_available.neutron:
+            cls.enabled = True
+            # verify that neutron_available is telling the truth
+            try:
+                cls.network_client.list_networks()
+            except exc.EndpointNotFound:
+                cls.enabled = False
+                raise
+        else:
+            cls.enabled = False
+            msg = 'Neutron not available'
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setUpClass(cls):
+        super(NeutronScenarioTest, cls).setUpClass()
+        cls.tenant_id = cls.manager.identity_client.tenant_id
+        cls.check_preconditions()
+
+    def _create_network(self, client=None, tenant_id=None,
+                        namestart='network-smoke-'):
+        if not client:
+            client = self.network_client
+        if not tenant_id:
+            tenant_id = client.rest_client.tenant_id
+        name = data_utils.rand_name(namestart)
+        _, result = client.create_network(name=name, tenant_id=tenant_id)
+        network = net_resources.DeletableNetwork(client=client,
+                                                 **result['network'])
+        self.assertEqual(network.name, name)
+        self.addCleanup(self.delete_wrapper, network.delete)
+        return network
+
+    def _list_networks(self, *args, **kwargs):
+        """List networks using admin creds """
+        return self._admin_lister('networks')(*args, **kwargs)
+
+    def _list_subnets(self, *args, **kwargs):
+        """List subnets using admin creds """
+        return self._admin_lister('subnets')(*args, **kwargs)
+
+    def _list_routers(self, *args, **kwargs):
+        """List routers using admin creds """
+        return self._admin_lister('routers')(*args, **kwargs)
+
+    def _list_ports(self, *args, **kwargs):
+        """List ports using admin creds """
+        return self._admin_lister('ports')(*args, **kwargs)
+
+    def _admin_lister(self, resource_type):
+        def temp(*args, **kwargs):
+            temp_method = self.admin_manager.network_client.__getattr__(
+                'list_%s' % resource_type)
+            _, resource_list = temp_method(*args, **kwargs)
+            return resource_list[resource_type]
+        return temp
+
+    def _create_subnet(self, network, client=None, namestart='subnet-smoke',
+                       **kwargs):
+        """
+        Create a subnet for the given network within the cidr block
+        configured for tenant networks.
+        """
+        if not client:
+            client = self.network_client
+
+        def cidr_in_use(cidr, tenant_id):
+            """
+            :return True if a subnet with the given cidr already exists
+                in the tenant, False otherwise
+            """
+            cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
+            return len(cidr_in_use) != 0
+
+        tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+        result = None
+        # Repeatedly attempt subnet creation with sequential cidr
+        # blocks until an unallocated block is found.
+        for subnet_cidr in tenant_cidr.subnet(
+                CONF.network.tenant_network_mask_bits):
+            str_cidr = str(subnet_cidr)
+            if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
+                continue
+
+            subnet = dict(
+                name=data_utils.rand_name(namestart),
+                ip_version=4,
+                network_id=network.id,
+                tenant_id=network.tenant_id,
+                cidr=str_cidr,
+                **kwargs
+            )
+            try:
+                _, result = client.create_subnet(**subnet)
+                break
+            except exceptions.Conflict as e:
+                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+                if not is_overlapping_cidr:
+                    raise
+        self.assertIsNotNone(result, 'Unable to allocate tenant network')
+        subnet = net_resources.DeletableSubnet(client=client,
+                                               **result['subnet'])
+        self.assertEqual(subnet.cidr, str_cidr)
+        self.addCleanup(self.delete_wrapper, subnet.delete)
+        return subnet
+
+    def _create_port(self, network, client=None, namestart='port-quotatest'):
+        if not client:
+            client = self.network_client
+        name = data_utils.rand_name(namestart)
+        _, result = client.create_port(
+            name=name,
+            network_id=network.id,
+            tenant_id=network.tenant_id)
+        self.assertIsNotNone(result, 'Unable to allocate port')
+        port = net_resources.DeletablePort(client=client,
+                                           **result['port'])
+        self.addCleanup(self.delete_wrapper, port.delete)
+        return port
+
+    def _get_server_port_id(self, server, ip_addr=None):
+        ports = self._list_ports(device_id=server['id'],
+                                 fixed_ip=ip_addr)
+        self.assertEqual(len(ports), 1,
+                         "Unable to determine which port to target.")
+        return ports[0]['id']
+
+    def _get_network_by_name(self, network_name):
+        net = self._list_networks(name=network_name)
+        return net_common.AttributeDict(net[0])
+
+    def _create_floating_ip(self, thing, external_network_id, port_id=None,
+                            client=None):
+        if not client:
+            client = self.network_client
+        if not port_id:
+            port_id = self._get_server_port_id(thing)
+        _, result = client.create_floatingip(
+            floating_network_id=external_network_id,
+            port_id=port_id,
+            tenant_id=thing['tenant_id']
+        )
+        floating_ip = net_resources.DeletableFloatingIp(
+            client=client,
+            **result['floatingip'])
+        self.addCleanup(self.delete_wrapper, floating_ip.delete)
+        return floating_ip
+
+    def _associate_floating_ip(self, floating_ip, server):
+        port_id = self._get_server_port_id(server)
+        floating_ip.update(port_id=port_id)
+        self.assertEqual(port_id, floating_ip.port_id)
+        return floating_ip
+
+    def _disassociate_floating_ip(self, floating_ip):
+        """
+        :param floating_ip: type DeletableFloatingIp
+        """
+        floating_ip.update(port_id=None)
+        self.assertIsNone(floating_ip.port_id)
+        return floating_ip
+
+    def _ping_ip_address(self, ip_address, should_succeed=True):
+        cmd = ['ping', '-c1', '-w1', ip_address]
+
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.wait()
+            return (proc.returncode == 0) == should_succeed
+
+        return tempest.test.call_until_true(
+            ping, CONF.compute.ping_timeout, 1)
+
+    def _check_vm_connectivity(self, ip_address,
+                               username=None,
+                               private_key=None,
+                               should_connect=True):
+        """
+        :param ip_address: server to test against
+        :param username: server's ssh username
+        :param private_key: server's ssh private key to be used
+        :param should_connect: True/False indicates positive/negative test
+            positive - attempt ping and ssh
+            negative - attempt ping and fail if succeed
+
+        :raises: AssertError if the result of the connectivity check does
+            not match the value of the should_connect param
+        """
+        if should_connect:
+            msg = "Timed out waiting for %s to become reachable" % ip_address
+        else:
+            msg = "ip address %s is reachable" % ip_address
+        self.assertTrue(self._ping_ip_address(ip_address,
+                                              should_succeed=should_connect),
+                        msg=msg)
+        if should_connect:
+            # no need to check ssh for negative connectivity
+            self.get_remote_client(ip_address, username, private_key)
+
+    def _check_public_network_connectivity(self, ip_address, username,
+                                           private_key, should_connect=True,
+                                           msg=None, servers=None):
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        LOG.debug('checking network connections to IP %s with user: %s' %
+                  (ip_address, username))
+        try:
+            self._check_vm_connectivity(ip_address,
+                                        username,
+                                        private_key,
+                                        should_connect=should_connect)
+        except Exception as e:
+            ex_msg = 'Public network connectivity check failed'
+            if msg:
+                ex_msg += ": " + msg
+            LOG.exception(ex_msg)
+            self._log_console_output(servers)
+            # network debug is called as part of ssh init
+            if not isinstance(e, exceptions.SSHTimeout):
+                debug.log_net_debug()
+            raise
+
+    def _check_tenant_network_connectivity(self, server,
+                                           username,
+                                           private_key,
+                                           should_connect=True,
+                                           servers_for_debug=None):
+        if not CONF.network.tenant_networks_reachable:
+            msg = 'Tenant networks not configured to be reachable.'
+            LOG.info(msg)
+            return
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            for net_name, ip_addresses in server['networks'].iteritems():
+                for ip_address in ip_addresses:
+                    self._check_vm_connectivity(ip_address,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+        except Exception as e:
+            LOG.exception('Tenant network connectivity check failed')
+            self._log_console_output(servers_for_debug)
+            # network debug is called as part of ssh init
+            if not isinstance(e, exceptions.SSHTimeout):
+                debug.log_net_debug()
+            raise
+
+    def _check_remote_connectivity(self, source, dest, should_succeed=True):
+        """
+        Check whether dest can be pinged from an existing ssh connection
+        to source.
+
+        :param source: RemoteClient: an ssh connection from which to ping
+        :param dest: an IP address to ping against
+        :param should_succeed: boolean, whether the ping should succeed
+        :returns: boolean -- True if the ping result matches should_succeed
+        """
+        def ping_remote():
+            try:
+                source.ping_host(dest)
+            except exceptions.SSHExecCommandFailed:
+                LOG.warn('Failed to ping IP: %s via an ssh connection from: %s.'
+                         % (dest, source.ssh_client.host))
+                return not should_succeed
+            return should_succeed
+
+        return tempest.test.call_until_true(ping_remote,
+                                            CONF.compute.ping_timeout,
+                                            1)
+
+    def _create_security_group(self, client=None, tenant_id=None,
+                               namestart='secgroup-smoke'):
+        if client is None:
+            client = self.network_client
+        if tenant_id is None:
+            tenant_id = client.rest_client.tenant_id
+        secgroup = self._create_empty_security_group(namestart=namestart,
+                                                     client=client,
+                                                     tenant_id=tenant_id)
+
+        # Add rules to the security group
+        rules = self._create_loginable_secgroup_rule(secgroup=secgroup)
+        for rule in rules:
+            self.assertEqual(tenant_id, rule.tenant_id)
+            self.assertEqual(secgroup.id, rule.security_group_id)
+        return secgroup
+
+    def _create_empty_security_group(self, client=None, tenant_id=None,
+                                     namestart='secgroup-smoke'):
+        """Create a security group without rules.
+
+        Default rules will be created:
+         - IPv4 egress to any
+         - IPv6 egress to any
+
+        :param tenant_id: secgroup will be created in this tenant
+        :returns: DeletableSecurityGroup -- containing the secgroup created
+        """
+        if client is None:
+            client = self.network_client
+        if not tenant_id:
+            tenant_id = client.rest_client.tenant_id
+        sg_name = data_utils.rand_name(namestart)
+        sg_desc = sg_name + " description"
+        sg_dict = dict(name=sg_name,
+                       description=sg_desc)
+        sg_dict['tenant_id'] = tenant_id
+        _, result = client.create_security_group(**sg_dict)
+        secgroup = net_resources.DeletableSecurityGroup(
+            client=client,
+            **result['security_group']
+        )
+        self.assertEqual(secgroup.name, sg_name)
+        self.assertEqual(tenant_id, secgroup.tenant_id)
+        self.assertEqual(secgroup.description, sg_desc)
+        self.addCleanup(self.delete_wrapper, secgroup.delete)
+        return secgroup
+
+    def _default_security_group(self, client=None, tenant_id=None):
+        """Get default secgroup for given tenant_id.
+
+        :returns: DeletableSecurityGroup -- default secgroup for given tenant
+        """
+        if client is None:
+            client = self.network_client
+        if not tenant_id:
+            tenant_id = client.rest_client.tenant_id
+        sgs = [
+            sg for sg in client.list_security_groups().values()[0]
+            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
+        ]
+        msg = "No default security group for tenant %s." % (tenant_id)
+        self.assertTrue(len(sgs) > 0, msg)
+        if len(sgs) > 1:
+            msg = "Found %d default security groups" % len(sgs)
+            raise exc.NeutronClientNoUniqueMatch(msg=msg)
+        return net_resources.DeletableSecurityGroup(client=client,
+                                                    **sgs[0])
+
+    def _create_security_group_rule(self, secgroup=None, client=None,
+                                    tenant_id=None, **kwargs):
+        """Create a rule from a dictionary of rule parameters.
+
+        Create a rule in a secgroup. If secgroup is not defined, the default
+        secgroup for tenant_id is used.
+
+        :param secgroup: type DeletableSecurityGroup.
+        :param tenant_id: if secgroup not passed -- the tenant in which to
+            search for default secgroup
+        :param kwargs: a dictionary containing rule parameters:
+            for example, to allow incoming ssh:
+            rule = {
+                    direction: 'ingress'
+                    protocol:'tcp',
+                    port_range_min: 22,
+                    port_range_max: 22
+                    }
+        """
+        if client is None:
+            client = self.network_client
+        if not tenant_id:
+            tenant_id = client.rest_client.tenant_id
+        if secgroup is None:
+            secgroup = self._default_security_group(client=client,
+                                                    tenant_id=tenant_id)
+
+        ruleset = dict(security_group_id=secgroup.id,
+                       tenant_id=secgroup.tenant_id)
+        ruleset.update(kwargs)
+
+        _, sg_rule = client.create_security_group_rule(**ruleset)
+        sg_rule = net_resources.DeletableSecurityGroupRule(
+            client=client,
+            **sg_rule['security_group_rule']
+        )
+        self.addCleanup(self.delete_wrapper, sg_rule.delete)
+        self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
+        self.assertEqual(secgroup.id, sg_rule.security_group_id)
+
+        return sg_rule
+
+    def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
+        """These rules are intended to permit inbound ssh and icmp
+        traffic from all sources, so no group_id is provided.
+        Setting a group_id would only permit traffic from ports
+        belonging to the same security group.
+        """
+
+        if client is None:
+            client = self.network_client
+        rules = []
+        rulesets = [
+            dict(
+                # ssh
+                protocol='tcp',
+                port_range_min=22,
+                port_range_max=22,
+            ),
+            dict(
+                # ping
+                protocol='icmp',
+            )
+        ]
+        for ruleset in rulesets:
+            for r_direction in ['ingress', 'egress']:
+                ruleset['direction'] = r_direction
+                try:
+                    sg_rule = self._create_security_group_rule(
+                        client=client, secgroup=secgroup, **ruleset)
+                except exceptions.Conflict as ex:
+                    # if rule already exists - skip rule and continue
+                    msg = 'Security group rule already exists'
+                    if msg not in ex._error_string:
+                        raise ex
+                else:
+                    self.assertEqual(r_direction, sg_rule.direction)
+                    rules.append(sg_rule)
+
+        return rules
+
+    def _ssh_to_server(self, server, private_key):
+        ssh_login = CONF.compute.image_ssh_user
+        return self.get_remote_client(server,
+                                      username=ssh_login,
+                                      private_key=private_key)
+
+    def _get_router(self, client=None, tenant_id=None):
+        """Retrieve a router for the given tenant id.
+
+        If a public router has been configured, it will be returned.
+
+        If a public router has not been configured, but a public
+        network has, a tenant router will be created and returned that
+        routes traffic to the public network.
+        """
+        if not client:
+            client = self.network_client
+        if not tenant_id:
+            tenant_id = client.rest_client.tenant_id
+        router_id = CONF.network.public_router_id
+        network_id = CONF.network.public_network_id
+        if router_id:
+            result = client.show_router(router_id)
+            return net_resources.AttributeDict(**result['router'])
+        elif network_id:
+            router = self._create_router(client, tenant_id)
+            router.set_gateway(network_id)
+            return router
+        else:
+            raise Exception("Neither of 'public_router_id' or "
+                            "'public_network_id' has been defined.")
+
+    def _create_router(self, client=None, tenant_id=None,
+                       namestart='router-smoke'):
+        if not client:
+            client = self.network_client
+        if not tenant_id:
+            tenant_id = client.rest_client.tenant_id
+        name = data_utils.rand_name(namestart)
+        _, result = client.create_router(name=name,
+                                         admin_state_up=True,
+                                         tenant_id=tenant_id)
+        router = net_resources.DeletableRouter(client=client,
+                                               **result['router'])
+        self.assertEqual(router.name, name)
+        self.addCleanup(self.delete_wrapper, router.delete)
+        return router
+
+    def create_networks(self, client=None, tenant_id=None):
+        """Create a network with a subnet connected to a router.
+
+        The baremetal driver is a special case since all nodes are
+        on the same shared network.
+
+        :returns: network, subnet, router
+        """
+        if CONF.baremetal.driver_enabled:
+            # NOTE(Shrews): This exception is for environments where tenant
+            # credential isolation is available, but network separation is
+            # not (the current baremetal case). Likely can be removed when
+            # test account mgmt is reworked:
+            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+            network = self._get_network_by_name(
+                CONF.compute.fixed_network_name)
+            router = None
+            subnet = None
+        else:
+            network = self._create_network(client=client, tenant_id=tenant_id)
+            router = self._get_router(client=client, tenant_id=tenant_id)
+            subnet = self._create_subnet(network=network, client=client)
+            subnet.add_to_router(router.id)
+        return network, subnet, router
+
+
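
Every helper in NeutronScenarioTest registers its cleanup through delete_wrapper, whose reworked *args/**kwargs signature (earlier in this file's diff) lets it wrap either a bound delete method of the new net_resources wrappers or a client call that still takes explicit arguments. A standalone sketch of the forwarding, with fake classes and the NotFound handling omitted:

    def delete_wrapper(delete_thing, *args, **kwargs):
        # forwards everything to the delete callable, as the real helper does
        delete_thing(*args, **kwargs)


    class FakeRouter(object):
        def delete(self):
            print('router deleted')


    class FakeServersClient(object):
        def delete_server(self, server_id):
            print('server %s deleted' % server_id)


    delete_wrapper(FakeRouter().delete)                         # bound method, no id
    delete_wrapper(FakeServersClient().delete_server, 'abc-1')  # id forwarded via *args
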
 class OfficialClientTest(tempest.test.BaseTestCase):
     """
     Official Client test base class for scenario testing.
@@ -531,8 +1073,8 @@
     def addCleanup_with_wait(self, things, thing_id,
                              error_status='ERROR',
                              exc_type=nova_exceptions.NotFound,
-                             cleanup_callable=None, cleanup_args=[],
-                             cleanup_kwargs={}):
+                             cleanup_callable=None, cleanup_args=None,
+                             cleanup_kwargs=None):
         """Adds wait for ansyc resource deletion at the end of cleanups
 
         @param things: type of the resource to delete
@@ -544,6 +1086,10 @@
             usually a delete method. if not used, will try to use:
             things.delete(thing_id)
         """
+        if cleanup_args is None:
+            cleanup_args = []
+        if cleanup_kwargs is None:
+            cleanup_kwargs = {}
         if cleanup_callable is None:
             LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
                       " default".format(rclass=things, id=thing_id))
@@ -725,7 +1271,7 @@
 
     def create_server(self, client=None, name=None, image=None, flavor=None,
                       wait_on_boot=True, wait_on_delete=True,
-                      create_kwargs={}):
+                      create_kwargs=None):
         """Creates VM instance.
 
         @param client: compute client to create the instance
@@ -743,6 +1289,8 @@
             image = CONF.compute.image_ref
         if flavor is None:
             flavor = CONF.compute.flavor_ref
+        if create_kwargs is None:
+            create_kwargs = {}
 
         fixed_network_name = CONF.compute.fixed_network_name
         if 'nics' not in create_kwargs and fixed_network_name:
@@ -872,7 +1420,9 @@
         self.status_timeout(
             self.volume_client.volumes, volume_id, status)
 
-    def _image_create(self, name, fmt, path, properties={}):
+    def _image_create(self, name, fmt, path, properties=None):
+        if properties is None:
+            properties = {}
         name = data_utils.rand_name('%s-' % name)
         image_file = open(path, 'rb')
         self.addCleanup(image_file.close)
@@ -1274,6 +1824,10 @@
                          "Unable to determine which port to target.")
         return ports[0]['id']
 
+    def _get_network_by_name(self, network_name):
+        net = self._list_networks(name=network_name)
+        return net_common.AttributeDict(net[0])
+
     def _create_floating_ip(self, thing, external_network_id, port_id=None):
         if not port_id:
             port_id = self._get_server_port_id(thing)
@@ -1671,17 +2225,31 @@
         self.addCleanup(self.delete_wrapper, router)
         return router
 
-    def _create_networks(self, tenant_id=None):
+    def create_networks(self, tenant_id=None):
         """Create a network with a subnet connected to a router.
 
+        The baremetal driver is a special case since all nodes are
+        on the same shared network.
+
         :returns: network, subnet, router
         """
-        if tenant_id is None:
-            tenant_id = self.tenant_id
-        network = self._create_network(tenant_id)
-        router = self._get_router(tenant_id)
-        subnet = self._create_subnet(network)
-        subnet.add_to_router(router.id)
+        if CONF.baremetal.driver_enabled:
+            # NOTE(Shrews): This exception is for environments where tenant
+            # credential isolation is available, but network separation is
+            # not (the current baremetal case). Likely can be removed when
+            # test account mgmt is reworked:
+            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+            network = self._get_network_by_name(
+                CONF.compute.fixed_network_name)
+            router = None
+            subnet = None
+        else:
+            if tenant_id is None:
+                tenant_id = self.tenant_id
+            network = self._create_network(tenant_id)
+            router = self._get_router(tenant_id)
+            subnet = self._create_subnet(network)
+            subnet.add_to_router(router.id)
         return network, subnet, router
 
 
@@ -1866,12 +2434,17 @@
         self._list_and_check_container_objects(container_name,
                                                not_present_obj=[filename])
 
-    def _list_and_check_container_objects(self, container_name, present_obj=[],
-                                          not_present_obj=[]):
+    def _list_and_check_container_objects(self, container_name,
+                                          present_obj=None,
+                                          not_present_obj=None):
         """
         List objects for a given container and assert which are present and
         which are not.
         """
+        if present_obj is None:
+            present_obj = []
+        if not_present_obj is None:
+            not_present_obj = []
         _, object_list = self.container_client.list_container_contents(
             container_name)
         if present_obj:
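
The signature changes above, and the matching ones later in this diff, replace mutable default arguments with a None sentinel. A minimal, self-contained sketch of the difference (nothing here is Tempest code): a mutable default is evaluated once at definition time and shared across calls, while the None-plus-reassignment idiom gives every call a fresh container.

def buggy_append(item, bucket=[]):
    # The same list object is reused on every call that omits 'bucket'.
    bucket.append(item)
    return bucket

def safe_append(item, bucket=None):
    # A fresh list is created per call when the caller does not pass one.
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket

assert buggy_append('a') == ['a']
assert buggy_append('b') == ['a', 'b']   # state leaks between calls
assert safe_append('a') == ['a']
assert safe_append('b') == ['b']         # no shared state
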
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 8191984..35e50e8 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -73,6 +73,35 @@
         self.server_ips = {}
         self.server_fixed_ips = {}
         self._create_security_group()
+        self._set_net_and_subnet()
+
+    def _set_net_and_subnet(self):
+        """
+        Query and set appropriate network and subnet attributes to be used
+        for the test.  Existing tenant networks are used if they are found.
+        The configured private network and its associated subnet are used
+        as a fallback in the absence of tenant networking.
+        """
+        try:
+            tenant_net = self._list_networks(tenant_id=self.tenant_id)[0]
+        except IndexError:
+            tenant_net = None
+
+        if tenant_net:
+            tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
+            self.subnet = net_common.DeletableSubnet(
+                client=self.network_client,
+                **tenant_subnet)
+            self.network = tenant_net
+        else:
+            self.network = self._get_network_by_name(
+                config.compute.fixed_network_name)
+            # TODO(adam_g): We are assuming that the first subnet associated
+            # with the fixed network is the one we want.  In the future, we
+            # should instead pull a subnet id from config, which is set by
+            # devstack/admin/etc.
+            subnet = self._list_subnets(network_id=self.network['id'])[0]
+            self.subnet = net_common.AttributeDict(subnet)
 
     def _create_security_group(self):
         self.security_group = self._create_security_group_neutron(
@@ -96,10 +125,9 @@
     def _create_server(self, name):
         keypair = self.create_keypair(name='keypair-%s' % name)
         security_groups = [self.security_group.name]
-        net = self._list_networks(tenant_id=self.tenant_id)[0]
         create_kwargs = {
             'nics': [
-                {'net-id': net['id']},
+                {'net-id': self.network['id']},
             ],
             'key_name': keypair.name,
             'security_groups': security_groups,
@@ -107,6 +135,7 @@
         server = self.create_server(name=name,
                                     create_kwargs=create_kwargs)
         self.servers_keypairs[server.id] = keypair
+        net_name = self.network['name']
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             public_network_id = config.network.public_network_id
@@ -115,8 +144,8 @@
             self.floating_ips[floating_ip] = server
             self.server_ips[server.id] = floating_ip.floating_ip_address
         else:
-            self.server_ips[server.id] = server.networks[net['name']][0]
-        self.server_fixed_ips[server.id] = server.networks[net['name']][0]
+            self.server_ips[server.id] = server.networks[net_name][0]
+        self.server_fixed_ips[server.id] = server.networks[net_name][0]
         self.assertTrue(self.servers_keypairs)
         return server
 
@@ -132,7 +161,6 @@
         1. SSH to the instance
         2. Start two http backends listening on ports 80 and 88 respectively
         """
-
         for server_id, ip in self.server_ips.iteritems():
             private_key = self.servers_keypairs[server_id].private_key
             server_name = self.compute_client.servers.get(server_id).name
@@ -196,10 +224,6 @@
 
     def _create_pool(self):
         """Create a pool with ROUND_ROBIN algorithm."""
-        # get tenant subnet and verify there's only one
-        subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
-        self.subnet = net_common.DeletableSubnet(client=self.network_client,
-                                                 **subnet)
         self.pool = super(TestLoadBalancerBasic, self)._create_pool(
             lb_method='ROUND_ROBIN',
             protocol='HTTP',
@@ -288,7 +312,6 @@
             self.assertEqual(expected,
                              set(resp))
 
-    @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
         self._create_server('server1')
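
A standalone sketch of the lookup order _set_net_and_subnet() follows above (pick_network() is illustrative only, not part of the patch): prefer the first network owned by the tenant, and fall back to the configured fixed network when the tenant owns none.

def pick_network(tenant_networks, fallback_network):
    # Mirrors the try/except IndexError fallback used in the test setup.
    try:
        return tenant_networks[0]
    except IndexError:
        return fallback_network

assert pick_network(['tenant-net'], 'private') == 'tenant-net'
assert pick_network([], 'private') == 'private'
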
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 4bc4a98..8a8e387 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -124,7 +124,7 @@
         self.assertEqual('available', volume['status'])
 
     def create_and_add_security_group(self):
-        secgroup = self._create_security_group_nova()
+        secgroup = self._create_security_group()
         self.servers_client.add_security_group(self.server['id'],
                                                secgroup['name'])
         self.addCleanup(self.servers_client.remove_security_group,
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 431de9a..47f2f1a 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -50,16 +50,13 @@
             cls.enabled = False
             raise cls.skipException(msg)
 
-    def setUp(self):
-        super(TestNetworkAdvancedServerOps, self).setUp()
+    def _setup_network_and_servers(self):
         key_name = data_utils.rand_name('keypair-smoke-')
         self.keypair = self.create_keypair(name=key_name)
         security_group =\
             self._create_security_group_neutron(tenant_id=self.tenant_id)
-        network = self._create_network(self.tenant_id)
-        router = self._get_router(self.tenant_id)
-        subnet = self._create_subnet(network)
-        subnet.add_to_router(router.id)
+        network, subnet, router = self.create_networks(self.tenant_id)
+
         public_network_id = CONF.network.public_network_id
         create_kwargs = {
             'nics': [
@@ -68,11 +65,14 @@
             'key_name': self.keypair.name,
             'security_groups': [security_group.name],
         }
-        server_name = data_utils.rand_name('server-smoke-%d-')
+        server_name = data_utils.rand_name('server-smoke')
         self.server = self.create_server(name=server_name,
                                          create_kwargs=create_kwargs)
         self.floating_ip = self._create_floating_ip(self.server,
                                                     public_network_id)
+        # Verify that we can indeed connect to the server before we mess with
+        # its state
+        self._wait_server_status_and_check_network_connectivity()
 
     def _check_network_connectivity(self, should_connect=True):
         username = CONF.compute.image_ssh_user
@@ -92,6 +92,7 @@
 
     @test.services('compute', 'network')
     def test_server_connectivity_stop_start(self):
+        self._setup_network_and_servers()
         self.server.stop()
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'SHUTOFF')
@@ -101,11 +102,13 @@
 
     @test.services('compute', 'network')
     def test_server_connectivity_reboot(self):
+        self._setup_network_and_servers()
         self.server.reboot()
         self._wait_server_status_and_check_network_connectivity()
 
     @test.services('compute', 'network')
     def test_server_connectivity_rebuild(self):
+        self._setup_network_and_servers()
         image_ref_alt = CONF.compute.image_ref_alt
         self.server.rebuild(image_ref_alt)
         self._wait_server_status_and_check_network_connectivity()
@@ -114,6 +117,7 @@
                           'Pause is not available.')
     @test.services('compute', 'network')
     def test_server_connectivity_pause_unpause(self):
+        self._setup_network_and_servers()
         self.server.pause()
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'PAUSED')
@@ -125,6 +129,7 @@
                           'Suspend is not available.')
     @test.services('compute', 'network')
     def test_server_connectivity_suspend_resume(self):
+        self._setup_network_and_servers()
         self.server.suspend()
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'SUSPENDED')
@@ -140,6 +145,7 @@
         if resize_flavor == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
             raise self.skipException(msg)
+        self._setup_network_and_servers()
         resize_flavor = CONF.compute.flavor_ref_alt
         self.server.resize(resize_flavor)
         self.status_timeout(self.compute_client.servers, self.server.id,
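
The hunks above move keypair, network and server creation out of setUp() and into _setup_network_and_servers(), which each test invokes explicitly. A hedged, self-contained sketch of that pattern (class and method names here are invented): building the expensive fixture inside the test body lets a test skip early, as the resize test does, without creating resources it will never use.

import unittest

class DeferredSetupExample(unittest.TestCase):

    def _setup_expensive_fixture(self):
        # Stand-in for the keypair/network/server creation in the real test.
        self.fixture = 'ready'

    def test_uses_fixture(self):
        self._setup_expensive_fixture()
        self.assertEqual('ready', self.fixture)

    def test_skips_before_setup(self):
        precondition_met = False  # stand-in for a config-driven check
        if not precondition_met:
            self.skipTest('precondition not met; nothing was created')
        self._setup_expensive_fixture()
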
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index bba034b..e8dba6a 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -18,13 +18,13 @@
 
 import testtools
 
-from tempest.api.network import common as net_common
 from tempest.common import debug
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
+from tempest.services.network import resources as net_resources
 from tempest import test
 
 CONF = config.CONF
@@ -34,7 +34,7 @@
                                            ['floating_ip', 'server'])
 
 
-class TestNetworkBasicOps(manager.NetworkScenarioTest):
+class TestNetworkBasicOps(manager.NeutronScenarioTest):
 
     """
     This smoke test suite assumes that Nova has been configured to
@@ -96,21 +96,23 @@
             if not test.is_extension_enabled(ext, 'network'):
                 msg = "%s extension not enabled." % ext
                 raise cls.skipException(msg)
-        cls.check_preconditions()
 
     def setUp(self):
         super(TestNetworkBasicOps, self).setUp()
+        self.keypairs = {}
+        self.servers = []
+
+    def _setup_network_and_servers(self):
         self.security_group = \
-            self._create_security_group_neutron(tenant_id=self.tenant_id)
-        self.network, self.subnet, self.router = self._create_networks()
+            self._create_security_group(tenant_id=self.tenant_id)
+        self.network, self.subnet, self.router = self.create_networks()
         self.check_networks()
-        self.servers = {}
+
         name = data_utils.rand_name('server-smoke')
-        serv_dict = self._create_server(name, self.network)
-        self.servers[serv_dict['server']] = serv_dict['keypair']
+        server = self._create_server(name, self.network)
         self._check_tenant_network_connectivity()
 
-        self._create_and_associate_floating_ips()
+        self._create_and_associate_floating_ips(server)
 
     def check_networks(self):
         """
@@ -124,47 +126,53 @@
         self.assertIn(self.network.name, seen_names)
         self.assertIn(self.network.id, seen_ids)
 
-        seen_subnets = self._list_subnets()
-        seen_net_ids = [n['network_id'] for n in seen_subnets]
-        seen_subnet_ids = [n['id'] for n in seen_subnets]
-        self.assertIn(self.network.id, seen_net_ids)
-        self.assertIn(self.subnet.id, seen_subnet_ids)
+        if self.subnet:
+            seen_subnets = self._list_subnets()
+            seen_net_ids = [n['network_id'] for n in seen_subnets]
+            seen_subnet_ids = [n['id'] for n in seen_subnets]
+            self.assertIn(self.network.id, seen_net_ids)
+            self.assertIn(self.subnet.id, seen_subnet_ids)
 
-        seen_routers = self._list_routers()
-        seen_router_ids = [n['id'] for n in seen_routers]
-        seen_router_names = [n['name'] for n in seen_routers]
-        self.assertIn(self.router.name,
-                      seen_router_names)
-        self.assertIn(self.router.id,
-                      seen_router_ids)
+        if self.router:
+            seen_routers = self._list_routers()
+            seen_router_ids = [n['id'] for n in seen_routers]
+            seen_router_names = [n['name'] for n in seen_routers]
+            self.assertIn(self.router.name,
+                          seen_router_names)
+            self.assertIn(self.router.id,
+                          seen_router_ids)
 
     def _create_server(self, name, network):
-        keypair = self.create_keypair(name='keypair-%s' % name)
-        security_groups = [self.security_group.name]
+        keypair = self.create_keypair()
+        self.keypairs[keypair['name']] = keypair
+        security_groups = [self.security_group]
         create_kwargs = {
             'nics': [
                 {'net-id': network.id},
             ],
-            'key_name': keypair.name,
+            'key_name': keypair['name'],
             'security_groups': security_groups,
         }
         server = self.create_server(name=name, create_kwargs=create_kwargs)
-        return dict(server=server, keypair=keypair)
+        self.servers.append(server)
+        return server
+
+    def _get_server_key(self, server):
+        return self.keypairs[server['key_name']]['private_key']
 
     def _check_tenant_network_connectivity(self):
         ssh_login = CONF.compute.image_ssh_user
-        for server, key in self.servers.iteritems():
+        for server in self.servers:
             # call the common method in the parent class
             super(TestNetworkBasicOps, self).\
                 _check_tenant_network_connectivity(
-                    server, ssh_login, key.private_key,
-                    servers_for_debug=self.servers.keys())
+                    server, ssh_login, self._get_server_key(server),
+                    servers_for_debug=self.servers)
 
-    def _create_and_associate_floating_ips(self):
+    def _create_and_associate_floating_ips(self, server):
         public_network_id = CONF.network.public_network_id
-        for server in self.servers.keys():
-            floating_ip = self._create_floating_ip(server, public_network_id)
-            self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
+        floating_ip = self._create_floating_ip(server, public_network_id)
+        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
 
     def _check_public_network_connectivity(self, should_connect=True,
                                            msg=None):
@@ -173,11 +181,11 @@
         ip_address = floating_ip.floating_ip_address
         private_key = None
         if should_connect:
-            private_key = self.servers[server].private_key
+            private_key = self._get_server_key(server)
         # call the common method in the parent class
         super(TestNetworkBasicOps, self)._check_public_network_connectivity(
             ip_address, ssh_login, private_key, should_connect, msg,
-            self.servers.keys())
+            self.servers)
 
     def _disassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
@@ -189,14 +197,13 @@
         floating_ip, server = self.floating_ip_tuple
         name = data_utils.rand_name('new_server-smoke-')
         # create a new server for the floating ip
-        serv_dict = self._create_server(name, self.network)
-        self.servers[serv_dict['server']] = serv_dict['keypair']
-        self._associate_floating_ip(floating_ip, serv_dict['server'])
+        server = self._create_server(name, self.network)
+        self._associate_floating_ip(floating_ip, server)
         self.floating_ip_tuple = Floating_IP_tuple(
-            floating_ip, serv_dict['server'])
+            floating_ip, server)
 
     def _create_new_network(self):
-        self.new_net = self._create_network(self.tenant_id)
+        self.new_net = self._create_network(tenant_id=self.tenant_id)
         self.new_subnet = self._create_subnet(
             network=self.new_net,
             gateway_ip=None)
@@ -204,27 +211,27 @@
     def _hotplug_server(self):
         old_floating_ip, server = self.floating_ip_tuple
         ip_address = old_floating_ip.floating_ip_address
-        private_key = self.servers[server].private_key
+        private_key = self._get_server_key(server)
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key)
         old_nic_list = self._get_server_nics(ssh_client)
         # get a port from a list of one item
-        port_list = self._list_ports(device_id=server.id)
+        port_list = self._list_ports(device_id=server['id'])
         self.assertEqual(1, len(port_list))
         old_port = port_list[0]
-        self.compute_client.servers.interface_attach(server=server,
-                                                     net_id=self.new_net.id,
-                                                     port_id=None,
-                                                     fixed_ip=None)
-        # move server to the head of the cleanup list
-        self.addCleanup(self.delete_timeout,
-                        self.compute_client.servers,
-                        server.id)
-        self.addCleanup(self.delete_wrapper, server)
+        _, interface = self.interface_client.create_interface(
+            server=server['id'],
+            network_id=self.new_net.id)
+        self.addCleanup(self.network_client.wait_for_resource_deletion,
+                        'port',
+                        interface['port_id'])
+        self.addCleanup(self.delete_wrapper,
+                        self.interface_client.delete_interface,
+                        server['id'], interface['port_id'])
 
         def check_ports():
             self.new_port_list = [port for port in
-                                  self._list_ports(device_id=server.id)
+                                  self._list_ports(device_id=server['id'])
                                   if port != old_port]
             return len(self.new_port_list) == 1
 
@@ -233,8 +240,8 @@
             raise exceptions.TimeoutException("No new port attached to the "
                                               "server in time (%s sec) !"
                                               % CONF.network.build_timeout)
-        new_port = net_common.DeletablePort(client=self.network_client,
-                                            **self.new_port_list[0])
+        new_port = net_resources.DeletablePort(client=self.network_client,
+                                               **self.new_port_list[0])
 
         def check_new_nic():
             new_nic_list = self._get_server_nics(ssh_client)
@@ -267,7 +274,7 @@
         # get internal ports' ips:
         # get all network ports in the new network
         internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
-                        self._list_ports(tenant_id=server.tenant_id,
+                        self._list_ports(tenant_id=server['tenant_id'],
                                          network_id=network.id)
                         if p['device_owner'].startswith('network'))
 
@@ -283,8 +290,8 @@
             LOG.info(msg)
             return
 
-        subnet = self.network_client.list_subnets(
-            network_id=CONF.network.public_network_id)['subnets']
+        subnet = self._list_subnets(
+            network_id=CONF.network.public_network_id)
         self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))
 
         external_ips = [subnet[0]['gateway_ip']]
@@ -293,7 +300,7 @@
 
     def _check_server_connectivity(self, floating_ip, address_list):
         ip_address = floating_ip.floating_ip_address
-        private_key = self.servers[self.floating_ip_tuple.server].private_key
+        private_key = self._get_server_key(self.floating_ip_tuple.server)
         ssh_source = self._ssh_to_server(ip_address, private_key)
 
         for remote_ip in address_list:
@@ -345,6 +352,7 @@
 
 
         """
+        self._setup_network_and_servers()
         self._check_public_network_connectivity(should_connect=True)
         self._check_network_internal_connectivity(network=self.network)
         self._check_network_external_connectivity()
@@ -370,7 +378,7 @@
         4. check VM can ping new network dhcp port
 
         """
-
+        self._setup_network_and_servers()
         self._check_public_network_connectivity(should_connect=True)
         self._create_new_network()
         self._hotplug_server()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index ecb802f..520c232 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -26,7 +26,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
+class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
 
     """
     This test suite assumes that Nova has been configured to
@@ -99,7 +99,7 @@
         """
 
         def __init__(self, credentials):
-            self.manager = clients.OfficialClientManager(credentials)
+            self.manager = clients.Manager(credentials)
             # Credentials from manager are filled with both names and IDs
             self.creds = self.manager.credentials
             self.network = None
@@ -113,13 +113,18 @@
             self.subnet = subnet
             self.router = router
 
-        def _get_tenant_credentials(self):
-            # FIXME(andreaf) Unused method
-            return self.creds
-
     @classmethod
     def check_preconditions(cls):
+        if CONF.baremetal.driver_enabled:
+            msg = ('Not currently supported by baremetal.')
+            cls.enabled = False
+            raise cls.skipException(msg)
         super(TestSecurityGroupsBasicOps, cls).check_preconditions()
+        # need alt_creds here to check preconditions
+        cls.alt_creds = cls.alt_credentials()
+        cls.alt_manager = clients.Manager(cls.alt_creds)
+        # Credentials from the manager are filled with both IDs and Names
+        cls.alt_creds = cls.alt_manager.credentials
         if (cls.alt_creds is None) or \
                 (cls.tenant_id is cls.alt_creds.tenant_id):
             msg = 'No alt_tenant defined'
@@ -137,11 +142,6 @@
         # Create no network resources for these tests.
         cls.set_network_resources()
         super(TestSecurityGroupsBasicOps, cls).setUpClass()
-        cls.alt_creds = cls.alt_credentials()
-        cls.alt_manager = clients.OfficialClientManager(cls.alt_creds)
-        # Credentials from the manager are filled with both IDs and Names
-        cls.alt_creds = cls.alt_manager.credentials
-        cls.check_preconditions()
         # TODO(mnewby) Consider looking up entities as needed instead
         # of storing them as collections on the class.
         cls.floating_ips = {}
@@ -162,21 +162,22 @@
         self._verify_network_details(self.primary_tenant)
         self._verify_mac_addr(self.primary_tenant)
 
-    def _create_tenant_keypairs(self, tenant_id):
-        keypair = self.create_keypair(
-            name=data_utils.rand_name('keypair-smoke-'))
-        self.tenants[tenant_id].keypair = keypair
+    def _create_tenant_keypairs(self, tenant):
+        keypair = self.create_keypair(tenant.manager.keypairs_client)
+        tenant.keypair = keypair
 
     def _create_tenant_security_groups(self, tenant):
         access_sg = self._create_empty_security_group(
             namestart='secgroup_access-',
-            tenant_id=tenant.creds.tenant_id
+            tenant_id=tenant.creds.tenant_id,
+            client=tenant.manager.network_client
         )
 
         # don't use default secgroup since it allows in-tenant traffic
         def_sg = self._create_empty_security_group(
             namestart='secgroup_general-',
-            tenant_id=tenant.creds.tenant_id
+            tenant_id=tenant.creds.tenant_id,
+            client=tenant.manager.network_client
         )
         tenant.security_groups.update(access=access_sg, default=def_sg)
         ssh_rule = dict(
@@ -185,7 +186,9 @@
             port_range_max=22,
             direction='ingress',
         )
-        self._create_security_group_rule(secgroup=access_sg, **ssh_rule)
+        self._create_security_group_rule(secgroup=access_sg,
+                                         client=tenant.manager.network_client,
+                                         **ssh_rule)
 
     def _verify_network_details(self, tenant):
         # Checks that we see the newly created network/subnet/router via
@@ -212,7 +215,7 @@
 
         myport = (tenant.router.id, tenant.subnet.id)
         router_ports = [(i['device_id'], i['fixed_ips'][0]['subnet_id']) for i
-                        in self.network_client.list_ports()['ports']
+                        in self._list_ports()
                         if self._is_router_port(i)]
 
         self.assertIn(myport, router_ports)
@@ -229,17 +232,16 @@
         """
         self._set_compute_context(tenant)
         if security_groups is None:
-            security_groups = [tenant.security_groups['default'].name]
+            security_groups = [tenant.security_groups['default']]
         create_kwargs = {
             'nics': [
                 {'net-id': tenant.network.id},
             ],
-            'key_name': tenant.keypair.name,
+            'key_name': tenant.keypair['name'],
             'security_groups': security_groups,
             'tenant_id': tenant.creds.tenant_id
         }
-        server = self.create_server(name=name, create_kwargs=create_kwargs)
-        return server
+        return self.create_server(name=name, create_kwargs=create_kwargs)
 
     def _create_tenant_servers(self, tenant, num=1):
         for i in range(num):
@@ -257,27 +259,30 @@
         in order to access tenant internal network
         workaround ip namespace
         """
-        secgroups = [sg.name for sg in tenant.security_groups.values()]
+        secgroups = tenant.security_groups.values()
         name = 'server-{tenant}-access_point-'.format(
             tenant=tenant.creds.tenant_name)
         name = data_utils.rand_name(name)
         server = self._create_server(name, tenant,
                                      security_groups=secgroups)
         tenant.access_point = server
-        self._assign_floating_ips(server)
+        self._assign_floating_ips(tenant, server)
 
-    def _assign_floating_ips(self, server):
+    def _assign_floating_ips(self, tenant, server):
         public_network_id = CONF.network.public_network_id
-        floating_ip = self._create_floating_ip(server, public_network_id)
-        self.floating_ips.setdefault(server, floating_ip)
+        floating_ip = self._create_floating_ip(
+            server, public_network_id,
+            client=tenant.manager.network_client)
+        self.floating_ips.setdefault(server['id'], floating_ip)
 
     def _create_tenant_network(self, tenant):
-        network, subnet, router = self._create_networks(tenant.creds.tenant_id)
+        network, subnet, router = self.create_networks(
+            client=tenant.manager.network_client)
         tenant.set_network(network, subnet, router)
 
     def _set_compute_context(self, tenant):
-        self.compute_client = tenant.manager.compute_client
-        return self.compute_client
+        self.servers_client = tenant.manager.servers_client
+        return self.servers_client
 
     def _deploy_tenant(self, tenant_or_id):
         """
@@ -290,12 +295,10 @@
         """
         if not isinstance(tenant_or_id, self.TenantProperties):
             tenant = self.tenants[tenant_or_id]
-            tenant_id = tenant_or_id
         else:
             tenant = tenant_or_id
-            tenant_id = tenant.creds.tenant_id
         self._set_compute_context(tenant)
-        self._create_tenant_keypairs(tenant_id)
+        self._create_tenant_keypairs(tenant)
         self._create_tenant_network(tenant)
         self._create_tenant_security_groups(tenant)
         self._set_access_point(tenant)
@@ -305,12 +308,12 @@
         returns the ip (floating/internal) of a server
         """
         if floating:
-            server_ip = self.floating_ips[server].floating_ip_address
+            server_ip = self.floating_ips[server['id']].floating_ip_address
         else:
             server_ip = None
-            network_name = self.tenants[server.tenant_id].network.name
-            if network_name in server.networks:
-                server_ip = server.networks[network_name][0]
+            network_name = self.tenants[server['tenant_id']].network.name
+            if network_name in server['addresses']:
+                server_ip = server['addresses'][network_name][0]['addr']
         return server_ip
 
     def _connect_to_access_point(self, tenant):
@@ -318,8 +321,8 @@
         create ssh connection to tenant access point
         """
         access_point_ssh = \
-            self.floating_ips[tenant.access_point].floating_ip_address
-        private_key = tenant.keypair.private_key
+            self.floating_ips[tenant.access_point['id']].floating_ip_address
+        private_key = tenant.keypair['private_key']
         access_point_ssh = self._ssh_to_server(access_point_ssh,
                                                private_key=private_key)
         return access_point_ssh
@@ -383,6 +386,7 @@
         )
         self._create_security_group_rule(
             secgroup=dest_tenant.security_groups['default'],
+            client=dest_tenant.manager.network_client,
             **ruleset
         )
         access_point_ssh = self._connect_to_access_point(source_tenant)
@@ -396,6 +400,7 @@
         # allow reverse traffic and check
         self._create_security_group_rule(
             secgroup=source_tenant.security_groups['default'],
+            client=source_tenant.manager.network_client,
             **ruleset
         )
 
@@ -414,8 +419,7 @@
         mac_addr = mac_addr.strip().lower()
         # Get the fixed_ips and mac_address fields of all ports. Select
         # only those two columns to reduce the size of the response.
-        port_list = self.network_client.list_ports(
-            fields=['fixed_ips', 'mac_address'])['ports']
+        port_list = self._list_ports(fields=['fixed_ips', 'mac_address'])
         port_detail_list = [
             (port['fixed_ips'][0]['subnet_id'],
              port['fixed_ips'][0]['ip_address'],
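
A hedged, standalone illustration of the change running through the security-group test above (FakeNetworkClient and the helper are invented for this sketch): resource-creating helpers now receive the tenant's own network client, so each tenant's security groups, rules and floating IPs are created under that tenant's credentials instead of the primary tenant's.

class FakeNetworkClient(object):
    def __init__(self, tenant_name):
        self.tenant_name = tenant_name

    def create_security_group(self, name):
        return {'name': name, 'owner': self.tenant_name}

def create_empty_security_group(name, client):
    # The caller decides which tenant's client performs the request.
    return client.create_security_group(name)

primary = FakeNetworkClient('primary')
alt = FakeNetworkClient('alt')
assert create_empty_security_group('sg-access', alt)['owner'] == 'alt'
assert create_empty_security_group('sg-default', primary)['owner'] == 'primary'
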
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 77e195d..b38b1a3 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -102,7 +102,7 @@
     @test.services('compute', 'network')
     def test_server_basicops(self):
         self.add_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
         self.boot_instance()
         self.verify_ssh()
         self.servers_client.delete_server(self.instance['id'])
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index d500065..dc32edc 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -82,7 +82,7 @@
     def test_snapshot_pattern(self):
         # prepare for booting an instance
         self._add_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
 
         # boot an instance and create a timestamp file in it
         server = self._boot_image(CONF.compute.image_ref)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index ec8575a..fdda423 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -56,7 +56,7 @@
             'device_name': 'vda',
             'volume_id': vol_id,
             'delete_on_termination': '0'}]
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
         security_groups = [{'name': self.security_group['name']}]
         create_kwargs = {
             'block_device_mapping': bd_map,
@@ -117,7 +117,7 @@
                 private_key=keypair['private_key'])
         except Exception:
             LOG.exception('ssh to server failed')
-            self._log_console_output(self)
+            self._log_console_output(servers=[server])
             raise
 
     def _get_content(self, ssh_client):
@@ -140,7 +140,7 @@
     @test.services('compute', 'volume', 'image')
     def test_volume_boot_pattern(self):
         keypair = self.create_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
 
         # create an instance from volume
         volume_origin = self._create_volume_from_image()
@@ -187,7 +187,7 @@
         bdms = [{'uuid': vol_id, 'source_type': 'volume',
                  'destination_type': 'volume', 'boot_index': 0,
                  'delete_on_termination': False}]
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
         security_groups = [{'name': self.security_group['name']}]
         create_kwargs = {
             'block_device_mapping_v2': bdms,
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 07eee8a..032e1da 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -308,3 +308,33 @@
         resp, body = self._list_request(path)
         self.expected_success(200, resp.status)
         return body
+
+    @base.handle_errors
+    def get_console(self, node_uuid):
+        """
+        Get connection information about the console.
+
+        :param node_uuid: Unique identifier of the node in UUID format.
+
+        """
+
+        resp, body = self._show_request('nodes/states/console', node_uuid)
+        self.expected_success(200, resp.status)
+        return resp, body
+
+    @base.handle_errors
+    def set_console_mode(self, node_uuid, enabled):
+        """
+        Start and stop the node console.
+
+        :param node_uuid: Unique identifier of the node in UUID format.
+        :param enabled: Boolean value; whether to enable or disable the
+                        console.
+
+        """
+
+        enabled = {'enabled': enabled}
+        resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
+                                       enabled)
+        self.expected_success(202, resp.status)
+        return resp, body
diff --git a/tempest/services/identity/v3/json/credentials_client.py b/tempest/services/identity/v3/json/credentials_client.py
index f795c7b..d424f4c 100644
--- a/tempest/services/identity/v3/json/credentials_client.py
+++ b/tempest/services/identity/v3/json/credentials_client.py
@@ -41,13 +41,14 @@
         }
         post_body = json.dumps({'credential': post_body})
         resp, body = self.post('credentials', post_body)
+        self.expected_success(201, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
         return resp, body['credential']
 
     def update_credential(self, credential_id, **kwargs):
         """Updates a credential."""
-        resp, body = self.get_credential(credential_id)
+        _, body = self.get_credential(credential_id)
         cred_type = kwargs.get('type', body['type'])
         access_key = kwargs.get('access_key', body['blob']['access'])
         secret_key = kwargs.get('secret_key', body['blob']['secret'])
@@ -63,6 +64,7 @@
         }
         post_body = json.dumps({'credential': post_body})
         resp, body = self.patch('credentials/%s' % credential_id, post_body)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
         return resp, body['credential']
@@ -70,6 +72,7 @@
     def get_credential(self, credential_id):
         """To GET Details of a credential."""
         resp, body = self.get('credentials/%s' % credential_id)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         body['credential']['blob'] = json.loads(body['credential']['blob'])
         return resp, body['credential']
@@ -77,10 +80,12 @@
     def list_credentials(self):
         """Lists out all the available credentials."""
         resp, body = self.get('credentials')
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['credentials']
 
     def delete_credential(self, credential_id):
         """Deletes a credential."""
         resp, body = self.delete('credentials/%s' % credential_id)
+        self.expected_success(204, resp.status)
         return resp, body
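
This file and the identity v3 clients that follow gain expected_success() calls after every request. A simplified, self-contained stand-in for that check (the real helper lives in Tempest's rest client and raises its own exception types): validating the status code against the documented success code makes a client method fail loudly on an unexpected response rather than returning a surprising body.

def expected_success(expected_code, actual_code):
    # Simplified stand-in; not the Tempest implementation.
    if actual_code != expected_code:
        raise AssertionError('expected HTTP %s, got %s'
                             % (expected_code, actual_code))

expected_success(200, 200)   # passes silently
try:
    expected_success(204, 500)
except AssertionError as exc:
    assert 'expected HTTP 204' in str(exc)
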
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
index f7a894b..c3fedb2 100644
--- a/tempest/services/identity/v3/json/endpoints_client.py
+++ b/tempest/services/identity/v3/json/endpoints_client.py
@@ -32,6 +32,7 @@
     def list_endpoints(self):
         """GET endpoints."""
         resp, body = self.get('endpoints')
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['endpoints']
 
@@ -56,6 +57,7 @@
         }
         post_body = json.dumps({'endpoint': post_body})
         resp, body = self.post('endpoints', post_body)
+        self.expected_success(201, resp.status)
         body = json.loads(body)
         return resp, body['endpoint']
 
@@ -82,10 +84,12 @@
             post_body['enabled'] = enabled
         post_body = json.dumps({'endpoint': post_body})
         resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['endpoint']
 
     def delete_endpoint(self, endpoint_id):
         """Delete endpoint."""
         resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
+        self.expected_success(204, resp_header.status)
         return resp_header, resp_body
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policy_client.py
index 3c90fa1..e093260 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policy_client.py
@@ -37,12 +37,14 @@
         }
         post_body = json.dumps({'policy': post_body})
         resp, body = self.post('policies', post_body)
+        self.expected_success(201, resp.status)
         body = json.loads(body)
         return resp, body['policy']
 
     def list_policies(self):
         """Lists the policies."""
         resp, body = self.get('policies')
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['policies']
 
@@ -50,12 +52,12 @@
         """Lists out the given policy."""
         url = 'policies/%s' % policy_id
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['policy']
 
     def update_policy(self, policy_id, **kwargs):
         """Updates a policy."""
-        resp, body = self.get_policy(policy_id)
         type = kwargs.get('type')
         post_body = {
             'type': type
@@ -63,10 +65,13 @@
         post_body = json.dumps({'policy': post_body})
         url = 'policies/%s' % policy_id
         resp, body = self.patch(url, post_body)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['policy']
 
     def delete_policy(self, policy_id):
         """Deletes the policy."""
         url = "policies/%s" % policy_id
-        return self.delete(url)
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return resp, body
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/region_client.py
index c078765..becea6b 100644
--- a/tempest/services/identity/v3/json/region_client.py
+++ b/tempest/services/identity/v3/json/region_client.py
@@ -43,6 +43,7 @@
                 'regions/%s' % kwargs.get('unique_region_id'), req_body)
         else:
             resp, body = self.post('regions', req_body)
+        self.expected_success(201, resp.status)
         body = json.loads(body)
         return resp, body['region']
 
@@ -55,6 +56,7 @@
             post_body['parent_region_id'] = kwargs.get('parent_region_id')
         post_body = json.dumps({'region': post_body})
         resp, body = self.patch('regions/%s' % region_id, post_body)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['region']
 
@@ -62,6 +64,7 @@
         """Get region."""
         url = 'regions/%s' % region_id
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['region']
 
@@ -71,10 +74,12 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         body = json.loads(body)
         return resp, body['regions']
 
     def delete_region(self, region_id):
         """Delete region."""
         resp, body = self.delete('regions/%s' % region_id)
+        self.expected_success(204, resp.status)
         return resp, body
diff --git a/tempest/services/identity/v3/xml/credentials_client.py b/tempest/services/identity/v3/xml/credentials_client.py
index 3c44188..37513d0 100644
--- a/tempest/services/identity/v3/xml/credentials_client.py
+++ b/tempest/services/identity/v3/xml/credentials_client.py
@@ -60,13 +60,14 @@
                                     type=cred_type, user_id=user_id)
         credential.append(blob)
         resp, body = self.post('credentials', str(common.Document(credential)))
+        self.expected_success(201, resp.status)
         body = self._parse_body(etree.fromstring(body))
         body['blob'] = json.loads(body['blob'])
         return resp, body
 
     def update_credential(self, credential_id, **kwargs):
         """Updates a credential."""
-        resp, body = self.get_credential(credential_id)
+        _, body = self.get_credential(credential_id)
         cred_type = kwargs.get('type', body['type'])
         access_key = kwargs.get('access_key', body['blob']['access'])
         secret_key = kwargs.get('secret_key', body['blob']['secret'])
@@ -83,6 +84,7 @@
         credential.append(blob)
         resp, body = self.patch('credentials/%s' % credential_id,
                                 str(common.Document(credential)))
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         body['blob'] = json.loads(body['blob'])
         return resp, body
@@ -90,6 +92,7 @@
     def get_credential(self, credential_id):
         """To GET Details of a credential."""
         resp, body = self.get('credentials/%s' % credential_id)
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         body['blob'] = json.loads(body['blob'])
         return resp, body
@@ -97,10 +100,12 @@
     def list_credentials(self):
         """Lists out all the available credentials."""
         resp, body = self.get('credentials')
+        self.expected_success(200, resp.status)
         body = self._parse_creds(etree.fromstring(body))
         return resp, body
 
     def delete_credential(self, credential_id):
         """Deletes a credential."""
         resp, body = self.delete('credentials/%s' % credential_id)
+        self.expected_success(204, resp.status)
         return resp, body
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index 6490e34..892fb58 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -65,6 +65,7 @@
     def list_endpoints(self):
         """Get the list of endpoints."""
         resp, body = self.get("endpoints")
+        self.expected_success(200, resp.status)
         body = self._parse_array(etree.fromstring(body))
         return resp, body
 
@@ -90,6 +91,7 @@
                                          enabled=enabled)
         resp, body = self.post('endpoints',
                                str(common.Document(create_endpoint)))
+        self.expected_success(201, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -120,10 +122,12 @@
             endpoint.add_attr("enabled", str(enabled).lower())
 
         resp, body = self.patch('endpoints/%s' % str(endpoint_id), str(doc))
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
     def delete_endpoint(self, endpoint_id):
         """Delete endpoint."""
         resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
+        self.expected_success(204, resp_header.status)
         return resp_header, resp_body
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index 73d831b..41bbfe5 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -67,12 +67,14 @@
         create_policy = common.Element("policy", xmlns=XMLNS,
                                        blob=blob, type=type)
         resp, body = self.post('policies', str(common.Document(create_policy)))
+        self.expected_success(201, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
     def list_policies(self):
         """Lists the policies."""
         resp, body = self.get('policies')
+        self.expected_success(200, resp.status)
         body = self._parse_array(etree.fromstring(body))
         return resp, body
 
@@ -80,20 +82,23 @@
         """Lists out the given policy."""
         url = 'policies/%s' % policy_id
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
     def update_policy(self, policy_id, **kwargs):
         """Updates a policy."""
-        resp, body = self.get_policy(policy_id)
         type = kwargs.get('type')
         update_policy = common.Element("policy", xmlns=XMLNS, type=type)
         url = 'policies/%s' % policy_id
         resp, body = self.patch(url, str(common.Document(update_policy)))
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
     def delete_policy(self, policy_id):
         """Deletes the policy."""
         url = "policies/%s" % policy_id
-        return self.delete(url)
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return resp, body
diff --git a/tempest/services/identity/v3/xml/region_client.py b/tempest/services/identity/v3/xml/region_client.py
index f854138..7669678 100644
--- a/tempest/services/identity/v3/xml/region_client.py
+++ b/tempest/services/identity/v3/xml/region_client.py
@@ -79,6 +79,7 @@
         else:
             resp, body = self.post('regions',
                                    str(common.Document(create_region)))
+        self.expected_success(201, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -95,6 +96,7 @@
 
         resp, body = self.patch('regions/%s' % str(region_id),
                                 str(common.Document(update_region)))
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -102,6 +104,7 @@
         """Get Region."""
         url = 'regions/%s' % region_id
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -111,10 +114,12 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         body = self._parse_array(etree.fromstring(body))
         return resp, body
 
     def delete_region(self, region_id):
         """Delete region."""
         resp, body = self.delete('regions/%s' % region_id)
+        self.expected_success(204, resp.status)
         return resp, body
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
new file mode 100644
index 0000000..b2feb87
--- /dev/null
+++ b/tempest/services/network/resources.py
@@ -0,0 +1,163 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+
+import six
+
+
+class AttributeDict(dict):
+
+    """
+    Provide attribute access (dict.key) to dictionary values.
+    """
+
+    def __getattr__(self, name):
+        """Allow attribute access for all keys in the dict."""
+        if name in self:
+            return self[name]
+        return super(AttributeDict, self).__getattribute__(name)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DeletableResource(AttributeDict):
+
+    """
+    Support deletion of neutron resources (networks, subnets) via a
+    delete() method, as is supported by keystone and nova resources.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.pop('client', None)
+        super(DeletableResource, self).__init__(*args, **kwargs)
+
+    def __str__(self):
+        return '<%s id="%s" name="%s">' % (self.__class__.__name__,
+                                           self.id, self.name)
+
+    @abc.abstractmethod
+    def delete(self):
+        return
+
+    def __hash__(self):
+        return hash(self.id)
+
+
+class DeletableNetwork(DeletableResource):
+
+    def delete(self):
+        self.client.delete_network(self.id)
+
+
+class DeletableSubnet(DeletableResource):
+
+    def __init__(self, *args, **kwargs):
+        super(DeletableSubnet, self).__init__(*args, **kwargs)
+        self._router_ids = set()
+
+    def update(self, *args, **kwargs):
+        _, result = self.client.update_subnet(subnet=self.id, *args, **kwargs)
+        super(DeletableSubnet, self).update(**result['subnet'])
+
+    def add_to_router(self, router_id):
+        self._router_ids.add(router_id)
+        self.client.add_router_interface_with_subnet_id(router_id,
+                                                        subnet_id=self.id)
+
+    def delete(self):
+        for router_id in self._router_ids.copy():
+            self.client.remove_router_interface_with_subnet_id(
+                router_id,
+                subnet_id=self.id)
+            self._router_ids.remove(router_id)
+        self.client.delete_subnet(self.id)
+
+
+class DeletableRouter(DeletableResource):
+
+    def set_gateway(self, network_id):
+        return self.update(external_gateway_info=dict(network_id=network_id))
+
+    def unset_gateway(self):
+        return self.update(external_gateway_info=dict())
+
+    def update(self, *args, **kwargs):
+        _, result = self.client.update_router(self.id,
+                                              *args,
+                                              **kwargs)
+        return super(DeletableRouter, self).update(**result['router'])
+
+    def delete(self):
+        self.unset_gateway()
+        self.client.delete_router(self.id)
+
+
+class DeletableFloatingIp(DeletableResource):
+
+    def update(self, *args, **kwargs):
+        _, result = self.client.update_floatingip(self.id,
+                                                  *args,
+                                                  **kwargs)
+        super(DeletableFloatingIp, self).update(**result['floatingip'])
+
+    def __repr__(self):
+        return '<%s addr="%s">' % (self.__class__.__name__,
+                                   self.floating_ip_address)
+
+    def __str__(self):
+        return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
+                                                     self.id)
+
+    def delete(self):
+        self.client.delete_floatingip(self.id)
+
+
+class DeletablePort(DeletableResource):
+
+    def delete(self):
+        self.client.delete_port(self.id)
+
+
+class DeletableSecurityGroup(DeletableResource):
+
+    def delete(self):
+        self.client.delete_security_group(self.id)
+
+
+class DeletableSecurityGroupRule(DeletableResource):
+
+    def __repr__(self):
+        return '<%s id="%s">' % (self.__class__.__name__, self.id)
+
+    def delete(self):
+        self.client.delete_security_group_rule(self.id)
+
+
+class DeletablePool(DeletableResource):
+
+    def delete(self):
+        self.client.delete_pool(self.id)
+
+
+class DeletableMember(DeletableResource):
+
+    def delete(self):
+        self.client.delete_member(self.id)
+
+
+class DeletableVip(DeletableResource):
+
+    def delete(self):
+        self.client.delete_vip(self.id)
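For context on how these wrappers behave, here is a minimal usage sketch; FakeNetworkClient is a hypothetical stub invented for illustration, since in tempest the client object is supplied by the real client manager:

    # Illustration only: FakeNetworkClient stands in for the neutron client.
    class FakeNetworkClient(object):
        def delete_network(self, network_id):
            print("deleting %s" % network_id)

    net = DeletableNetwork(client=FakeNetworkClient(),
                           **{'id': 'net-uuid', 'name': 'private'})
    net.id        # 'net-uuid' -- attribute access via AttributeDict.__getattr__
    net['name']   # 'private' -- plain dict access still works
    net.delete()  # delegates to client.delete_network('net-uuid')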
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index be0f888..eca57c0 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -32,11 +32,15 @@
 
     def create_account(self, data=None,
                        params=None,
-                       metadata={},
-                       remove_metadata={},
+                       metadata=None,
+                       remove_metadata=None,
                        metadata_prefix='X-Account-Meta-',
                        remove_metadata_prefix='X-Remove-Account-Meta-'):
         """Create an account."""
+        if metadata is None:
+            metadata = {}
+        if remove_metadata is None:
+            remove_metadata = {}
         url = ''
         if params:
             url += '?%s' % urllib.urlencode(params)
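The change above replaces mutable default arguments with the None-sentinel pattern that the new N322 hacking check enforces. A minimal sketch of why the original form is unsafe:

    # The default dict is created once, at function definition time, and is
    # then shared by every call that relies on the default.
    def broken(metadata={}):
        metadata.setdefault('count', 0)
        metadata['count'] += 1
        return metadata

    broken()  # {'count': 1}
    broken()  # {'count': 2} -- state leaked from the previous call

    # The pattern used in create_account(): default to None and build a
    # fresh dict inside the function body.
    def fixed(metadata=None):
        if metadata is None:
            metadata = {}
        metadata.setdefault('count', 0)
        metadata['count'] += 1
        return metadata

    fixed()  # {'count': 1}
    fixed()  # {'count': 1}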
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 9c76f51..15306a0 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -45,9 +45,11 @@
         body = json.loads(body)
         return resp, body['stacks']
 
-    def create_stack(self, name, disable_rollback=True, parameters={},
+    def create_stack(self, name, disable_rollback=True, parameters=None,
                      timeout_mins=60, template=None, template_url=None,
                      environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         headers, body = self._prepare_update_create(
             name,
             disable_rollback,
@@ -60,11 +62,14 @@
         uri = 'stacks'
         resp, body = self.post(uri, headers=headers, body=body)
         self.expected_success(201, resp.status)
+        body = json.loads(body)
         return resp, body
 
     def update_stack(self, stack_identifier, name, disable_rollback=True,
-                     parameters={}, timeout_mins=60, template=None,
+                     parameters=None, timeout_mins=60, template=None,
                      template_url=None, environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         headers, body = self._prepare_update_create(
             name,
             disable_rollback,
@@ -80,9 +85,11 @@
         return resp, body
 
     def _prepare_update_create(self, name, disable_rollback=True,
-                               parameters={}, timeout_mins=60,
+                               parameters=None, timeout_mins=60,
                                template=None, template_url=None,
                                environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         post_body = {
             "stack_name": name,
             "disable_rollback": disable_rollback,
@@ -264,16 +271,20 @@
         body = json.loads(body)
         return resp, body
 
-    def validate_template(self, template, parameters={}):
+    def validate_template(self, template, parameters=None):
         """Returns the validation result for a template with parameters."""
+        if parameters is None:
+            parameters = {}
         post_body = {
             'template': template,
             'parameters': parameters,
         }
         return self._validate_template(post_body)
 
-    def validate_template_url(self, template_url, parameters={}):
+    def validate_template_url(self, template_url, parameters=None):
         """Returns the validation result for a template with parameters."""
+        if parameters is None:
+            parameters = {}
         post_body = {
             'template_url': template_url,
             'parameters': parameters,
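Besides the default-argument fixes, create_stack() now decodes the response with json.loads() before returning it, so callers receive a parsed dict rather than a raw JSON string. A hedged sketch of the calling pattern; the 'stack' key layout is the usual Heat response shape and is an assumption, not something shown in this diff:

    # Hypothetical caller; `client` is an orchestration client instance.
    resp, body = client.create_stack('demo-stack', template=template_yaml)
    stack_id = body['stack']['id']  # only possible now that body is a dict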
diff --git a/tempest/services/volume/json/qos_client.py b/tempest/services/volume/json/qos_client.py
new file mode 100644
index 0000000..6e0bee9
--- /dev/null
+++ b/tempest/services/volume/json/qos_client.py
@@ -0,0 +1,161 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import time
+
+from tempest.common import rest_client
+from tempest import config
+from tempest import exceptions
+
+CONF = config.CONF
+
+
+class BaseQosSpecsClientJSON(rest_client.RestClient):
+    """Client class to send CRUD QoS API requests"""
+
+    def __init__(self, auth_provider):
+        super(BaseQosSpecsClientJSON, self).__init__(auth_provider)
+        self.service = CONF.volume.catalog_type
+        self.build_interval = CONF.volume.build_interval
+        self.build_timeout = CONF.volume.build_timeout
+
+    def is_resource_deleted(self, qos_id):
+        try:
+            self.get_qos(qos_id)
+        except exceptions.NotFound:
+            return True
+        return False
+
+    def wait_for_qos_operations(self, qos_id, operation, args=None):
+        """Waits for a qos operations to be completed.
+
+        NOTE : operation value is required for  wait_for_qos_operations()
+        operation = 'qos-key' / 'disassociate' / 'disassociate-all'
+        args = keys[] when operation = 'qos-key'
+        args = volume-type-id disassociated when operation = 'disassociate'
+        args = None when operation = 'disassociate-all'
+        """
+        start_time = int(time.time())
+        while True:
+            if operation == 'qos-key-unset':
+                resp, body = self.get_qos(qos_id)
+                self.expected_success(200, resp.status)
+                if not any(key in body['specs'] for key in args):
+                    return
+            elif operation == 'disassociate':
+                resp, body = self.get_association_qos(qos_id)
+                self.expected_success(200, resp.status)
+                if not any(args in assoc['id'] for assoc in body):
+                    return
+            elif operation == 'disassociate-all':
+                resp, body = self.get_association_qos(qos_id)
+                self.expected_success(200, resp.status)
+                if not body:
+                    return
+            else:
+                msg = (" operation value is either not defined or incorrect.")
+                raise exceptions.UnprocessableEntity(msg)
+
+            if int(time.time()) - start_time >= self.build_timeout:
+                raise exceptions.TimeoutException
+            time.sleep(self.build_interval)
+
+    def create_qos(self, name, consumer, **kwargs):
+        """Create a QoS Specification.
+
+        name : name of the QoS specification
+        consumer : consumer of the QoS (front-end / back-end / both)
+        """
+        post_body = {'name': name, 'consumer': consumer}
+        post_body.update(kwargs)
+        post_body = json.dumps({'qos_specs': post_body})
+        resp, body = self.post('qos-specs', post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body['qos_specs']
+
+    def delete_qos(self, qos_id, force=False):
+        """Delete the specified QoS specification."""
+        resp, body = self.delete(
+            "qos-specs/%s?force=%s" % (str(qos_id), force))
+        self.expected_success(202, resp.status)
+
+    def list_qos(self):
+        """List all the QoS specifications created."""
+        url = 'qos-specs'
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return resp, body['qos_specs']
+
+    def get_qos(self, qos_id):
+        """Get the specified QoS specification."""
+        url = "qos-specs/%s" % str(qos_id)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return resp, body['qos_specs']
+
+    def set_qos_key(self, qos_id, **kwargs):
+        """Set the specified keys/values of QoS specification.
+
+        kwargs : dictionary of key=value pairs to set
+        """
+        put_body = json.dumps({"qos_specs": kwargs})
+        resp, body = self.put('qos-specs/%s' % qos_id, put_body)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return resp, body['qos_specs']
+
+    def unset_qos_key(self, qos_id, keys):
+        """Unset the specified keys of QoS specification.
+
+        keys : list of keys to unset
+        """
+        put_body = json.dumps({'keys': keys})
+        resp, _ = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
+        self.expected_success(202, resp.status)
+
+    def associate_qos(self, qos_id, vol_type_id):
+        """Associate the specified QoS with specified volume-type."""
+        url = "qos-specs/%s/associate" % str(qos_id)
+        url += "?vol_type_id=%s" % vol_type_id
+        resp, _ = self.get(url)
+        self.expected_success(202, resp.status)
+
+    def get_association_qos(self, qos_id):
+        """Get the association of the specified QoS specification."""
+        url = "qos-specs/%s/associations" % str(qos_id)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return resp, body['qos_associations']
+
+    def disassociate_qos(self, qos_id, vol_type_id):
+        """Disassociate the specified QoS with specified volume-type."""
+        url = "qos-specs/%s/disassociate" % str(qos_id)
+        url += "?vol_type_id=%s" % vol_type_id
+        resp, _ = self.get(url)
+        self.expected_success(202, resp.status)
+
+    def disassociate_all_qos(self, qos_id):
+        """Disassociate the specified QoS with all associations."""
+        url = "qos-specs/%s/disassociate_all" % str(qos_id)
+        resp, _ = self.get(url)
+        self.expected_success(202, resp.status)
+
+
+class QosSpecsClientJSON(BaseQosSpecsClientJSON):
+    """Volume V1 QoS client."""
diff --git a/tempest/services/volume/v2/json/qos_client.py b/tempest/services/volume/v2/json/qos_client.py
new file mode 100644
index 0000000..a734df8
--- /dev/null
+++ b/tempest/services/volume/v2/json/qos_client.py
@@ -0,0 +1,23 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.volume.json import qos_client
+
+
+class QosSpecsV2ClientJSON(qos_client.BaseQosSpecsClientJSON):
+
+    def __init__(self, auth_provider):
+        super(QosSpecsV2ClientJSON, self).__init__(auth_provider)
+
+        self.api_version = "v2"
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index d0140dd..a28684e 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -238,8 +238,8 @@
                                                           'neutron', {})
         self.assertIn('neutron', results)
         self.assertIn('extensions', results['neutron'])
-        self.assertEqual(['fake1', 'fake2', 'not_fake'],
-                         results['neutron']['extensions'])
+        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+                         sorted(results['neutron']['extensions']))
 
     def test_verify_extensions_cinder(self):
         def fake_list_extensions():
@@ -277,8 +277,8 @@
                                                           'cinder', {})
         self.assertIn('cinder', results)
         self.assertIn('extensions', results['cinder'])
-        self.assertEqual(['fake1', 'fake2', 'not_fake'],
-                         results['cinder']['extensions'])
+        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+                         sorted(results['cinder']['extensions']))
 
     def test_verify_extensions_nova(self):
         def fake_list_extensions():
@@ -316,8 +316,8 @@
                                                           'nova', {})
         self.assertIn('nova', results)
         self.assertIn('extensions', results['nova'])
-        self.assertEqual(['fake1', 'fake2', 'not_fake'],
-                         results['nova']['extensions'])
+        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+                         sorted(results['nova']['extensions']))
 
     def test_verify_extensions_nova_v3(self):
         def fake_list_extensions():
@@ -355,8 +355,8 @@
                                                           'nova_v3', {})
         self.assertIn('nova_v3', results)
         self.assertIn('extensions', results['nova_v3'])
-        self.assertEqual(['fake1', 'fake2', 'not_fake'],
-                         results['nova_v3']['extensions'])
+        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+                         sorted(results['nova_v3']['extensions']))
 
     def test_verify_extensions_swift(self):
         def fake_list_extensions():
@@ -395,5 +395,5 @@
                                                           'swift', {})
         self.assertIn('swift', results)
         self.assertIn('extensions', results['swift'])
-        self.assertEqual(['not_fake', 'fake1', 'fake2'],
-                         results['swift']['extensions'])
+        self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
+                         sorted(results['swift']['extensions']))
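The sorted() wrappers make these assertions independent of the order in which extensions are reported:

    # Equivalent lists in different orders now compare equal in the assertion.
    expected = ['fake1', 'fake2', 'not_fake']
    reported = ['not_fake', 'fake2', 'fake1']
    assert sorted(expected) == sorted(reported)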
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
index 9da5f92..ea576c4 100644
--- a/tempest/tests/test_credentials.py
+++ b/tempest/tests/test_credentials.py
@@ -128,12 +128,22 @@
         creds = self._get_credentials()
         self.assertTrue(creds.is_valid())
 
-    def test_is_not_valid(self):
+    def _test_is_not_valid(self, ignore_key):
         creds = self._get_credentials()
         for attr in self.attributes.keys():
+            if attr == ignore_key:
+                continue
+            temp_attr = getattr(creds, attr)
             delattr(creds, attr)
             self.assertFalse(creds.is_valid(),
                              "Credentials should be invalid without %s" % attr)
+            setattr(creds, attr, temp_attr)
+
+    def test_is_not_valid(self):
+        # NOTE(mtreinish): A KeystoneV2 credential object is valid without
+        # a tenant_name. So skip that check. See tempest.auth for the valid
+        # credential requirements
+        self._test_is_not_valid('tenant_name')
 
     def test_default(self):
         self.useFixture(fixtures.LockFixture('auth_version'))
@@ -205,6 +215,12 @@
                     config_value = 'fake_' + attr
                 self.assertEqual(getattr(creds, attr), config_value)
 
+    def test_is_not_valid(self):
+        # NOTE(mtreinish): A Keystone V3 credential object is valid without a
+        # project name, so skip that check. See tempest.auth for the valid
+        # credential requirements
+        self._test_is_not_valid('project_name')
+
     def test_synced_attributes(self):
         attributes = self.attributes
         # Create V3 credentials with tenant instead of project, and user_domain
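The new helper removes one attribute at a time, asserts the credentials are invalid, and then restores the attribute so the next iteration starts again from a complete credential set. A minimal illustration of that delete-and-restore loop (Creds is a made-up class for the example):

    class Creds(object):
        def __init__(self):
            self.username = 'u'
            self.password = 'p'

    creds = Creds()
    for attr in ('username', 'password'):
        saved = getattr(creds, attr)
        delattr(creds, attr)
        assert not hasattr(creds, attr)  # credentials are incomplete here
        setattr(creds, attr, saved)      # restore before the next iteration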
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 52fdf7e..9c13013 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -107,3 +107,16 @@
         self.assertFalse(checks.no_official_client_manager_in_api_tests(
             "cls.official_client = clients.OfficialClientManager(credentials)",
             "tempest/scenario/fake_test.py"))
+
+    def test_no_mutable_default_args(self):
+        self.assertEqual(1, len(list(checks.no_mutable_default_args(
+            " def function1(para={}):"))))
+
+        self.assertEqual(1, len(list(checks.no_mutable_default_args(
+            "def function2(para1, para2, para3=[])"))))
+
+        self.assertEqual(0, len(list(checks.no_mutable_default_args(
+            "defined = []"))))
+
+        self.assertEqual(0, len(list(checks.no_mutable_default_args(
+            "defined, undefined = [], {}"))))
diff --git a/tools/check_logs.py b/tools/check_logs.py
index eab9f73..917aaaf 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -26,8 +26,9 @@
 import yaml
 
 
-is_grenade = (os.environ.get('DEVSTACK_GATE_GRENADE', "0") == "1" or
-              os.environ.get('DEVSTACK_GATE_GRENADE_FORWARD', "0") == "1")
+# DEVSTACK_GATE_GRENADE is either unset if grenade is not running
+# or a string describing what type of grenade run to perform.
+is_grenade = os.environ.get('DEVSTACK_GATE_GRENADE') is not None
 dump_all_errors = True
 
 # As logs are made clean, add to this set