Merge "Add negative tests for snapshot pagination"
diff --git a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml b/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
index 484d543..d07448a 100644
--- a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
+++ b/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
@@ -4,7 +4,7 @@
     plugins to declare and automatically register any service client defined
     in the plugin.
   - tempest.lib exposes a new stable interface, the clients module and
-    ServiceClients class, which provides a convinient way for plugin tests to
+    ServiceClients class, which provides a convenient way for plugin tests to
     access service clients defined in Tempest as well as service clients
     defined in all loaded plugins.
     The new ServiceClients class only exposes for now the service clients
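(For reference, the stable interface described in this note lives in tempest.lib.services.clients and is typically exercised along these lines; a minimal sketch, where the credentials object and the identity endpoint are placeholders, not part of this change:

    # Minimal sketch of the tempest.lib clients module. 'credentials' is
    # assumed to be an already-built tempest.lib auth credentials object;
    # the identity URI below is a hypothetical endpoint.
    from tempest.lib.services import clients

    service_clients = clients.ServiceClients(
        credentials, 'http://keystone.example.com/identity/v3')
    # Service clients registered by Tempest and by loaded plugins are
    # then reachable through this single entry point.
)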
diff --git a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml b/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
index 543cf7b..52c04af 100644
--- a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
+++ b/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
@@ -2,4 +2,4 @@
 deprecations:
   - The ``call_until_true`` function is moved from the ``tempest.test`` module
    to the ``tempest.lib.common.utils.test_utils`` module. Backward
-   compatibilty is preserved until Ocata.
+   compatibility is preserved until Ocata.
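(Callers of the relocated helper only need to update the import path; the call itself is unchanged. A minimal sketch, with an illustrative predicate and timing values:

    import time

    from tempest.lib.common.utils import test_utils

    start = time.time()

    def predicate():
        # Illustrative condition: becomes true ten seconds after start.
        return time.time() - start > 10

    # Poll every 5 seconds for up to 300 seconds; returns True if the
    # predicate became true within the duration, False otherwise.
    succeeded = test_utils.call_until_true(predicate, 300, 5)
)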
diff --git a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml b/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
index 20f310d..813e47f 100644
--- a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
+++ b/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
@@ -1,5 +1,5 @@
 ---
 upgrade:
-  - the already depreacted tempest-cleanup standalone command has been
+  - The already deprecated tempest-cleanup standalone command has been
     removed. The corresponding functionalities can be accessed through
     the unified `tempest` command (`tempest cleanup`).
diff --git a/releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
similarity index 100%
rename from releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
rename to releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
diff --git a/releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml b/releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
similarity index 100%
rename from releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml
rename to releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
diff --git a/releasenotes/notes/add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml b/releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
similarity index 100%
rename from releasenotes/notes/add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
rename to releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
diff --git a/releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml b/releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
similarity index 100%
rename from releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
rename to releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
diff --git a/releasenotes/notes/deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml b/releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
rename to releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
diff --git a/releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml b/releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
rename to releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
diff --git a/releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml b/releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
rename to releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
diff --git a/releasenotes/notes/jsonschema-validator-2377ba131e12d3c7.yaml b/releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
similarity index 100%
rename from releasenotes/notes/jsonschema-validator-2377ba131e12d3c7.yaml
rename to releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
diff --git a/releasenotes/notes/remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
diff --git a/releasenotes/notes/remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
diff --git a/releasenotes/notes/remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml b/releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
diff --git a/releasenotes/notes/remove-deprecated-network-config-options-f9ce276231578fe6.yaml b/releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-network-config-options-f9ce276231578fe6.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
diff --git a/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
new file mode 100644
index 0000000..5555949
--- /dev/null
+++ b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
@@ -0,0 +1,13 @@
+---
+prelude: >
+    This release marks the start of Ocata release support in Tempest.
+other:
+  - |
+    OpenStack releases supported at this time are **Mitaka**, **Newton**,
+    and **Ocata**.
+
+    The release under current development as of this tag is Pike,
+    meaning that every Tempest commit is also tested against master during
+    the Pike cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Pike (or future releases)
+    cloud.
diff --git a/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml b/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
new file mode 100644
index 0000000..67f9541
--- /dev/null
+++ b/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add the list security groups by server API to the servers_client
+    library. This feature enables the possibility to list security
+    groups for a server instance.
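(The call added by this feature is exercised by the test further down in this change; in isolation it looks roughly like this, where servers_client and server_id are placeholders from the surrounding test infrastructure:

    # Minimal sketch of the new servers_client call.
    groups = servers_client.list_security_groups_by_server(
        server_id)['security_groups']
    group_ids = [sg['id'] for sg in groups]
)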
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 242d133..cea76b4 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
     :maxdepth: 1
 
     unreleased
+    v15.0.0
     v14.0.0
     v13.0.0
     v12.0.0
diff --git a/releasenotes/source/v15.0.0.rst b/releasenotes/source/v15.0.0.rst
new file mode 100644
index 0000000..2ee1894
--- /dev/null
+++ b/releasenotes/source/v15.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v15.0.0 Release Notes
+=====================
+
+.. release-notes:: 15.0.0 Release Notes
+   :version: 15.0.0
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 0a9db46..c3c88a5 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -101,7 +101,7 @@
             # check some extensions for the flavor create/show/detail response
             self.assertEqual(flavor['swap'], '')
             self.assertEqual(int(flavor['rxtx_factor']), 1)
-            self.assertEqual(int(flavor['OS-FLV-EXT-DATA:ephemeral']), 0)
+            self.assertEqual(flavor['OS-FLV-EXT-DATA:ephemeral'], 0)
             self.assertEqual(flavor['os-flavor-access:is_public'], True)
 
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index bd72d13..9fe1f74 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -26,6 +26,8 @@
     Add and remove Flavor Access require admin privileges.
     """
 
+    credentials = ['primary', 'admin', 'alt']
+
     @classmethod
     def skip_checks(cls):
         super(FlavorsAccessNegativeTestJSON, cls).skip_checks()
@@ -151,4 +153,4 @@
         self.assertRaises(lib_exc.NotFound,
                           self.admin_flavors_client.remove_flavor_access,
                           new_flavor['id'],
-                          data_utils.rand_uuid())
+                          self.os_alt.servers_client.tenant_id)
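(The two hunks above work together: declaring 'alt' in the class-level credentials list makes an alternate credential set available, so the negative test can use a real tenant id that simply has no access to the flavor, instead of a random UUID. A minimal sketch of the pattern; class and test names are illustrative:

    from tempest.api.compute import base

    class ExampleNegativeTest(base.BaseV2ComputeAdminTest):
        credentials = ['primary', 'admin', 'alt']

        def test_remove_access_for_other_tenant(self):
            # A real project id from the alt credentials, guaranteed not
            # to have been granted access to the flavor under test.
            alt_tenant_id = self.os_alt.servers_client.tenant_id
)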
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 8ec6400..ee1e3a0 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -41,7 +41,7 @@
         vcpus = 1
         disk = 10
         ephemeral = 10
-        cls.new_flavor_id = data_utils.rand_int_id(start=1000)
+        new_flavor_id = data_utils.rand_int_id(start=1000)
         swap = 1024
         rxtx = 1
         # Create a flavor so as to set/get/unset extra specs
@@ -49,7 +49,7 @@
             name=flavor_name,
             ram=ram, vcpus=vcpus,
             disk=disk,
-            id=cls.new_flavor_id,
+            id=new_flavor_id,
             ephemeral=ephemeral,
             swap=swap,
             rxtx_factor=rxtx)['flavor']
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 79a9068..dab83e5 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -43,7 +43,7 @@
         vcpus = 1
         disk = 10
         ephemeral = 10
-        cls.new_flavor_id = data_utils.rand_int_id(start=1000)
+        new_flavor_id = data_utils.rand_int_id(start=1000)
         swap = 1024
         rxtx = 1
         # Create a flavor
@@ -51,7 +51,7 @@
             name=flavor_name,
             ram=ram, vcpus=vcpus,
             disk=disk,
-            id=cls.new_flavor_id,
+            id=new_flavor_id,
             ephemeral=ephemeral,
             swap=swap,
             rxtx_factor=rxtx)['flavor']
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 39797f7..3ffd238 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -45,7 +45,6 @@
     def setup_clients(cls):
         super(LiveBlockMigrationTestJSON, cls).setup_clients()
         cls.admin_hosts_client = cls.os_adm.hosts_client
-        cls.admin_servers_client = cls.os_adm.servers_client
         cls.admin_migration_client = cls.os_adm.migrations_client
 
     @classmethod
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 21f5c68..aa75348 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -31,7 +31,6 @@
         super(MigrationsAdminTest, cls).setup_clients()
         cls.client = cls.os_adm.migrations_client
         cls.flavors_admin_client = cls.os_adm.flavors_client
-        cls.admin_servers_client = cls.os_adm.servers_client
 
     @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
     def test_list_migrations(self):
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 0850205..ca8382f 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -87,6 +87,7 @@
 
     @decorators.skip_because(bug="1186354",
                              condition=CONF.service_available.neutron)
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('7c6c8f3b-2bf6-4918-b240-57b136a66aa0')
     @test.services('network')
     def test_security_groups_exceed_limit(self):
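(Several hunks in this change add the same decorator; it tags a test with one or more attribute types so a test runner can include or exclude it, for example running or skipping everything tagged 'negative'. A minimal sketch; class and test names are illustrative:

    from tempest.api.compute import base
    from tempest import test

    class ExampleNegativeTest(base.BaseV2ComputeAdminTest):

        @test.attr(type=['negative'])
        def test_exceeding_quota_fails(self):
            pass
)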
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index b53ced9..5220c97 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -66,7 +66,7 @@
         flavor_id = self._get_unused_flavor_id()
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
-        ram = int(quota_set['ram'])
+        ram = quota_set['ram']
         if ram == -1:
             raise self.skipException("ram quota set is -1,"
                                      " cannot test overlimit")
@@ -94,7 +94,7 @@
         flavor_id = self._get_unused_flavor_id()
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
-        vcpus = int(quota_set['cores'])
+        vcpus = quota_set['cores']
         if vcpus == -1:
             raise self.skipException("cores quota set is -1,"
                                      " cannot test overlimit")
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 5f2444a..45472df 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -38,12 +38,6 @@
         if not CONF.compute_feature_enabled.swap_volume:
             raise cls.skipException("Swapping volumes is not supported.")
 
-    @classmethod
-    def setup_clients(cls):
-        super(TestVolumeSwap, cls).setup_clients()
-        # We need the admin client for performing the update (swap) volume call
-        cls.servers_admin_client = cls.os_adm.servers_client
-
     @decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
     @test.services('volume')
     def test_volume_swap(self):
@@ -58,12 +52,12 @@
         # Attach "volume1" to server
         self.attach_volume(server, volume1)
         # Swap volume from "volume1" to "volume2"
-        self.servers_admin_client.update_attached_volume(
+        self.admin_servers_client.update_attached_volume(
             server['id'], volume1['id'], volumeId=volume2['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume1['id'], 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume2['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume1['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'in-use')
         self.addCleanup(self.servers_client.detach_volume,
                         server['id'], volume2['id'])
         # Verify "volume2" is attached to the server
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 1f85c18..905bc3d 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -32,25 +32,22 @@
             raise cls.skipException(skip_msg)
 
     @classmethod
-    def setup_clients(cls):
-        super(VolumesAdminNegativeTest, cls).setup_clients()
-        cls.servers_admin_client = cls.os_adm.servers_client
-
-    @classmethod
     def resource_setup(cls):
         super(VolumesAdminNegativeTest, cls).resource_setup()
         cls.server = cls.create_test_server(wait_until='ACTIVE')
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.servers_admin_client.update_attached_volume,
+                          self.admin_servers_client.update_attached_volume,
                           self.server['id'], nonexistent_volume,
                           volumeId=volume['id'])
 
     @test.related_bug('1629110', status_code=400)
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
         volume = self.create_volume()
@@ -58,6 +55,6 @@
 
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.BadRequest,
-                          self.servers_admin_client.update_attached_volume,
+                          self.admin_servers_client.update_attached_volume,
                           self.server['id'], volume['id'],
                           volumeId=nonexistent_volume)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index c636894..706b859 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -326,6 +326,10 @@
                     raise
             image = cls.compute_images_client.show_image(image_id)['image']
 
+            if kwargs['wait_until'] == 'ACTIVE':
+                if kwargs.get('wait_for_server', True):
+                    waiters.wait_for_server_status(cls.servers_client,
+                                                   server_id, 'ACTIVE')
         return image
 
     @classmethod
@@ -406,8 +410,8 @@
             kwargs['imageRef'] = image_ref
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], 'available')
         return volume
 
     @classmethod
@@ -446,15 +450,15 @@
         # On teardown detach the volume and wait for it to be available. This
         # is so we don't error out when trying to delete the volume during
         # teardown.
-        self.addCleanup(waiters.wait_for_volume_status,
+        self.addCleanup(waiters.wait_for_volume_resource_status,
                         self.volumes_client, volume['id'], 'available')
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.servers_client.detach_volume,
                         server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
 
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -468,6 +472,7 @@
         cls.availability_zone_admin_client = (
             cls.os_adm.availability_zone_client)
         cls.admin_flavors_client = cls.os_adm.flavors_client
+        cls.admin_servers_client = cls.os_adm.servers_client
 
     def create_flavor(self, ram, vcpus, disk, name=None,
                       is_public='True', **kwargs):
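(All of the waiter updates in this patch have the same shape: the per-resource helpers (wait_for_volume_status, wait_for_snapshot_status, wait_for_backup_status) are replaced by a single resource-agnostic waiter. A minimal sketch, where the client and id variables are placeholders:

    from tempest.common import waiters

    # One waiter now serves volumes, snapshots and backups; it polls the
    # resource through the given client until the target status is reached.
    waiters.wait_for_volume_resource_status(volumes_client,
                                            volume_id, 'available')
    waiters.wait_for_volume_resource_status(snapshots_client,
                                            snapshot_id, 'available')
    waiters.wait_for_volume_resource_status(backups_client,
                                            backup_id, 'available')
)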
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index d9db0b5..a0c860a 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -60,6 +60,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         image = self.create_image_from_server(server['id'],
                                               name=snapshot_name,
-                                              wait_until='ACTIVE')
+                                              wait_until='ACTIVE',
+                                              wait_for_server=False)
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
diff --git a/tempest/api/compute/limits/test_absolute_limits_negative.py b/tempest/api/compute/limits/test_absolute_limits_negative.py
index b9ae0c6..21b4b1c 100644
--- a/tempest/api/compute/limits/test_absolute_limits_negative.py
+++ b/tempest/api/compute/limits/test_absolute_limits_negative.py
@@ -41,11 +41,11 @@
         max_meta = limits['absolute']['maxImageMeta']
 
         # No point in running this test if there is no limit.
-        if int(max_meta) == -1:
+        if max_meta == -1:
             raise self.skipException('no limit for maxImageMeta')
 
         # Creating a server should fail, since we pass more metadata than the limit allows
-        max_meta_data = int(max_meta) + 1
+        max_meta_data = max_meta + 1
 
         meta_data = {}
         for xx in range(max_meta_data):
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index e070336..349bfda 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -144,3 +144,31 @@
                          ['security_group'])
         self.assertEqual(s_new_name, fetched_group['name'])
         self.assertEqual(s_new_des, fetched_group['description'])
+
+    @decorators.idempotent_id('79517d60-535a-438f-af3d-e6feab1cbea7')
+    @test.services('network')
+    def test_list_security_groups_by_server(self):
+        # Create a couple security groups that we will use
+        # for the server resource this test creates
+        sg = self.create_security_group()
+        sg2 = self.create_security_group()
+        assigned_security_groups_ids = [sg['id'], sg2['id']]
+        # Create a server and add the two security groups created
+        # above to it
+        server_id = self.create_test_server(wait_until='ACTIVE')['id']
+        # add security groups to server
+        self.servers_client.add_security_group(server_id, name=sg['name'])
+        self.servers_client.add_security_group(server_id, name=sg2['name'])
+
+        # list security groups for a server
+        fetched_groups = (
+            self.servers_client.list_security_groups_by_server(
+                server_id)['security_groups'])
+        fetched_security_groups_ids = [i['id'] for i in fetched_groups]
+        # verify that the assigned security group ids appear in the list
+        missing_security_groups = [
+            p for p in assigned_security_groups_ids
+            if p not in fetched_security_groups_ids]
+        self.assertEmpty(missing_security_groups,
+                         "Failed to find security_groups %s in fetched list" %
+                         ', '.join(missing_security_groups))
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 9bba733..e0c8887 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -79,7 +79,6 @@
 
     def _check_interface(self, iface, port_id=None, network_id=None,
                          fixed_ip=None, mac_addr=None):
-        self.assertIn('port_state', iface)
         if port_id:
             self.assertEqual(iface['port_id'], port_id)
         if network_id:
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 5ddae5e..a94c20b 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -51,7 +51,7 @@
         cls.name = data_utils.rand_name(cls.__name__ + '-server')
         cls.password = data_utils.rand_password()
         disk_config = cls.disk_config
-        cls.server_initial = cls.create_test_server(
+        server_initial = cls.create_test_server(
             validatable=True,
             wait_until='ACTIVE',
             name=cls.name,
@@ -60,7 +60,7 @@
             accessIPv6=cls.accessIPv6,
             disk_config=disk_config,
             adminPass=cls.password)
-        cls.server = (cls.client.show_server(cls.server_initial['id'])
+        cls.server = (cls.client.show_server(server_initial['id'])
                       ['server'])
 
     def _create_net_subnet_ret_net_from_cidr(self, cidr):
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 83b2e1b..8ed55e0 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -115,8 +115,8 @@
 
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 04fe11f..3f6abab 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -1,4 +1,4 @@
-# Copyright 2016 OpenStack Foundation
+# Copyright 2016-2017 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,6 +26,11 @@
 
 CONF = config.CONF
 
+if six.PY2:
+    ord_func = ord
+else:
+    ord_func = int
+
 
 class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
 
@@ -60,14 +65,19 @@
         resp = urllib3.PoolManager().request('GET', vnc_url)
         # Make sure that the GET request was accepted by the novncproxy
         self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
-                         'initial call: ' + str(resp.status))
+                         'initial call: ' + six.text_type(resp.status))
         # Do some basic validation to make sure it is an expected HTML document
-        self.assertTrue('<html>' in resp.data and '</html>' in resp.data,
-                        'Not a valid html document in the response.')
+        resp_data = resp.data.decode()
+        self.assertIn('<html>', resp_data,
+                      'Not a valid html document in the response.')
+        self.assertIn('</html>', resp_data,
+                      'Not a valid html document in the response.')
         # Just try to make sure we got JavaScript back for noVNC, since we
         # won't actually use it, as we are not inside of a browser
-        self.assertTrue('noVNC' in resp.data and '<script' in resp.data,
-                        'Not a valid noVNC javascript html document.')
+        self.assertIn('noVNC', resp_data,
+                      'Not a valid noVNC javascript html document.')
+        self.assertIn('<script', resp_data,
+                      'Not a valid noVNC javascript html document.')
 
     def _validate_rfb_negotiation(self):
         """Verify we can connect to novnc and do the websocket connection."""
@@ -82,14 +92,14 @@
                                    int(data[8:11], base=10)))
         self.assertTrue(version >= 3.3, 'Bad RFB Version: ' + str(version))
         # Send our RFB version to the server, which we will just go with 3.3
-        self._websocket.send_frame(str(data))
+        self._websocket.send_frame(data)
         # Get the server authentication type and make sure None is supported
         data = self._websocket.receive_frame()
         self.assertIsNotNone(data, 'Expected authentication type None.')
         self.assertGreaterEqual(
             len(data), 2, 'Expected authentication type None.')
         self.assertIn(
-            1, [ord(data[i + 1]) for i in range(ord(data[0]))],
+            1, [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
             'Expected authentication type None.')
         # Send to the server that we only support authentication type None
         self._websocket.send_frame(six.int2byte(1))
@@ -98,7 +108,7 @@
         self.assertEqual(
             len(data), 4, 'Server did not think security was successful.')
         self.assertEqual(
-            [ord(i) for i in data], [0, 0, 0, 0],
+            [ord_func(i) for i in data], [0, 0, 0, 0],
             'Server did not think security was successful.')
         # Say to leave the desktop as shared as part of client initialization
         self._websocket.send_frame(six.int2byte(1))
@@ -121,12 +131,12 @@
 
     def _validate_websocket_upgrade(self):
         self.assertTrue(
-            self._websocket.response.startswith('HTTP/1.1 101 Switching '
-                                                'Protocols\r\n'),
+            self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
+                                                b'Protocols\r\n'),
             'Did not get the expected 101 on the websockify call: '
-            + str(len(self._websocket.response)))
+            + six.text_type(self._websocket.response))
         self.assertTrue(
-            self._websocket.response.find('Server: WebSockify') > 0,
+            self._websocket.response.find(b'Server: WebSockify') > 0,
             'Did not get the expected WebSocket HTTP Response.')
 
     def _create_websocket(self, url):
@@ -187,8 +197,8 @@
             # frames less than 125 bytes here (for the negotiation) and
             # that only the 2nd byte contains the length, and since the
             # server doesn't do masking, we can just read the data length
-            if ord(header[1]) & 127 > 0:
-                return self._socket.recv(ord(header[1]) & 127)
+            if ord_func(header[1]) & 127 > 0:
+                return self._socket.recv(ord_func(header[1]) & 127)
 
     def send_frame(self, data):
         """Wrapper for sending data to add in the WebSocket frame format."""
@@ -205,7 +215,7 @@
             frame_bytes.append(mask[i])
         # Mask each of the actual data bytes that we are going to send
         for i in range(len(data)):
-            frame_bytes.append(ord(data[i]) ^ mask[i % 4])
+            frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
         # Convert our integer list to a binary array of bytes
         frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
         self._socket.sendall(frame_bytes)
@@ -233,9 +243,9 @@
         # We are choosing to use binary even though browser may do Base64
         reqdata += 'Sec-WebSocket-Protocol: binary\r\n\r\n'
         # Send the HTTP GET request and get the response back
-        self._socket.sendall(reqdata)
+        self._socket.sendall(reqdata.encode('utf8'))
         self.response = data = self._socket.recv(4096)
         # Loop through & concatenate all of the data in the response body
-        while len(data) > 0 and self.response.find('\r\n\r\n') < 0:
+        while len(data) > 0 and self.response.find(b'\r\n\r\n') < 0:
             data = self._socket.recv(4096)
             self.response += data
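(The ord_func indirection introduced at the top of this file exists because indexing a byte string behaves differently across Python versions; a minimal standalone sketch:

    import six

    # On Python 2, bytes[i] is a one-character str and needs ord(); on
    # Python 3 it is already an int, so int() is effectively a no-op.
    data = b'\x01\x02'
    ord_func = ord if six.PY2 else int
    assert ord_func(data[0]) == 1
)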
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 957d24a..90b9da4 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -97,7 +97,7 @@
         max_file_limit = limits['absolute']['maxPersonality']
         if max_file_limit == -1:
             raise self.skipException("No limit for personality files")
-        for i in range(0, int(max_file_limit) + 1):
+        for i in range(0, max_file_limit + 1):
             path = 'etc/test' + str(i) + '.txt'
             personality.append({'path': path,
                                 'contents': base64.encode_as_text(
@@ -117,7 +117,7 @@
         if max_file_limit == -1:
             raise self.skipException("No limit for personality files")
         person = []
-        for i in range(0, int(max_file_limit)):
+        for i in range(0, max_file_limit):
             # NOTE(andreaf) The cirros disk image is blank before boot
             # so we can only inject safely to /
             path = '/test' + str(i) + '.txt'
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index f74a5a4..209ab38 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -50,9 +50,9 @@
 
         # Security group creation
         cls.sg_name = data_utils.rand_name('sg')
-        cls.sg_desc = data_utils.rand_name('sg-desc')
+        sg_desc = data_utils.rand_name('sg-desc')
         cls.sg = cls.security_groups_client.create_security_group(
-            name=cls.sg_name, description=cls.sg_desc)['security_group']
+            name=cls.sg_name, description=sg_desc)['security_group']
         cls.sg_id = cls.sg['id']
 
         cls.password = data_utils.rand_password()
diff --git a/tempest/api/compute/test_live_block_migration_negative.py b/tempest/api/compute/test_live_block_migration_negative.py
index 40d0746..01fd9ef 100644
--- a/tempest/api/compute/test_live_block_migration_negative.py
+++ b/tempest/api/compute/test_live_block_migration_negative.py
@@ -31,11 +31,6 @@
         if not CONF.compute_feature_enabled.live_migration:
             raise cls.skipException("Live migration is not enabled")
 
-    @classmethod
-    def setup_clients(cls):
-        super(LiveBlockMigrationNegativeTestJSON, cls).setup_clients()
-        cls.admin_servers_client = cls.os_adm.servers_client
-
     def _migrate_server_to(self, server_id, dest_host):
         bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         self.admin_servers_client.live_migrate_server(
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index cbe7178..5304944 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -64,8 +64,8 @@
     def _detach_volume(self, server_id, volume_id):
         try:
             self.servers_client.detach_volume(server_id, volume_id)
-            waiters.wait_for_volume_status(self.volumes_client,
-                                           volume_id, 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume_id, 'available')
         except lib_exc.NotFound:
             LOG.warning("Unable to detach volume %s from server %s "
                         "possibly it was already detached", volume_id,
@@ -78,8 +78,8 @@
             kwargs.update({'device': '/dev/%s' % device})
         attachment = self.servers_client.attach_volume(
             server_id, **kwargs)['volumeAttachment']
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
         self.addCleanup(self._detach_volume, server_id,
                         volume_id)
 
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index 3d5d23b..4b06867 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -54,9 +54,9 @@
             display_name=s_name)['snapshot']
 
         def delete_snapshot(snapshot_id):
-            waiters.wait_for_snapshot_status(self.snapshots_client,
-                                             snapshot_id,
-                                             'available')
+            waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                    snapshot_id,
+                                                    'available')
             # Delete snapshot
             self.snapshots_client.delete_snapshot(snapshot_id)
             self.snapshots_client.wait_for_resource_deletion(snapshot_id)
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 63c247e..0eaa359 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -57,7 +57,8 @@
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
         # Wait for Volume status to become ACTIVE
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client, volume['id'],
+                                                'available')
         # GET Volume
         fetched_volume = self.client.show_volume(volume['id'])['volume']
         # Verification of details of fetched Volume
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 56b3517..2812c68 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -164,7 +164,6 @@
         cls.client.store_image_file(image['id'], data=image_file)
         # Keep the data of one test image so it can be used to filter lists
         cls.test_data = image
-        cls.test_data['size'] = size
 
         return image['id']
 
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 435e672..2c639da 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -39,6 +39,7 @@
             msg = "quotas extension not enabled."
             raise cls.skipException(msg)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
     def test_network_quota_exceeding(self):
         # Set the network quota to two
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 2174940..52b3978 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -125,6 +125,7 @@
 
     @decorators.idempotent_id('b277257f-113c-4499-b8d1-5fead79f7360')
     @test.requires_ext(extension='formpost', service='object')
+    @test.attr(type=['negative'])
     def test_post_object_using_form_invalid_signature(self):
         self.key = "Wrong"
         body, content_type = self.get_multipart_form()
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index f68f19a..c3e904a 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -34,7 +34,7 @@
         super(VolumeMultiBackendV2Test, cls).resource_setup()
 
         # read backend name from a list.
-        cls.backend_names = set(CONF.volume.backend_names)
+        backend_names = set(CONF.volume.backend_names)
 
         cls.name_field = cls.special_fields['name_field']
         cls.volume_id_list_with_prefix = []
@@ -42,10 +42,10 @@
 
         # Volume/Type creation (uses volume_backend_name)
         # It is not allowed to create the same backend name twice
-        if len(cls.backend_names) < 2:
+        if len(backend_names) < 2:
             raise cls.skipException("Requires at least two different "
                                     "backend names")
-        for backend_name in cls.backend_names:
+        for backend_name in backend_names:
             # Volume/Type creation (uses backend_name)
             cls._create_type_and_volume(backend_name, False)
             # Volume/Type creation (uses capabilities:volume_backend_name)
@@ -74,8 +74,8 @@
         else:
             cls.volume_id_list_without_prefix.append(
                 cls.volume['id'])
-        waiters.wait_for_volume_status(cls.admin_volume_client,
-                                       cls.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.admin_volume_client,
+                                                cls.volume['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 5a83ae3..83fca45 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -114,7 +114,7 @@
         volume_default = quota_set_default['volumes']
 
         self.admin_quotas_client.update_quota_set(
-            project_id, volumes=(int(volume_default) + 5))
+            project_id, volumes=(volume_default + 5))
 
         self.admin_quotas_client.delete_quota_set(project_id)
         quota_set_new = (self.admin_quotas_client.show_quota_set(project_id)
@@ -146,7 +146,7 @@
             transfer_id, auth_key=auth_key)['transfer']
 
         # Verify volume transferred is available
-        waiters.wait_for_volume_status(
+        waiters.wait_for_volume_resource_status(
             self.alt_client, volume['id'], 'available')
 
         # List of tenants quota usage post transfer
diff --git a/tempest/api/volume/admin/test_volume_retype_with_migration.py b/tempest/api/volume/admin/test_volume_retype_with_migration.py
index dc509de..4d32fdd 100644
--- a/tempest/api/volume/admin/test_volume_retype_with_migration.py
+++ b/tempest/api/volume/admin/test_volume_retype_with_migration.py
@@ -40,16 +40,16 @@
     def resource_setup(cls):
         super(VolumeRetypeWithMigrationV2Test, cls).resource_setup()
         # read backend name from a list.
-        cls.backend_src = CONF.volume.backend_names[0]
-        cls.backend_dst = CONF.volume.backend_names[1]
+        backend_src = CONF.volume.backend_names[0]
+        backend_dst = CONF.volume.backend_names[1]
 
-        extra_specs_src = {"volume_backend_name": cls.backend_src}
-        extra_specs_dst = {"volume_backend_name": cls.backend_dst}
+        extra_specs_src = {"volume_backend_name": backend_src}
+        extra_specs_dst = {"volume_backend_name": backend_dst}
 
-        cls.src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
+        src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
         cls.dst_vol_type = cls.create_volume_type(extra_specs=extra_specs_dst)
 
-        cls.src_vol = cls.create_volume(volume_type=cls.src_vol_type['name'])
+        cls.src_vol = cls.create_volume(volume_type=src_vol_type['name'])
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 7938604..5d08416 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -36,7 +36,7 @@
         # Create/update/get/delete volume with volume_type and extra spec.
         volume_types = list()
         vol_name = data_utils.rand_name(self.__class__.__name__ + '-volume')
-        self.name_field = self.special_fields['name_field']
+        name_field = self.special_fields['name_field']
         proto = CONF.volume.storage_protocol
         vendor = CONF.volume.vendor_name
         extra_specs = {"storage_protocol": proto,
@@ -46,26 +46,26 @@
             vol_type = self.create_volume_type(
                 extra_specs=extra_specs)
             volume_types.append(vol_type)
-        params = {self.name_field: vol_name,
+        params = {name_field: vol_name,
                   'volume_type': volume_types[0]['id'],
                   'size': CONF.volume.volume_size}
 
         # Create volume
         volume = self.create_volume(**params)
         self.assertEqual(volume_types[0]['name'], volume["volume_type"])
-        self.assertEqual(volume[self.name_field], vol_name,
+        self.assertEqual(volume[name_field], vol_name,
                          "The created volume name is not equal "
                          "to the requested name")
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Update volume with new volume_type
         self.volumes_client.retype_volume(volume['id'],
                                           new_type=volume_types[1]['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Get volume details and Verify
         fetched_volume = self.volumes_client.show_volume(
@@ -74,7 +74,7 @@
                          fetched_volume['volume_type'],
                          'The fetched Volume type is different '
                          'from updated volume type')
-        self.assertEqual(vol_name, fetched_volume[self.name_field],
+        self.assertEqual(vol_name, fetched_volume[name_field],
                          'The fetched Volume is different '
                          'from the created Volume')
         self.assertEqual(volume['id'], fetched_volume['id'],
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 933b6ad..5f590bc 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -17,6 +17,7 @@
 from tempest.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
+from tempest import test
 
 
 class ExtraSpecsNegativeV2Test(base.BaseVolumeAdminTest):
@@ -24,9 +25,10 @@
     @classmethod
     def resource_setup(cls):
         super(ExtraSpecsNegativeV2Test, cls).resource_setup()
-        cls.extra_specs = {"spec1": "val1"}
-        cls.volume_type = cls.create_volume_type(extra_specs=cls.extra_specs)
+        extra_specs = {"spec1": "val1"}
+        cls.volume_type = cls.create_volume_type(extra_specs=extra_specs)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
@@ -35,6 +37,7 @@
             self.admin_volume_types_client.update_volume_type_extra_specs,
             self.volume_type['id'], "spec1", None)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
     def test_update_nonexistent_extra_spec_id(self):
         # Should not update volume type extra specs with nonexistent id.
@@ -45,6 +48,7 @@
             self.volume_type['id'], data_utils.rand_uuid(),
             extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
     def test_update_none_extra_spec_id(self):
         # Should not update volume type extra specs with none id.
@@ -54,6 +58,7 @@
             self.admin_volume_types_client.update_volume_type_extra_specs,
             self.volume_type['id'], None, extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
@@ -65,6 +70,7 @@
             self.volume_type['id'], list(extra_spec)[0],
             extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
         # Should not create volume type extra spec for nonexistent volume
@@ -75,6 +81,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             data_utils.rand_uuid(), extra_specs)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
     def test_create_none_body(self):
         # Should not create volume type extra spec for none POST body.
@@ -83,6 +90,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             self.volume_type['id'], None)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
     def test_create_invalid_body(self):
         # Should not create volume type extra spec for invalid POST body.
@@ -91,6 +99,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             self.volume_type['id'], extra_specs=['invalid'])
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
         # Should not delete volume type extra spec for nonexistent
@@ -100,6 +109,7 @@
             self.admin_volume_types_client.delete_volume_type_extra_specs,
             data_utils.rand_uuid(), "spec1")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
     def test_list_nonexistent_volume_type_id(self):
         # Should not list volume type extra spec for nonexistent type id.
@@ -108,6 +118,7 @@
             self.admin_volume_types_client.list_volume_types_extra_specs,
             data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
     def test_get_nonexistent_volume_type_id(self):
         # Should not get volume type extra spec for nonexistent type id.
@@ -116,6 +127,7 @@
             self.admin_volume_types_client.show_volume_type_extra_specs,
             data_utils.rand_uuid(), "spec1")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
     def test_get_nonexistent_extra_spec_id(self):
         # Should not get volume type extra spec for nonexistent extra spec
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index b278127..69e9cc0 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -17,19 +17,22 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
+from tempest import test
 
 
 class VolumeTypesNegativeV2Test(base.BaseVolumeAdminTest):
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('b48c98f2-e662-4885-9b71-032256906314')
     def test_create_with_nonexistent_volume_type(self):
         # Should not be able to create volume with nonexistent volume_type.
-        self.name_field = self.special_fields['name_field']
-        params = {self.name_field: data_utils.rand_uuid(),
+        name_field = self.special_fields['name_field']
+        params = {name_field: data_utils.rand_uuid(),
                   'volume_type': data_utils.rand_uuid()}
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.create_volume, **params)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('878b4e57-faa2-4659-b0d1-ce740a06ae81')
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
@@ -37,6 +40,7 @@
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type, name='')
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('994610d6-0476-4018-a644-a2602ef5d4aa')
     def test_get_nonexistent_type_id(self):
         # Should not be able to get volume type with nonexistent type id.
@@ -44,6 +48,7 @@
                           self.admin_volume_types_client.show_volume_type,
                           data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('6b3926d2-7d73-4896-bc3d-e42dfd11a9f6')
     def test_delete_nonexistent_type_id(self):
         # Should not be able to delete volume type with nonexistent type id.
@@ -51,6 +56,7 @@
                           self.admin_volume_types_client.delete_volume_type,
                           data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('8c09f849-f225-4d78-ba87-bffd9a5e0c6f')
     def test_create_volume_with_private_volume_type(self):
         # Should not be able to create volume with private volume type.
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 04d27ea..13b7384 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -94,8 +94,9 @@
         self.addCleanup(self._delete_backup, new_id)
         self.assertIn("id", import_backup)
         self.assertEqual(new_id, import_backup['id'])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')
 
         # Verify Import Backup
         backups = self.admin_backups_client.list_backups(
@@ -108,14 +109,16 @@
         self.addCleanup(self.admin_volume_client.delete_volume,
                         restore['volume_id'])
         self.assertEqual(backup['id'], restore['backup_id'])
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       restore['volume_id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                restore['volume_id'],
+                                                'available')
 
         # Verify if restored volume is there in volume list
         volumes = self.admin_volume_client.list_volumes()['volumes']
         self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')
 
     @decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
     def test_volume_backup_reset_status(self):
@@ -131,8 +134,8 @@
         # Reset backup status to error
         self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
                                                       status="error")
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       backup['id'], 'error')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                backup['id'], 'error')
 
 
 class VolumesBackupsAdminV1Test(VolumesBackupsAdminV2Test):
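
Note: every call-site change in this series is the same mechanical
substitution; the per-resource waiters give way to a single entry point with
an identical signature. Illustrative before/after (client and id names are
placeholders):

    # before: one waiter per resource type
    waiters.wait_for_backup_status(backups_client, backup_id, 'available')
    # after: one waiter; the resource type is derived from the client class
    waiters.wait_for_volume_resource_status(backups_client, backup_id,
                                            'available')
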
diff --git a/tempest/api/volume/admin/v2/test_snapshot_manage.py b/tempest/api/volume/admin/v2/test_snapshot_manage.py
index 1114924..eed7dd1 100644
--- a/tempest/api/volume/admin/v2/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/v2/test_snapshot_manage.py
@@ -65,9 +65,9 @@
                         self.admin_snapshots_client, new_snapshot['id'])
 
         # Wait for the snapshot to be available after manage operation
-        waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                         new_snapshot['id'],
-                                         'available')
+        waiters.wait_for_volume_resource_status(self.admin_snapshots_client,
+                                                new_snapshot['id'],
+                                                'available')
 
         # Verify the managed snapshot has the expected parent volume
         self.assertEqual(new_snapshot['volume_id'], volume['id'])
diff --git a/tempest/api/volume/admin/v2/test_volumes_list.py b/tempest/api/volume/admin/v2/test_volumes_list.py
index b0a37fb..6bab373 100644
--- a/tempest/api/volume/admin/v2/test_volumes_list.py
+++ b/tempest/api/volume/admin/v2/test_volumes_list.py
@@ -45,8 +45,8 @@
         # Create a volume in admin tenant
         adm_vol = self.admin_volume_client.create_volume(
             size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       adm_vol['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                adm_vol['id'], 'available')
         self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
         params = {'all_tenants': 1,
                   'project_id': self.volumes_client.tenant_id}
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 98e050e..f8c435f 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -131,8 +131,8 @@
 
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client, volume['id'],
-                                       wait_until)
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], wait_until)
         return volume
 
     @classmethod
@@ -146,8 +146,8 @@
         snapshot = cls.snapshots_client.create_snapshot(
             volume_id=volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
-        waiters.wait_for_snapshot_status(cls.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.snapshots_client,
+                                                snapshot['id'], 'available')
         return snapshot
 
     def create_backup(self, volume_id, backup_client=None, **kwargs):
@@ -158,8 +158,8 @@
         backup = backup_client.create_backup(
             volume_id=volume_id, **kwargs)['backup']
         self.addCleanup(backup_client.delete_backup, backup['id'])
-        waiters.wait_for_backup_status(backup_client, backup['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(backup_client, backup['id'],
+                                                'available')
         return backup
 
     # NOTE(afazekas): these create_* and clean_* could be defined
@@ -182,10 +182,10 @@
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
-                        volume_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status,
+                        self.volumes_client, volume_id, 'available')
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)
 
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 5477770..9f63b14 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -43,8 +43,8 @@
             volume_id=volume['id'])['transfer']
         transfer_id = transfer['id']
         auth_key = transfer['auth_key']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')
 
         # Get a volume transfer
         body = self.client.show_volume_transfer(transfer_id)['transfer']
@@ -58,8 +58,8 @@
         # Accept a volume transfer by alt_tenant
         body = self.alt_client.accept_volume_transfer(
             transfer_id, auth_key=auth_key)['transfer']
-        waiters.wait_for_volume_status(self.alt_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.alt_client,
+                                                volume['id'], 'available')
 
     @decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
@@ -71,8 +71,8 @@
         body = self.client.create_volume_transfer(
             volume_id=volume['id'])['transfer']
         transfer_id = body['id']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')
 
         # List all volume transfers (looking for the one we created)
         body = self.client.list_volume_transfers()['transfers']
@@ -84,7 +84,8 @@
 
         # Delete a volume transfer
         self.client.delete_volume_transfer(transfer_id)
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'available')
 
 
 class VolumesV1TransfersTest(VolumesV2TransfersTest):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c0cc74d..0a6901c 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -60,11 +60,11 @@
                                   instance_uuid=server['id'],
                                   mountpoint='/dev/%s' %
                                              CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'in-use')
         self.client.detach_volume(self.volume['id'])
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')
 
     @decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
     def test_volume_bootable(self):
@@ -91,11 +91,10 @@
                                   instance_uuid=server['id'],
                                   mountpoint='/dev/%s' %
                                              CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.client,
-                        self.volume['id'],
-                        'available')
+        waiters.wait_for_volume_resource_status(self.client, self.volume['id'],
+                                                'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status, self.client,
+                        self.volume['id'], 'available')
         self.addCleanup(self.client.detach_volume, self.volume['id'])
         volume = self.client.show_volume(self.volume['id'])['volume']
         self.assertIn('attachments', volume)
@@ -124,8 +123,8 @@
                         self.image_client.delete_image,
                         image_id)
         waiters.wait_for_image_status(self.image_client, image_id, 'active')
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')
 
     @decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
     def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 939f1ac..e664ff7 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -40,11 +40,11 @@
         self.addCleanup(self.volumes_client.delete_volume,
                         restored_volume['volume_id'])
         self.assertEqual(backup_id, restored_volume['backup_id'])
-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup_id, 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       restored_volume['volume_id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                restored_volume['volume_id'],
+                                                'available')
         return restored_volume
 
     @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
@@ -60,8 +60,8 @@
                                     name=backup_name,
                                     description=description)
         self.assertEqual(backup_name, backup['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Get a given backup
         backup = self.backups_client.show_backup(backup['id'])['backup']
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index 79a1a0a..d653808 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -43,7 +43,7 @@
         volume = self.volumes_client.show_volume(dst_vol['id'])['volume']
         # Should allow
         self.assertEqual(volume['source_volid'], src_vol['id'])
-        self.assertEqual(int(volume['size']), src_size + 1)
+        self.assertEqual(volume['size'], src_size + 1)
 
     @decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
     def test_create_from_bootable_volume(self):
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index fa827cd..5331243 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -17,6 +17,7 @@
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions
+from tempest import test
 
 CONF = config.CONF
 
@@ -29,6 +30,7 @@
         if not CONF.volume_feature_enabled.clone:
             raise cls.skipException("Cinder volume clones are disabled")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9adae371-a257-43a5-459a-dc7c88e66e0e')
     def test_create_from_volume_decreasing_size(self):
         # Creates a volume from another volume passing a size different from
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 2e1851e..3df9b00 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -24,13 +24,13 @@
     def test_volume_extend(self):
         # Extend Volume Test.
         volume = self.create_volume()
-        extend_size = int(volume['size']) + 1
+        extend_size = volume['size'] + 1
         self.volumes_client.extend_volume(volume['id'],
                                           new_size=extend_size)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         volume = self.volumes_client.show_volume(volume['id'])['volume']
-        self.assertEqual(int(volume['size']), extend_size)
+        self.assertEqual(volume['size'], extend_size)
 
 
 class VolumesV1ExtendTest(VolumesV2ExtendTest):
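
Note: the dropped int() casts here and in the tests below rely on the volume
API returning ``size`` as a JSON number, which the standard decoder already
yields as an int. A self-contained check of that assumption:

    import json

    # JSON numbers decode straight to int, so no explicit cast is needed.
    volume = json.loads('{"volume": {"size": 2}}')['volume']
    assert isinstance(volume['size'], int)
    assert volume['size'] + 1 == 3
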
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index d1a1c2f..a3e46a8 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -41,8 +41,8 @@
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.assertIn('id', volume)
         self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         self.assertIn(name_field, volume)
         self.assertEqual(volume[name_field], v_name,
                          "The created volume name is not equal "
@@ -106,8 +106,8 @@
         self.assertIn('id', new_volume)
         self.addCleanup(self.delete_volume, self.volumes_client,
                         new_volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       new_volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                new_volume['id'], 'available')
 
         params = {name_field: volume[name_field],
                   descrip_field: volume[descrip_field]}
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 0a095a9..28e65ed 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -221,7 +221,7 @@
     @decorators.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
     def test_volume_extend_with_nonexistent_volume_id(self):
         # Extend volume size when volume is nonexistent.
-        extend_size = int(self.volume['size']) + 1
+        extend_size = self.volume['size'] + 1
         self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
                           data_utils.rand_uuid(), new_size=extend_size)
 
@@ -229,7 +229,7 @@
     @decorators.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
     def test_volume_extend_without_passing_volume_id(self):
         # Extend volume size when passing volume id is None.
-        extend_size = int(self.volume['size']) + 1
+        extend_size = self.volume['size'] + 1
         self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
                           None, new_size=extend_size)
 
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index f1ca722..9f4ce95 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -153,7 +153,7 @@
         volume = self.volumes_client.show_volume(dst_vol['id'])['volume']
         # Should allow
         self.assertEqual(volume['snapshot_id'], src_snap['id'])
-        self.assertEqual(int(volume['size']), src_size + 1)
+        self.assertEqual(volume['size'], src_size + 1)
 
 
 class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
diff --git a/tempest/api/volume/test_volumes_snapshots_list.py b/tempest/api/volume/test_volumes_snapshots_list.py
index ff390ea..a0eaa00 100644
--- a/tempest/api/volume/test_volumes_snapshots_list.py
+++ b/tempest/api/volume/test_volumes_snapshots_list.py
@@ -28,11 +28,11 @@
     @classmethod
     def resource_setup(cls):
         super(VolumesV2SnapshotListTestJSON, cls).resource_setup()
-        cls.volume_origin = cls.create_volume()
+        volume_origin = cls.create_volume()
         cls.name_field = cls.special_fields['name_field']
         # Create snapshots with params
         for _ in range(2):
-            cls.snapshot = cls.create_snapshot(cls.volume_origin['id'])
+            cls.snapshot = cls.create_snapshot(volume_origin['id'])
 
     def _list_by_param_values_and_assert(self, with_detail=False, **params):
         """list or list_details with given params and validates result."""
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 7d77ee7..9e44379 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -47,6 +47,7 @@
                           self.snapshots_client.create_snapshot,
                           volume_id=None, display_name=s_name)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('677863d1-34f9-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot_decreasing_size(self):
         # Creates a volume from a snapshot passing a size different from the source
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 4776545..8b51e64 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -37,13 +37,13 @@
         super(VolumesV2ListTestJSON, cls).resource_setup()
 
         # Create 3 test volumes
-        cls.metadata = {'Type': 'work'}
+        metadata = {'Type': 'work'}
         # NOTE(zhufl): When using pre-provisioned credentials, the project
         # may have volumes other than those created below.
         existing_volumes = cls.volumes_client.list_volumes()['volumes']
         cls.volume_id_list = [vol['id'] for vol in existing_volumes]
         for _ in range(3):
-            volume = cls.create_volume(metadata=cls.metadata)
+            volume = cls.create_volume(metadata=metadata)
             cls.volume_id_list.append(volume['id'])
 
     @decorators.idempotent_id('2a7064eb-b9c3-429b-b888-33928fc5edd3')
diff --git a/tempest/api/volume/v2/test_volumes_snapshots_list.py b/tempest/api/volume/v2/test_volumes_snapshots_list.py
index e2f11e8..d385f65 100644
--- a/tempest/api/volume/v2/test_volumes_snapshots_list.py
+++ b/tempest/api/volume/v2/test_volumes_snapshots_list.py
@@ -33,10 +33,10 @@
         super(VolumesV2SnapshotListTestJSON, cls).resource_setup()
         cls.snapshot_id_list = []
         # Create a volume
-        cls.volume_origin = cls.create_volume()
+        volume_origin = cls.create_volume()
         # Create 3 snapshots
         for _ in range(3):
-            snapshot = cls.create_snapshot(cls.volume_origin['id'])
+            snapshot = cls.create_snapshot(volume_origin['id'])
             cls.snapshot_id_list.append(snapshot['id'])
 
     def _list_snapshots_param_sort(self, sort_key, sort_dir):
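
Note: this resource_setup change and the test_volumes_list.py one above
follow the same rule; values consumed only during setup become locals, so the
class carries just the state the tests actually read. Roughly:

    # Only the snapshot ids are read by tests, so only they live on cls.
    volume_origin = cls.create_volume()  # local, setup-only
    for _ in range(3):
        snapshot = cls.create_snapshot(volume_origin['id'])
        cls.snapshot_id_list.append(snapshot['id'])
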
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 55bc93e..99da983 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -124,8 +124,9 @@
                   'imageRef': image_id,
                   'size': CONF.volume.volume_size}
         volume = volumes_client.create_volume(**params)
-        waiters.wait_for_volume_status(volumes_client,
-                                       volume['volume']['id'], 'available')
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                volume['volume']['id'],
+                                                'available')
 
         bd_map_v2 = [{
             'uuid': volume['volume']['id'],
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 15619f4..3e5600c 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -10,7 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
+import re
 import time
 
 from oslo_log import log as logging
@@ -179,25 +179,33 @@
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_status(client, volume_id, status):
-    """Waits for a Volume to reach a given status."""
-    body = client.show_volume(volume_id)['volume']
-    volume_status = body['status']
+def wait_for_volume_resource_status(client, resource_id, status):
+    """Waits for a volume resource to reach a given status.
+
+    This function is a common function for volume, snapshot and backup
+    resources. The function extracts the name of the desired resource from
+    the client class name of the resource.
+    """
+    resource_name = re.findall(r'(Volume|Snapshot|Backup)',
+                               client.__class__.__name__)[0].lower()
+    show_resource = getattr(client, 'show_' + resource_name)
+    resource_status = show_resource(resource_id)[resource_name]['status']
     start = int(time.time())
 
-    while volume_status != status:
+    while resource_status != status:
         time.sleep(client.build_interval)
-        body = client.show_volume(volume_id)['volume']
-        volume_status = body['status']
-        if volume_status == 'error' and status != 'error':
-            raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
-        if volume_status == 'error_restoring':
-            raise exceptions.VolumeRestoreErrorException(volume_id=volume_id)
+        resource_status = show_resource(resource_id)[
+            resource_name]['status']
+        if resource_status == 'error' and resource_status != status:
+            raise exceptions.VolumeResourceBuildErrorException(
+                resource_name=resource_name, resource_id=resource_id)
+        if resource_name == 'volume' and resource_status == 'error_restoring':
+            raise exceptions.VolumeRestoreErrorException(volume_id=resource_id)
 
         if int(time.time()) - start >= client.build_timeout:
-            message = ('Volume %s failed to reach %s status (current %s) '
+            message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
-                       (volume_id, status, volume_status,
+                       (resource_name, resource_id, status, resource_status,
                         client.build_timeout))
             raise lib_exc.TimeoutException(message)
 
@@ -221,48 +229,6 @@
             raise lib_exc.TimeoutException(message)
 
 
-def wait_for_snapshot_status(client, snapshot_id, status):
-    """Waits for a Snapshot to reach a given status."""
-    body = client.show_snapshot(snapshot_id)['snapshot']
-    snapshot_status = body['status']
-    start = int(time.time())
-
-    while snapshot_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_snapshot(snapshot_id)['snapshot']
-        snapshot_status = body['status']
-        if snapshot_status == 'error':
-            raise exceptions.SnapshotBuildErrorException(
-                snapshot_id=snapshot_id)
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Snapshot %s failed to reach %s status (current %s) '
-                       'within the required time (%s s).' %
-                       (snapshot_id, status, snapshot_status,
-                        client.build_timeout))
-            raise lib_exc.TimeoutException(message)
-
-
-def wait_for_backup_status(client, backup_id, status):
-    """Waits for a Backup to reach a given status."""
-    body = client.show_backup(backup_id)['backup']
-    backup_status = body['status']
-    start = int(time.time())
-
-    while backup_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_backup(backup_id)['backup']
-        backup_status = body['status']
-        if backup_status == 'error' and backup_status != status:
-            raise lib_exc.VolumeBackupException(backup_id=backup_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Volume backup %s failed to reach %s status '
-                       '(current %s) within the required time (%s s).' %
-                       (backup_id, status, backup_status,
-                        client.build_timeout))
-            raise lib_exc.TimeoutException(message)
-
-
 def wait_for_qos_operations(client, qos_id, operation, args=None):
     """Waits for a qos operations to be completed.
 
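
Note: the name derivation in wait_for_volume_resource_status assumes the
volume service client classes are named VolumesClient, SnapshotsClient and
BackupsClient; the regex picks out the resource word and the matching show_*
accessor is then looked up on the client. Illustration:

    import re

    for cls_name in ('VolumesClient', 'SnapshotsClient', 'BackupsClient'):
        resource = re.findall(r'(Volume|Snapshot|Backup)',
                              cls_name)[0].lower()
        print('%s -> show_%s' % (resource, resource))
    # volume -> show_volume
    # snapshot -> show_snapshot
    # backup -> show_backup
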
diff --git a/tempest/config.py b/tempest/config.py
index bd19967..b4d88c5 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -15,14 +15,12 @@
 
 from __future__ import print_function
 
-import functools
 import os
 import tempfile
 
 from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslo_log import log as logging
-import testtools
 
 from tempest.lib import exceptions
 from tempest.lib.services import clients
@@ -1191,75 +1189,6 @@
 CONF = TempestConfigProxy()
 
 
-def skip_unless_config(*args):
-    """Decorator to raise a skip if a config opt doesn't exist or is False
-
-    :param str group: The first arg, the option group to check
-    :param str name: The second arg, the option name to check
-    :param str msg: Optional third arg, the skip msg to use if a skip is raised
-    :raises testtools.TestCase.skipException: If the specified config option
-        doesn't exist or it exists and evaluates to False
-    """
-    def decorator(f):
-        group = args[0]
-        name = args[1]
-
-        @functools.wraps(f)
-        def wrapper(self, *func_args, **func_kwargs):
-            if not hasattr(CONF, group):
-                msg = "Config group %s doesn't exist" % group
-                raise testtools.TestCase.skipException(msg)
-
-            conf_group = getattr(CONF, group)
-            if not hasattr(conf_group, name):
-                msg = "Config option %s.%s doesn't exist" % (group,
-                                                             name)
-                raise testtools.TestCase.skipException(msg)
-
-            value = getattr(conf_group, name)
-            if not value:
-                if len(args) == 3:
-                    msg = args[2]
-                else:
-                    msg = "Config option %s.%s is false" % (group,
-                                                            name)
-                raise testtools.TestCase.skipException(msg)
-            return f(self, *func_args, **func_kwargs)
-        return wrapper
-    return decorator
-
-
-def skip_if_config(*args):
-    """Raise a skipException if a config exists and is True
-
-    :param str group: The first arg, the option group to check
-    :param str name: The second arg, the option name to check
-    :param str msg: Optional third arg, the skip msg to use if a skip is raised
-    :raises testtools.TestCase.skipException: If the specified config option
-        exists and evaluates to True
-    """
-    def decorator(f):
-        group = args[0]
-        name = args[1]
-
-        @functools.wraps(f)
-        def wrapper(self, *func_args, **func_kwargs):
-            if hasattr(CONF, group):
-                conf_group = getattr(CONF, group)
-                if hasattr(conf_group, name):
-                    value = getattr(conf_group, name)
-                    if value:
-                        if len(args) == 3:
-                            msg = args[2]
-                        else:
-                            msg = "Config option %s.%s is false" % (group,
-                                                                    name)
-                        raise testtools.TestCase.skipException(msg)
-            return f(self, *func_args, **func_kwargs)
-        return wrapper
-    return decorator
-
-
 def service_client_config(service_client_name=None):
     """Return a dict with the parameters to init service clients
 
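
Note: with skip_unless_config and skip_if_config gone, the in-tree way to
express the same intent is an explicit config check in skip_checks, the
pattern already used by the clone-negative test earlier in this change set.
Sketch (MyVolumeTest is hypothetical):

    class MyVolumeTest(base.BaseVolumeTest):

        @classmethod
        def skip_checks(cls):
            super(MyVolumeTest, cls).skip_checks()
            if not CONF.volume_feature_enabled.clone:
                raise cls.skipException("Cinder volume clones are disabled")
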
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 45bbc11..f48d7ac 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -37,18 +37,15 @@
     message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
 
 
-class VolumeBuildErrorException(exceptions.TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+class VolumeResourceBuildErrorException(exceptions.TempestException):
+    message = ("%(resource_name)s %(resource_id)s failed to build and is in "
+               "ERROR status")
 
 
 class VolumeRestoreErrorException(exceptions.TempestException):
     message = "Volume %(volume_id)s failed to restore and is in ERROR status"
 
 
-class SnapshotBuildErrorException(exceptions.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
 class StackBuildErrorException(exceptions.TempestException):
     message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
                "due to '%(stack_status_reason)s'")
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index f16ef88..adff244 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -19,6 +19,8 @@
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 
+from tempest.lib.api_schema.response.compute.v2_1 import \
+    security_groups as security_groups_schema
 from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
 from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
 from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
@@ -717,3 +719,16 @@
         http://developer.openstack.org/api-ref-compute-v2.1.html#removeFixedIp
         """
         return self.action(server_id, 'removeFixedIp', **kwargs)
+
+    def list_security_groups_by_server(self, server_id):
+        """Lists security groups for a server.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        http://developer.openstack.org/api-ref-compute-v2.1.html#listSecurityGroupsByServer
+        """
+        resp, body = self.get("servers/%s/os-security-groups" % server_id)
+        body = json.loads(body)
+        self.validate_response(security_groups_schema.list_security_groups,
+                               resp, body)
+        return rest_client.ResponseBody(resp, body)
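
Note: typical client usage, for reference; the body is validated against the
security-groups schema before being returned, so callers can index
'security_groups' directly (client and id names are placeholders):

    body = servers_client.list_security_groups_by_server(server_id)
    for sg in body['security_groups']:
        print(sg['name'], sg['id'])
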
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6014c8c..e670216 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -241,8 +241,8 @@
             self.assertEqual(name, volume['display_name'])
         else:
             self.assertEqual(name, volume['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         # The volume retrieved on creation has a non-up-to-date status.
         # Retrieval after it becomes active ensures correct details.
         volume = self.volumes_client.show_volume(volume['id'])['volume']
@@ -481,8 +481,9 @@
                 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                 self.snapshots_client.delete_snapshot,
                                 snapshot_id)
-                waiters.wait_for_snapshot_status(self.snapshots_client,
-                                                 snapshot_id, 'available')
+                waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                        snapshot_id,
+                                                        'available')
         image_name = snapshot_image['name']
         self.assertEqual(name, image_name)
         LOG.debug("Created snapshot image %s for server %s",
@@ -494,16 +495,16 @@
             server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
 
         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']
 
     def nova_volume_detach(self, server, volume):
         self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual('available', volume['status'])
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f8e7742..1196659 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -39,7 +39,6 @@
     def setup_clients(cls):
         super(TestNetworkAdvancedServerOps, cls).setup_clients()
         cls.admin_servers_client = cls.os_adm.servers_client
-        cls.admin_hosts_client = cls.os_adm.hosts_client
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 8661217..ef9664d 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -64,10 +64,10 @@
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snapshot['id'], 'available')
         if 'display_name' in snapshot:
             self.assertEqual(snapshot_name, snapshot['display_name'])
         else:
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 43dcf96..9c33b71 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -82,8 +82,8 @@
         self.addCleanup(
             self.snapshots_client.wait_for_resource_deletion, snap['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snap['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snap['id'], 'available')
 
         # NOTE(e0ne): Cinder API v2 uses name instead of display_name
         if 'display_name' in snap:
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index a1da343..6345728 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -48,10 +48,14 @@
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
                          compute_images_client=mock.DEFAULT,
+                         servers_client=mock.DEFAULT,
                          images=[], create=True)
     @mock.patch.object(waiters, 'wait_for_image_status')
+    @mock.patch.object(waiters, 'wait_for_server_status')
     def test_create_image_from_server_wait_until_active(self,
+                                                        wait_for_server_status,
                                                         wait_for_image_status,
+                                                        servers_client,
                                                         compute_images_client):
         """Tests create_image_from_server with wait_until='ACTIVE' kwarg."""
         # setup mocks
@@ -67,6 +71,35 @@
         # make our assertions
         wait_for_image_status.assert_called_once_with(
             compute_images_client, image_id, 'ACTIVE')
+        wait_for_server_status.assert_called_once_with(
+            servers_client, mock.sentinel.server_id, 'ACTIVE')
+        compute_images_client.show_image.assert_called_once_with(image_id)
+
+    @mock.patch.multiple(compute_base.BaseV2ComputeTest,
+                         compute_images_client=mock.DEFAULT,
+                         servers_client=mock.DEFAULT,
+                         images=[], create=True)
+    @mock.patch.object(waiters, 'wait_for_image_status')
+    @mock.patch.object(waiters, 'wait_for_server_status')
+    def test_create_image_from_server_wait_until_active_no_server_wait(
+            self, wait_for_server_status, wait_for_image_status,
+            servers_client, compute_images_client):
+        """Tests create_image_from_server with wait_until='ACTIVE' kwarg."""
+        # setup mocks
+        image_id = uuidutils.generate_uuid()
+        fake_image = mock.Mock(response={'location': image_id})
+        compute_images_client.create_image.return_value = fake_image
+        compute_images_client.show_image.return_value = (
+            {'image': fake_image})
+        # call the utility method
+        image = compute_base.BaseV2ComputeTest.create_image_from_server(
+            mock.sentinel.server_id, wait_until='ACTIVE',
+            wait_for_server=False)
+        self.assertEqual(fake_image, image)
+        # make our assertions
+        wait_for_image_status.assert_called_once_with(
+            compute_images_client, image_id, 'ACTIVE')
+        self.assertEqual(0, wait_for_server_status.call_count)
         compute_images_client.show_image.assert_called_once_with(image_id)
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
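
Note: the second test above exercises a wait_for_server flag on
create_image_from_server (added elsewhere in this series) that suppresses the
server-status wait, leaving only the image wait. Expected call shape, with
the same names the test uses:

    image = compute_base.BaseV2ComputeTest.create_image_from_server(
        mock.sentinel.server_id, wait_until='ACTIVE', wait_for_server=False)
    # wait_for_image_status fires once; wait_for_server_status never does.
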
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 46f9526..c2f622c 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -66,7 +66,7 @@
         client.show_volume = mock_show
         volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
         self.assertRaises(exceptions.VolumeRestoreErrorException,
-                          waiters.wait_for_volume_status,
+                          waiters.wait_for_volume_resource_status,
                           client, volume_id, 'available')
         mock_show.assert_has_calls([mock.call(volume_id),
                                     mock.call(volume_id)])
diff --git a/tempest/tests/lib/services/compute/test_servers_client.py b/tempest/tests/lib/services/compute/test_servers_client.py
index adfaaf2..b563ab2 100644
--- a/tempest/tests/lib/services/compute/test_servers_client.py
+++ b/tempest/tests/lib/services/compute/test_servers_client.py
@@ -172,6 +172,14 @@
         "traceback": "fake-trace-back"
     }
 
+    FAKE_SECURITY_GROUPS = [{
+        "description": "default",
+        "id": "3fb26eb3-581b-4420-9963-b0879a026506",
+        "name": "default",
+        "rules": [],
+        "tenant_id": "openstack"
+    }]
+
     FAKE_INSTANCE_WITH_EVENTS = copy.deepcopy(FAKE_INSTANCE_ACTIONS)
     FAKE_INSTANCE_WITH_EVENTS['events'] = [FAKE_INSTANCE_ACTION_EVENTS]
 
@@ -1009,3 +1017,18 @@
             server_id=self.server_id,
             type='fake-console-type'
             )
+
+    def test_list_security_groups_by_server_with_str_body(self):
+        self._test_list_security_groups_by_server()
+
+    def test_list_security_groups_by_server_with_bytes_body(self):
+        self._test_list_security_groups_by_server(True)
+
+    def _test_list_security_groups_by_server(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_security_groups_by_server,
+            'tempest.lib.common.rest_client.RestClient.get',
+            {'security_groups': self.FAKE_SECURITY_GROUPS},
+            bytes_body,
+            server_id=self.server_id,
+            )
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index ae2f2a3..a069a81 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -199,96 +199,3 @@
                           self._test_requires_ext_helper,
                           extension='enabled_ext',
                           service='bad_service')
-
-
-class TestConfigDecorators(BaseDecoratorsTest):
-    def setUp(self):
-        super(TestConfigDecorators, self).setUp()
-        cfg.CONF.set_default('nova', True, 'service_available')
-        cfg.CONF.set_default('glance', False, 'service_available')
-
-    def _assert_skip_message(self, func, skip_msg):
-        try:
-            func()
-            self.fail()
-        except testtools.TestCase.skipException as skip_exc:
-            self.assertEqual(skip_exc.args[0], skip_msg)
-
-    def _test_skip_unless_config(self, expected_to_skip=True, *decorator_args):
-
-        class TestFoo(test.BaseTestCase):
-            @config.skip_unless_config(*decorator_args)
-            def test_bar(self):
-                return 0
-
-        t = TestFoo('test_bar')
-        if expected_to_skip:
-            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
-            if (len(decorator_args) >= 3):
-                # decorator_args[2]: skip message specified
-                self._assert_skip_message(t.test_bar, decorator_args[2])
-        else:
-            try:
-                self.assertEqual(t.test_bar(), 0)
-            except testtools.TestCase.skipException:
-                # We caught a skipException but we didn't expect to skip
-                # this test so raise a hard test failure instead.
-                raise testtools.TestCase.failureException(
-                    "Not supposed to skip")
-
-    def _test_skip_if_config(self, expected_to_skip=True,
-                             *decorator_args):
-
-        class TestFoo(test.BaseTestCase):
-            @config.skip_if_config(*decorator_args)
-            def test_bar(self):
-                return 0
-
-        t = TestFoo('test_bar')
-        if expected_to_skip:
-            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
-            if (len(decorator_args) >= 3):
-                # decorator_args[2]: skip message specified
-                self._assert_skip_message(t.test_bar, decorator_args[2])
-        else:
-            try:
-                self.assertEqual(t.test_bar(), 0)
-            except testtools.TestCase.skipException:
-                # We caught a skipException but we didn't expect to skip
-                # this test so raise a hard test failure instead.
-                raise testtools.TestCase.failureException(
-                    "Not supposed to skip")
-
-    def test_skip_unless_no_group(self):
-        self._test_skip_unless_config(True, 'fake_group', 'an_option')
-
-    def test_skip_unless_no_option(self):
-        self._test_skip_unless_config(True, 'service_available',
-                                      'not_an_option')
-
-    def test_skip_unless_false_option(self):
-        self._test_skip_unless_config(True, 'service_available', 'glance')
-
-    def test_skip_unless_false_option_msg(self):
-        self._test_skip_unless_config(True, 'service_available', 'glance',
-                                      'skip message')
-
-    def test_skip_unless_true_option(self):
-        self._test_skip_unless_config(False,
-                                      'service_available', 'nova')
-
-    def test_skip_if_no_group(self):
-        self._test_skip_if_config(False, 'fake_group', 'an_option')
-
-    def test_skip_if_no_option(self):
-        self._test_skip_if_config(False, 'service_available', 'not_an_option')
-
-    def test_skip_if_false_option(self):
-        self._test_skip_if_config(False, 'service_available', 'glance')
-
-    def test_skip_if_true_option(self):
-        self._test_skip_if_config(True, 'service_available', 'nova')
-
-    def test_skip_if_true_option_msg(self):
-        self._test_skip_if_config(True, 'service_available', 'nova',
-                                  'skip message')