Merge "Scenario manager: remove some useless `_list_*` methods"
diff --git a/doc/source/test-removal.rst b/doc/source/test-removal.rst
index 79a5846..4757dc4 100644
--- a/doc/source/test-removal.rst
+++ b/doc/source/test-removal.rst
@@ -38,8 +38,10 @@
  #. The test proposed for removal has a failure rate <  0.50% in the gate over
     the past release (the value and interval will likely be adjusted in the
     future)
- #. There must not be an external user/consumer of tempest that depends on the
-    test proposed for removal
+
+    .. _`prong #3`:
+ #. There must not be an external user/consumer of tempest
+    that depends on the test proposed for removal
 
 The answers to 1 and 2 are easy to verify. For 1 just provide a link to the new
 test location. If you are linking to the tempest removal patch please also put
@@ -133,6 +135,10 @@
  #. A revert for a patch which added a broken test, or testing which didn't
     actually run in the gate (basically any revert for something which
     shouldn't have been added)
+ #. Tests that would become out of scope as a consequence of an API change,
+    as described in `API Compatibility`_.
+    Such tests cannot live in Tempest because of the branchless nature of
+    Tempest. Such tests must still honor `prong #3`_.
 
 For the first exception type the only types of testing in tree which have been
 declared out of scope at this point are:
@@ -149,7 +155,7 @@
 Tempest Scope
 ^^^^^^^^^^^^^
 
-Also starting in the liberty cycle tempest has defined a set of projects which
+Starting in the liberty cycle tempest has defined a set of projects which
 are defined as in scope for direct testing in tempest. As of today that list
 is:
 
@@ -166,3 +172,17 @@
 to maintain continuity after migrating the tests out of tempest.
 
 .. _tempest plugin mechanism: http://docs.openstack.org/developer/tempest/plugin.html
+
+API Compatibility
+"""""""""""""""""
+
+If an API introduces a non-discoverable, backward-incompatible change, and
+that change is not backported to all versions supported by Tempest, tests for
+that API can no longer live in Tempest.
+This is because tests would not be able to know or control which API response
+to expect, and thus would not be able to enforce a specific behavior.
+
+If a test exists in Tempest that would meet these criteria as a consequence
+of a change, the test must be removed according to the procedure discussed in
+this document. The API change should not be merged until all conditions
+required for test removal can be met.
\ No newline at end of file
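
To make the rationale above concrete, here is a hypothetical sketch of why a
branchless test cannot cope with such a change. The client, field and values
are invented for illustration only:

    # Hypothetical example; ``widgets_client`` and the 'size' field do
    # not belong to any real API.
    def test_show_widget(self):
        widget = self.widgets_client.show_widget(self.widget_id)['widget']
        # Suppose pre-change clouds return 'size' as the string '10' and
        # post-change clouds return the integer 10, with no microversion
        # or discovery flag to branch on. A branchless test must pick a
        # single expectation, and whichever one it picks fails against
        # some of the releases Tempest supports:
        self.assertEqual(10, widget['size'])
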
diff --git a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml b/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
index 484d543..d07448a 100644
--- a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
+++ b/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
@@ -4,7 +4,7 @@
     plugins to declare and automatically register any service client defined
     in the plugin.
   - tempest.lib exposes a new stable interface, the clients module and
-    ServiceClients class, which provides a convinient way for plugin tests to
+    ServiceClients class, which provides a convenient way for plugin tests to
     access service clients defined in Tempest as well as service clients
     defined in all loaded plugins.
     The new ServiceClients class only exposes for now the service clients
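
A rough sketch of the new stable interface described in this note follows.
Credential construction is elided (``creds`` and ``uri`` stand in for real
credentials and an identity endpoint), and attribute-style access to the
registered client factories is an assumption, not a guaranteed spelling:

    from tempest.lib.services import clients

    # Instantiate the stable entry point; service client factories
    # registered by Tempest and by loaded plugins hang off this object.
    service_clients = clients.ServiceClients(credentials=creds,
                                             identity_uri=uri)
    # For example, the compute servers client defined in Tempest:
    servers_client = service_clients.compute.ServersClient()
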
diff --git a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml b/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
index 543cf7b..52c04af 100644
--- a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
+++ b/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
@@ -2,4 +2,4 @@
 deprecations:
   - The ``call_until_true`` function is moved from the ``tempest.test`` module
    to the ``tempest.lib.common.utils.test_utils`` module. Backward
-   compatibilty is preserved until Ocata.
+   compatibility is preserved until Ocata.
diff --git a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml b/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
index 20f310d..813e47f 100644
--- a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
+++ b/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
@@ -1,5 +1,5 @@
 ---
 upgrade:
-  - the already depreacted tempest-cleanup standalone command has been
+  - the already deprecated tempest-cleanup standalone command has been
     removed. The corresponding functionalities can be accessed through
     the unified `tempest` command (`tempest cleanup`).
diff --git a/releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
similarity index 100%
rename from releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
rename to releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
diff --git a/releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml b/releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
similarity index 100%
rename from releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml
rename to releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
diff --git a/releasenotes/notes/add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml b/releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
similarity index 100%
rename from releasenotes/notes/add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
rename to releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
diff --git a/releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml b/releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
similarity index 100%
rename from releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
rename to releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
diff --git a/releasenotes/notes/deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml b/releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
rename to releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
diff --git a/releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml b/releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
rename to releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
diff --git a/releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml b/releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
rename to releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
diff --git a/releasenotes/notes/jsonschema-validator-2377ba131e12d3c7.yaml b/releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
similarity index 100%
rename from releasenotes/notes/jsonschema-validator-2377ba131e12d3c7.yaml
rename to releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
diff --git a/releasenotes/notes/remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
diff --git a/releasenotes/notes/remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
diff --git a/releasenotes/notes/remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml b/releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
diff --git a/releasenotes/notes/remove-deprecated-network-config-options-f9ce276231578fe6.yaml b/releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-network-config-options-f9ce276231578fe6.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
diff --git a/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
new file mode 100644
index 0000000..5555949
--- /dev/null
+++ b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
@@ -0,0 +1,13 @@
+---
+prelude: >
+    This release marks the start of Ocata release support in Tempest
+other:
+  - |
+    OpenStack releases supported at this time are **Mitaka**, **Newton**,
+    and **Ocata**.
+
+    The release under current development as of this tag is Pike,
+    meaning that every Tempest commit is also tested against master during
+    the Pike cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Pike (or future releases)
+    cloud.
diff --git a/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml b/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
new file mode 100644
index 0000000..67f9541
--- /dev/null
+++ b/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add the list security groups by server API to the servers_client
+    library. This feature makes it possible to list the security
+    groups applied to a given server instance.
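
A minimal usage sketch of the new call, assuming an initialized
``servers_client`` and an existing ``server_id`` (this mirrors the test
added to test_security_groups.py later in this change):

    # List the security groups currently applied to one server and pull
    # out their ids for verification.
    fetched_groups = servers_client.list_security_groups_by_server(
        server_id)['security_groups']
    fetched_ids = [sg['id'] for sg in fetched_groups]
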
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 0a9db46..c3c88a5 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -101,7 +101,7 @@
             # check some extensions for the flavor create/show/detail response
             self.assertEqual(flavor['swap'], '')
             self.assertEqual(int(flavor['rxtx_factor']), 1)
-            self.assertEqual(int(flavor['OS-FLV-EXT-DATA:ephemeral']), 0)
+            self.assertEqual(flavor['OS-FLV-EXT-DATA:ephemeral'], 0)
             self.assertEqual(flavor['os-flavor-access:is_public'], True)
 
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index bd72d13..9fe1f74 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -26,6 +26,8 @@
     Add and remove Flavor Access require admin privileges.
     """
 
+    credentials = ['primary', 'admin', 'alt']
+
     @classmethod
     def skip_checks(cls):
         super(FlavorsAccessNegativeTestJSON, cls).skip_checks()
@@ -151,4 +153,4 @@
         self.assertRaises(lib_exc.NotFound,
                           self.admin_flavors_client.remove_flavor_access,
                           new_flavor['id'],
-                          data_utils.rand_uuid())
+                          self.os_alt.servers_client.tenant_id)
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 39797f7..3ffd238 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -45,7 +45,6 @@
     def setup_clients(cls):
         super(LiveBlockMigrationTestJSON, cls).setup_clients()
         cls.admin_hosts_client = cls.os_adm.hosts_client
-        cls.admin_servers_client = cls.os_adm.servers_client
         cls.admin_migration_client = cls.os_adm.migrations_client
 
     @classmethod
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 21f5c68..aa75348 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -31,7 +31,6 @@
         super(MigrationsAdminTest, cls).setup_clients()
         cls.client = cls.os_adm.migrations_client
         cls.flavors_admin_client = cls.os_adm.flavors_client
-        cls.admin_servers_client = cls.os_adm.servers_client
 
     @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
     def test_list_migrations(self):
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 0850205..ca8382f 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -87,6 +87,7 @@
 
     @decorators.skip_because(bug="1186354",
                              condition=CONF.service_available.neutron)
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('7c6c8f3b-2bf6-4918-b240-57b136a66aa0')
     @test.services('network')
     def test_security_groups_exceed_limit(self):
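
The decorator stacking added here, and repeated in several files below,
follows one pattern: tag the test as negative next to its idempotent id.
A sketch with a placeholder UUID (the test body is illustrative only):

    from tempest.api.compute import base
    from tempest.lib import decorators
    from tempest import test

    class ExampleNegativeTest(base.BaseV2ComputeAdminTest):

        @test.attr(type=['negative'])
        @decorators.idempotent_id('00000000-0000-0000-0000-000000000000')
        def test_operation_is_rejected(self):
            # A real test would assert that the API refuses the request,
            # e.g. via self.assertRaises(lib_exc.Forbidden, ...).
            pass
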
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index b53ced9..13a411b 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -45,16 +45,6 @@
         server = cls.create_test_server(wait_until='ACTIVE')
         cls.s1_id = server['id']
 
-    def _get_unused_flavor_id(self):
-        flavor_id = data_utils.rand_int_id(start=1000)
-        while True:
-            try:
-                self.flavors_client.show_flavor(flavor_id)
-            except lib_exc.NotFound:
-                break
-            flavor_id = data_utils.rand_int_id(start=1000)
-        return flavor_id
-
     @decorators.idempotent_id('28dcec23-f807-49da-822c-56a92ea3c687')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
@@ -62,22 +52,16 @@
     def test_resize_server_using_overlimit_ram(self):
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
-        flavor_name = data_utils.rand_name("flavor")
-        flavor_id = self._get_unused_flavor_id()
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
-        ram = int(quota_set['ram'])
+        ram = quota_set['ram']
         if ram == -1:
             raise self.skipException("ram quota set is -1,"
                                      " cannot test overlimit")
         ram += 1
         vcpus = 1
         disk = 5
-        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
-                                                       ram=ram, vcpus=vcpus,
-                                                       disk=disk,
-                                                       id=flavor_id)['flavor']
-        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
+        flavor_ref = self.create_flavor(ram=ram, vcpus=vcpus, disk=disk)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.client.resize_server,
                           self.servers[0]['id'],
@@ -90,22 +74,16 @@
     def test_resize_server_using_overlimit_vcpus(self):
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
-        flavor_name = data_utils.rand_name("flavor")
-        flavor_id = self._get_unused_flavor_id()
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
-        vcpus = int(quota_set['cores'])
+        vcpus = quota_set['cores']
         if vcpus == -1:
             raise self.skipException("cores quota set is -1,"
                                      " cannot test overlimit")
         vcpus += 1
         ram = 512
         disk = 5
-        flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
-                                                       ram=ram, vcpus=vcpus,
-                                                       disk=disk,
-                                                       id=flavor_id)['flavor']
-        self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
+        flavor_ref = self.create_flavor(ram=ram, vcpus=vcpus, disk=disk)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.client.resize_server,
                           self.servers[0]['id'],
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 5f2444a..45472df 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -38,12 +38,6 @@
         if not CONF.compute_feature_enabled.swap_volume:
             raise cls.skipException("Swapping volumes is not supported.")
 
-    @classmethod
-    def setup_clients(cls):
-        super(TestVolumeSwap, cls).setup_clients()
-        # We need the admin client for performing the update (swap) volume call
-        cls.servers_admin_client = cls.os_adm.servers_client
-
     @decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
     @test.services('volume')
     def test_volume_swap(self):
@@ -58,12 +52,12 @@
         # Attach "volume1" to server
         self.attach_volume(server, volume1)
         # Swap volume from "volume1" to "volume2"
-        self.servers_admin_client.update_attached_volume(
+        self.admin_servers_client.update_attached_volume(
             server['id'], volume1['id'], volumeId=volume2['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume1['id'], 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume2['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume1['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'in-use')
         self.addCleanup(self.servers_client.detach_volume,
                         server['id'], volume2['id'])
         # Verify "volume2" is attached to the server
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 1f85c18..905bc3d 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -32,25 +32,22 @@
             raise cls.skipException(skip_msg)
 
     @classmethod
-    def setup_clients(cls):
-        super(VolumesAdminNegativeTest, cls).setup_clients()
-        cls.servers_admin_client = cls.os_adm.servers_client
-
-    @classmethod
     def resource_setup(cls):
         super(VolumesAdminNegativeTest, cls).resource_setup()
         cls.server = cls.create_test_server(wait_until='ACTIVE')
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.servers_admin_client.update_attached_volume,
+                          self.admin_servers_client.update_attached_volume,
                           self.server['id'], nonexistent_volume,
                           volumeId=volume['id'])
 
     @test.related_bug('1629110', status_code=400)
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
         volume = self.create_volume()
@@ -58,6 +55,6 @@
 
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.BadRequest,
-                          self.servers_admin_client.update_attached_volume,
+                          self.admin_servers_client.update_attached_volume,
                           self.server['id'], volume['id'],
                           volumeId=nonexistent_volume)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index c636894..706b859 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -326,6 +326,10 @@
                     raise
             image = cls.compute_images_client.show_image(image_id)['image']
 
+            if kwargs['wait_until'] == 'ACTIVE':
+                if kwargs.get('wait_for_server', True):
+                    waiters.wait_for_server_status(cls.servers_client,
+                                                   server_id, 'ACTIVE')
         return image
 
     @classmethod
@@ -406,8 +410,8 @@
             kwargs['imageRef'] = image_ref
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], 'available')
         return volume
 
     @classmethod
@@ -446,15 +450,15 @@
         # On teardown detach the volume and wait for it to be available. This
         # is so we don't error out when trying to delete the volume during
         # teardown.
-        self.addCleanup(waiters.wait_for_volume_status,
+        self.addCleanup(waiters.wait_for_volume_resource_status,
                         self.volumes_client, volume['id'], 'available')
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.servers_client.detach_volume,
                         server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
 
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -468,6 +472,7 @@
         cls.availability_zone_admin_client = (
             cls.os_adm.availability_zone_client)
         cls.admin_flavors_client = cls.os_adm.flavors_client
+        cls.admin_servers_client = cls.os_adm.servers_client
 
     def create_flavor(self, ram, vcpus, disk, name=None,
                       is_public='True', **kwargs):
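
The ``wait_for_server`` keyword handled in ``create_image_from_server``
above lets a test wait for the snapshot image to go ACTIVE without
re-waiting on the server itself. A sketch of the call as used later in
this change (inside a compute test method):

    # Snapshot the server and wait for the image to become ACTIVE, but
    # skip the extra wait on the server status.
    image = self.create_image_from_server(server['id'],
                                          name=snapshot_name,
                                          wait_until='ACTIVE',
                                          wait_for_server=False)
    self.addCleanup(self.client.delete_image, image['id'])
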
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index a70c0a9..b313f44 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -21,6 +21,7 @@
 from tempest.common import image as common_image
 from tempest.common.utils import data_utils
 from tempest import config
+from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 from tempest import test
 
@@ -43,7 +44,7 @@
 
     @test.attr(type=['negative'])
     @test.services('image')
-    @test.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
+    @decorators.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
     def test_boot_with_low_ram(self):
         """Try boot a vm with lower than min ram
 
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index d9db0b5..a0c860a 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -60,6 +60,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         image = self.create_image_from_server(server['id'],
                                               name=snapshot_name,
-                                              wait_until='ACTIVE')
+                                              wait_until='ACTIVE',
+                                              wait_for_server=False)
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
diff --git a/tempest/api/compute/limits/test_absolute_limits_negative.py b/tempest/api/compute/limits/test_absolute_limits_negative.py
index b9ae0c6..21b4b1c 100644
--- a/tempest/api/compute/limits/test_absolute_limits_negative.py
+++ b/tempest/api/compute/limits/test_absolute_limits_negative.py
@@ -41,11 +41,11 @@
         max_meta = limits['absolute']['maxImageMeta']
 
         # No point in running this test if there is no limit.
-        if int(max_meta) == -1:
+        if max_meta == -1:
             raise self.skipException('no limit for maxImageMeta')
 
         # Create server should fail, since we are passing > metadata Limit!
-        max_meta_data = int(max_meta) + 1
+        max_meta_data = max_meta + 1
 
         meta_data = {}
         for xx in range(max_meta_data):
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index e070336..e90a1fc 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -144,3 +144,31 @@
                          ['security_group'])
         self.assertEqual(s_new_name, fetched_group['name'])
         self.assertEqual(s_new_des, fetched_group['description'])
+
+    @decorators.idempotent_id('79517d60-535a-438f-af3d-e6feab1cbea7')
+    @test.services('network')
+    def test_list_security_groups_by_server(self):
+        # Create a couple security groups that we will use
+        # for the server resource this test creates
+        sg = self.create_security_group()
+        sg2 = self.create_security_group()
+        assigned_security_groups_ids = [sg['id'], sg2['id']]
+        # Create server and add the security group created
+        # above to the server we just created
+        server_id = self.create_test_server(wait_until='ACTIVE')['id']
+        # add security groups to server
+        self.servers_client.add_security_group(server_id, name=sg['name'])
+        self.servers_client.add_security_group(server_id, name=sg2['name'])
+
+        # list security groups for a server
+        fetched_groups = (
+            self.servers_client.list_security_groups_by_server(
+                server_id)['security_groups'])
+        fetched_security_groups_ids = [i['id'] for i in fetched_groups]
+        # verifying the security groups ids in list
+        missing_security_groups =\
+            [p for p in assigned_security_groups_ids
+             if p not in fetched_security_groups_ids]
+        self.assertEmpty(missing_security_groups,
+                         "Failed to find security_groups %s in fetched list" %
+                         ', '.join(missing_security_groups))
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 9bba733..e0c8887 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -79,7 +79,6 @@
 
     def _check_interface(self, iface, port_id=None, network_id=None,
                          fixed_ip=None, mac_addr=None):
-        self.assertIn('port_state', iface)
         if port_id:
             self.assertEqual(iface['port_id'], port_id)
         if network_id:
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index a94c20b..20646d1 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -254,7 +254,6 @@
             self.flavor_ref)['flavor']
 
         def create_flavor_with_ephemeral(ephem_disk):
-            flavor_id = data_utils.rand_int_id(start=1000)
             name = 'flavor_with_ephemeral_%s' % ephem_disk
             flavor_name = data_utils.rand_name(name)
 
@@ -263,17 +262,10 @@
             disk = flavor_base['disk']
 
             # Create a flavor with ephemeral disk
-            flavor = self.flavor_client.create_flavor(
-                name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
-                id=flavor_id, ephemeral=ephem_disk)['flavor']
-            self.addCleanup(flavor_clean_up, flavor['id'])
-
+            flavor = self.create_flavor(name=flavor_name, ram=ram, vcpus=vcpus,
+                                        disk=disk, ephemeral=ephem_disk)
             return flavor['id']
 
-        def flavor_clean_up(flavor_id):
-            self.flavor_client.delete_flavor(flavor_id)
-            self.flavor_client.wait_for_resource_deletion(flavor_id)
-
         flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
         flavor_no_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=0)
 
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 83b2e1b..8ed55e0 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -115,8 +115,8 @@
 
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 04fe11f..3f6abab 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -1,4 +1,4 @@
-# Copyright 2016 OpenStack Foundation
+# Copyright 2016-2017 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,6 +26,11 @@
 
 CONF = config.CONF
 
+if six.PY2:
+    ord_func = ord
+else:
+    ord_func = int
+
 
 class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
 
@@ -60,14 +65,19 @@
         resp = urllib3.PoolManager().request('GET', vnc_url)
         # Make sure that the GET request was accepted by the novncproxy
         self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
-                         'initial call: ' + str(resp.status))
+                         'initial call: ' + six.text_type(resp.status))
         # Do some basic validation to make sure it is an expected HTML document
-        self.assertTrue('<html>' in resp.data and '</html>' in resp.data,
-                        'Not a valid html document in the response.')
+        resp_data = resp.data.decode()
+        self.assertIn('<html>', resp_data,
+                      'Not a valid html document in the response.')
+        self.assertIn('</html>', resp_data,
+                      'Not a valid html document in the response.')
         # Just try to make sure we got JavaScript back for noVNC, since we
         # won't actually use it since not inside of a browser
-        self.assertTrue('noVNC' in resp.data and '<script' in resp.data,
-                        'Not a valid noVNC javascript html document.')
+        self.assertIn('noVNC', resp_data,
+                      'Not a valid noVNC javascript html document.')
+        self.assertIn('<script', resp_data,
+                      'Not a valid noVNC javascript html document.')
 
     def _validate_rfb_negotiation(self):
         """Verify we can connect to novnc and do the websocket connection."""
@@ -82,14 +92,14 @@
                                    int(data[8:11], base=10)))
         self.assertTrue(version >= 3.3, 'Bad RFB Version: ' + str(version))
         # Send our RFB version to the server, which we will just go with 3.3
-        self._websocket.send_frame(str(data))
+        self._websocket.send_frame(data)
         # Get the server authentication type and make sure None is supported
         data = self._websocket.receive_frame()
         self.assertIsNotNone(data, 'Expected authentication type None.')
         self.assertGreaterEqual(
             len(data), 2, 'Expected authentication type None.')
         self.assertIn(
-            1, [ord(data[i + 1]) for i in range(ord(data[0]))],
+            1, [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
             'Expected authentication type None.')
         # Send to the server that we only support authentication type None
         self._websocket.send_frame(six.int2byte(1))
@@ -98,7 +108,7 @@
         self.assertEqual(
             len(data), 4, 'Server did not think security was successful.')
         self.assertEqual(
-            [ord(i) for i in data], [0, 0, 0, 0],
+            [ord_func(i) for i in data], [0, 0, 0, 0],
             'Server did not think security was successful.')
         # Say to leave the desktop as shared as part of client initialization
         self._websocket.send_frame(six.int2byte(1))
@@ -121,12 +131,12 @@
 
     def _validate_websocket_upgrade(self):
         self.assertTrue(
-            self._websocket.response.startswith('HTTP/1.1 101 Switching '
-                                                'Protocols\r\n'),
+            self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
+                                                b'Protocols\r\n'),
             'Did not get the expected 101 on the websockify call: '
-            + str(len(self._websocket.response)))
+            + six.text_type(self._websocket.response))
         self.assertTrue(
-            self._websocket.response.find('Server: WebSockify') > 0,
+            self._websocket.response.find(b'Server: WebSockify') > 0,
             'Did not get the expected WebSocket HTTP Response.')
 
     def _create_websocket(self, url):
@@ -187,8 +197,8 @@
             # frames less than 125 bytes here (for the negotiation) and
             # that only the 2nd byte contains the length, and since the
             # server doesn't do masking, we can just read the data length
-            if ord(header[1]) & 127 > 0:
-                return self._socket.recv(ord(header[1]) & 127)
+            if ord_func(header[1]) & 127 > 0:
+                return self._socket.recv(ord_func(header[1]) & 127)
 
     def send_frame(self, data):
         """Wrapper for sending data to add in the WebSocket frame format."""
@@ -205,7 +215,7 @@
             frame_bytes.append(mask[i])
         # Mask each of the actual data bytes that we are going to send
         for i in range(len(data)):
-            frame_bytes.append(ord(data[i]) ^ mask[i % 4])
+            frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
         # Convert our integer list to a binary array of bytes
         frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
         self._socket.sendall(frame_bytes)
@@ -233,9 +243,9 @@
         # We are choosing to use binary even though browser may do Base64
         reqdata += 'Sec-WebSocket-Protocol: binary\r\n\r\n'
         # Send the HTTP GET request and get the response back
-        self._socket.sendall(reqdata)
+        self._socket.sendall(reqdata.encode('utf8'))
         self.response = data = self._socket.recv(4096)
         # Loop through & concatenate all of the data in the response body
-        while len(data) > 0 and self.response.find('\r\n\r\n') < 0:
+        while len(data) > 0 and self.response.find(b'\r\n\r\n') < 0:
             data = self._socket.recv(4096)
             self.response += data
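
A standalone illustration of why the ``ord_func`` shim introduced at the
top of this file is needed: indexing a byte string yields a one-character
``str`` on Python 2 but an ``int`` on Python 3.

    import six

    data = b'\x01\x7f'
    if six.PY2:
        ord_func = ord   # data[0] is '\x01', a length-1 str
    else:
        ord_func = int   # data[0] is already the int 1
    assert ord_func(data[0]) == 1
    assert ord_func(data[1]) & 127 == 127
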
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 957d24a..90b9da4 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -97,7 +97,7 @@
         max_file_limit = limits['absolute']['maxPersonality']
         if max_file_limit == -1:
             raise self.skipException("No limit for personality files")
-        for i in range(0, int(max_file_limit) + 1):
+        for i in range(0, max_file_limit + 1):
             path = 'etc/test' + str(i) + '.txt'
             personality.append({'path': path,
                                 'contents': base64.encode_as_text(
@@ -117,7 +117,7 @@
         if max_file_limit == -1:
             raise self.skipException("No limit for personality files")
         person = []
-        for i in range(0, int(max_file_limit)):
+        for i in range(0, max_file_limit):
             # NOTE(andreaf) The cirros disk image is blank before boot
             # so we can only inject safely to /
             path = '/test' + str(i) + '.txt'
diff --git a/tempest/api/compute/test_live_block_migration_negative.py b/tempest/api/compute/test_live_block_migration_negative.py
index 40d0746..01fd9ef 100644
--- a/tempest/api/compute/test_live_block_migration_negative.py
+++ b/tempest/api/compute/test_live_block_migration_negative.py
@@ -31,11 +31,6 @@
         if not CONF.compute_feature_enabled.live_migration:
             raise cls.skipException("Live migration is not enabled")
 
-    @classmethod
-    def setup_clients(cls):
-        super(LiveBlockMigrationNegativeTestJSON, cls).setup_clients()
-        cls.admin_servers_client = cls.os_adm.servers_client
-
     def _migrate_server_to(self, server_id, dest_host):
         bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         self.admin_servers_client.live_migrate_server(
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index cbe7178..5304944 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -64,8 +64,8 @@
     def _detach_volume(self, server_id, volume_id):
         try:
             self.servers_client.detach_volume(server_id, volume_id)
-            waiters.wait_for_volume_status(self.volumes_client,
-                                           volume_id, 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume_id, 'available')
         except lib_exc.NotFound:
             LOG.warning("Unable to detach volume %s from server %s "
                         "possibly it was already detached", volume_id,
@@ -78,8 +78,8 @@
             kwargs.update({'device': '/dev/%s' % device})
         attachment = self.servers_client.attach_volume(
             server_id, **kwargs)['volumeAttachment']
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
         self.addCleanup(self._detach_volume, server_id,
                         volume_id)
 
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index 3d5d23b..4b06867 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -54,9 +54,9 @@
             display_name=s_name)['snapshot']
 
         def delete_snapshot(snapshot_id):
-            waiters.wait_for_snapshot_status(self.snapshots_client,
-                                             snapshot_id,
-                                             'available')
+            waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                    snapshot_id,
+                                                    'available')
             # Delete snapshot
             self.snapshots_client.delete_snapshot(snapshot_id)
             self.snapshots_client.wait_for_resource_deletion(snapshot_id)
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 63c247e..0eaa359 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -57,7 +57,8 @@
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
         # Wait for Volume status to become ACTIVE
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client, volume['id'],
+                                                'available')
         # GET Volume
         fetched_volume = self.client.show_volume(volume['id'])['volume']
         # Verification of details of fetched Volume
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 56b3517..2812c68 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -164,7 +164,6 @@
         cls.client.store_image_file(image['id'], data=image_file)
         # Keep the data of one test image so it can be used to filter lists
         cls.test_data = image
-        cls.test_data['size'] = size
 
         return image['id']
 
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 435e672..2c639da 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -39,6 +39,7 @@
             msg = "quotas extension not enabled."
             raise cls.skipException(msg)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
     def test_network_quota_exceeding(self):
         # Set the network quota to two
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 2174940..52b3978 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -125,6 +125,7 @@
 
     @decorators.idempotent_id('b277257f-113c-4499-b8d1-5fead79f7360')
     @test.requires_ext(extension='formpost', service='object')
+    @test.attr(type=['negative'])
     def test_post_object_using_form_invalid_signature(self):
         self.key = "Wrong"
         body, content_type = self.get_multipart_form()
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 1b97e4a..c3e904a 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -74,8 +74,8 @@
         else:
             cls.volume_id_list_without_prefix.append(
                 cls.volume['id'])
-        waiters.wait_for_volume_status(cls.admin_volume_client,
-                                       cls.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.admin_volume_client,
+                                                cls.volume['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 5a83ae3..83fca45 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -114,7 +114,7 @@
         volume_default = quota_set_default['volumes']
 
         self.admin_quotas_client.update_quota_set(
-            project_id, volumes=(int(volume_default) + 5))
+            project_id, volumes=(volume_default + 5))
 
         self.admin_quotas_client.delete_quota_set(project_id)
         quota_set_new = (self.admin_quotas_client.show_quota_set(project_id)
@@ -146,7 +146,7 @@
             transfer_id, auth_key=auth_key)['transfer']
 
         # Verify volume transferred is available
-        waiters.wait_for_volume_status(
+        waiters.wait_for_volume_resource_status(
             self.alt_client, volume['id'], 'available')
 
         # List of tenants quota usage post transfer
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 213723c..5d08416 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -58,14 +58,14 @@
                          "to the requested name")
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Update volume with new volume_type
         self.volumes_client.retype_volume(volume['id'],
                                           new_type=volume_types[1]['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Get volume details and Verify
         fetched_volume = self.volumes_client.show_volume(
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 1dce7e0..5f590bc 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -17,6 +17,7 @@
 from tempest.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
+from tempest import test
 
 
 class ExtraSpecsNegativeV2Test(base.BaseVolumeAdminTest):
@@ -27,6 +28,7 @@
         extra_specs = {"spec1": "val1"}
         cls.volume_type = cls.create_volume_type(extra_specs=extra_specs)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
@@ -35,6 +37,7 @@
             self.admin_volume_types_client.update_volume_type_extra_specs,
             self.volume_type['id'], "spec1", None)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
     def test_update_nonexistent_extra_spec_id(self):
         # Should not update volume type extra specs with nonexistent id.
@@ -45,6 +48,7 @@
             self.volume_type['id'], data_utils.rand_uuid(),
             extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
     def test_update_none_extra_spec_id(self):
         # Should not update volume type extra specs with none id.
@@ -54,6 +58,7 @@
             self.admin_volume_types_client.update_volume_type_extra_specs,
             self.volume_type['id'], None, extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
@@ -65,6 +70,7 @@
             self.volume_type['id'], list(extra_spec)[0],
             extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
         # Should not create volume type extra spec for nonexistent volume
@@ -75,6 +81,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             data_utils.rand_uuid(), extra_specs)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
     def test_create_none_body(self):
         # Should not create volume type extra spec for none POST body.
@@ -83,6 +90,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             self.volume_type['id'], None)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
     def test_create_invalid_body(self):
         # Should not create volume type extra spec for invalid POST body.
@@ -91,6 +99,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             self.volume_type['id'], extra_specs=['invalid'])
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
         # Should not delete volume type extra spec for nonexistent
@@ -100,6 +109,7 @@
             self.admin_volume_types_client.delete_volume_type_extra_specs,
             data_utils.rand_uuid(), "spec1")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
     def test_list_nonexistent_volume_type_id(self):
         # Should not list volume type extra spec for nonexistent type id.
@@ -108,6 +118,7 @@
             self.admin_volume_types_client.list_volume_types_extra_specs,
             data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
     def test_get_nonexistent_volume_type_id(self):
         # Should not get volume type extra spec for nonexistent type id.
@@ -116,6 +127,7 @@
             self.admin_volume_types_client.show_volume_type_extra_specs,
             data_utils.rand_uuid(), "spec1")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
     def test_get_nonexistent_extra_spec_id(self):
         # Should not get volume type extra spec for nonexistent extra spec
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index e8694b2..69e9cc0 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -17,10 +17,12 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
+from tempest import test
 
 
 class VolumeTypesNegativeV2Test(base.BaseVolumeAdminTest):
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('b48c98f2-e662-4885-9b71-032256906314')
     def test_create_with_nonexistent_volume_type(self):
         # Should not be able to create volume with nonexistent volume_type.
@@ -30,6 +32,7 @@
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.create_volume, **params)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('878b4e57-faa2-4659-b0d1-ce740a06ae81')
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
@@ -37,6 +40,7 @@
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type, name='')
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('994610d6-0476-4018-a644-a2602ef5d4aa')
     def test_get_nonexistent_type_id(self):
         # Should not be able to get volume type with nonexistent type id.
@@ -44,6 +48,7 @@
                           self.admin_volume_types_client.show_volume_type,
                           data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('6b3926d2-7d73-4896-bc3d-e42dfd11a9f6')
     def test_delete_nonexistent_type_id(self):
         # Should not be able to delete volume type with nonexistent type id.
@@ -51,6 +56,7 @@
                           self.admin_volume_types_client.delete_volume_type,
                           data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('8c09f849-f225-4d78-ba87-bffd9a5e0c6f')
     def test_create_volume_with_private_volume_type(self):
         # Should not be able to create volume with private volume type.
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 04d27ea..13b7384 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -94,8 +94,9 @@
         self.addCleanup(self._delete_backup, new_id)
         self.assertIn("id", import_backup)
         self.assertEqual(new_id, import_backup['id'])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')
 
         # Verify Import Backup
         backups = self.admin_backups_client.list_backups(
@@ -108,14 +109,16 @@
         self.addCleanup(self.admin_volume_client.delete_volume,
                         restore['volume_id'])
         self.assertEqual(backup['id'], restore['backup_id'])
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       restore['volume_id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                restore['volume_id'],
+                                                'available')
 
         # Verify if restored volume is there in volume list
         volumes = self.admin_volume_client.list_volumes()['volumes']
         self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')
 
     @decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
     def test_volume_backup_reset_status(self):
@@ -131,8 +134,8 @@
         # Reset backup status to error
         self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
                                                       status="error")
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       backup['id'], 'error')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                backup['id'], 'error')
 
 
 class VolumesBackupsAdminV1Test(VolumesBackupsAdminV2Test):
diff --git a/tempest/api/volume/admin/v2/test_snapshot_manage.py b/tempest/api/volume/admin/v2/test_snapshot_manage.py
index 1114924..eed7dd1 100644
--- a/tempest/api/volume/admin/v2/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/v2/test_snapshot_manage.py
@@ -65,9 +65,9 @@
                         self.admin_snapshots_client, new_snapshot['id'])
 
         # Wait for the snapshot to be available after manage operation
-        waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                         new_snapshot['id'],
-                                         'available')
+        waiters.wait_for_volume_resource_status(self.admin_snapshots_client,
+                                                new_snapshot['id'],
+                                                'available')
 
         # Verify the managed snapshot has the expected parent volume
         self.assertEqual(new_snapshot['volume_id'], volume['id'])
diff --git a/tempest/api/volume/admin/v2/test_volumes_list.py b/tempest/api/volume/admin/v2/test_volumes_list.py
index b0a37fb..6bab373 100644
--- a/tempest/api/volume/admin/v2/test_volumes_list.py
+++ b/tempest/api/volume/admin/v2/test_volumes_list.py
@@ -45,8 +45,8 @@
         # Create a volume in admin tenant
         adm_vol = self.admin_volume_client.create_volume(
             size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       adm_vol['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                adm_vol['id'], 'available')
         self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
         params = {'all_tenants': 1,
                   'project_id': self.volumes_client.tenant_id}
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 98e050e..f8c435f 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -131,8 +131,8 @@
 
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client, volume['id'],
-                                       wait_until)
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], wait_until)
         return volume
 
     @classmethod
@@ -146,8 +146,8 @@
         snapshot = cls.snapshots_client.create_snapshot(
             volume_id=volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
-        waiters.wait_for_snapshot_status(cls.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.snapshots_client,
+                                                snapshot['id'], 'available')
         return snapshot
 
     def create_backup(self, volume_id, backup_client=None, **kwargs):
@@ -158,8 +158,8 @@
         backup = backup_client.create_backup(
             volume_id=volume_id, **kwargs)['backup']
         self.addCleanup(backup_client.delete_backup, backup['id'])
-        waiters.wait_for_backup_status(backup_client, backup['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(backup_client, backup['id'],
+                                                'available')
         return backup
 
     # NOTE(afazekas): these create_* and clean_* could be defined
@@ -182,10 +182,10 @@
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
-                        volume_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status,
+                        self.volumes_client, volume_id, 'available')
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)
 
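The cleanup ordering in the attach helper above is deliberate: addCleanup callbacks run in LIFO order, so the detach (registered last) executes before the waiter that confirms the volume returned to 'available'. A minimal, self-contained sketch of that ordering, using plain unittest and illustrative event names:

    import unittest

    class CleanupOrderExample(unittest.TestCase):
        """Shows the LIFO cleanup order the attach helper relies on."""

        def test_lifo_cleanup(self):
            events = []
            # Registered first, so it runs last: the wait for 'available'.
            self.addCleanup(events.append, 'wait_available')
            # Registered second, so it runs first: the detach itself.
            self.addCleanup(events.append, 'detach')
            # Run the queued cleanups now so the order can be asserted on.
            self.doCleanups()
            self.assertEqual(['detach', 'wait_available'], events)

    if __name__ == '__main__':
        unittest.main()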
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 5477770..9f63b14 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -43,8 +43,8 @@
             volume_id=volume['id'])['transfer']
         transfer_id = transfer['id']
         auth_key = transfer['auth_key']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')
 
         # Get a volume transfer
         body = self.client.show_volume_transfer(transfer_id)['transfer']
@@ -58,8 +58,8 @@
         # Accept a volume transfer by alt_tenant
         body = self.alt_client.accept_volume_transfer(
             transfer_id, auth_key=auth_key)['transfer']
-        waiters.wait_for_volume_status(self.alt_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.alt_client,
+                                                volume['id'], 'available')
 
     @decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
@@ -71,8 +71,8 @@
         body = self.client.create_volume_transfer(
             volume_id=volume['id'])['transfer']
         transfer_id = body['id']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')
 
         # List all volume transfers (looking for the one we created)
         body = self.client.list_volume_transfers()['transfers']
@@ -84,7 +84,8 @@
 
         # Delete a volume transfer
         self.client.delete_volume_transfer(transfer_id)
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'available')
 
 
 class VolumesV1TransfersTest(VolumesV2TransfersTest):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c0cc74d..0a6901c 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -60,11 +60,11 @@
                                   instance_uuid=server['id'],
                                   mountpoint='/dev/%s' %
                                              CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'in-use')
         self.client.detach_volume(self.volume['id'])
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')
 
     @decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
     def test_volume_bootable(self):
@@ -91,11 +91,10 @@
                                   instance_uuid=server['id'],
                                   mountpoint='/dev/%s' %
                                              CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.client,
-                        self.volume['id'],
-                        'available')
+        waiters.wait_for_volume_resource_status(self.client, self.volume['id'],
+                                                'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status, self.client,
+                        self.volume['id'], 'available')
         self.addCleanup(self.client.detach_volume, self.volume['id'])
         volume = self.client.show_volume(self.volume['id'])['volume']
         self.assertIn('attachments', volume)
@@ -124,8 +123,8 @@
                         self.image_client.delete_image,
                         image_id)
         waiters.wait_for_image_status(self.image_client, image_id, 'active')
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')
 
     @decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
     def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 939f1ac..e664ff7 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -40,11 +40,11 @@
         self.addCleanup(self.volumes_client.delete_volume,
                         restored_volume['volume_id'])
         self.assertEqual(backup_id, restored_volume['backup_id'])
-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup_id, 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       restored_volume['volume_id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                restored_volume['volume_id'],
+                                                'available')
         return restored_volume
 
     @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
@@ -60,8 +60,8 @@
                                     name=backup_name,
                                     description=description)
         self.assertEqual(backup_name, backup['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Get a given backup
         backup = self.backups_client.show_backup(backup['id'])['backup']
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index fa827cd..5331243 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -17,6 +17,7 @@
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions
+from tempest import test
 
 CONF = config.CONF
 
@@ -29,6 +30,7 @@
         if not CONF.volume_feature_enabled.clone:
             raise cls.skipException("Cinder volume clones are disabled")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9adae371-a257-43a5-459a-dc7c88e66e0e')
     def test_create_from_volume_decreasing_size(self):
         # Creates a volume from another volume passing a size different from
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 2378790..3df9b00 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -27,8 +27,8 @@
         extend_size = volume['size'] + 1
         self.volumes_client.extend_volume(volume['id'],
                                           new_size=extend_size)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual(volume['size'], extend_size)
 
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index d1a1c2f..a3e46a8 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -41,8 +41,8 @@
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.assertIn('id', volume)
         self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         self.assertIn(name_field, volume)
         self.assertEqual(volume[name_field], v_name,
                          "The created volume name is not equal "
@@ -106,8 +106,8 @@
         self.assertIn('id', new_volume)
         self.addCleanup(self.delete_volume, self.volumes_client,
                         new_volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       new_volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                new_volume['id'], 'available')
 
         params = {name_field: volume[name_field],
                   descrip_field: volume[descrip_field]}
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 9f4ce95..19cd98f 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -10,6 +10,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from testtools import matchers
+
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
 from tempest import config
@@ -95,8 +97,12 @@
 
     @decorators.idempotent_id('2a8abbe4-d871-46db-b049-c41f5af8216e')
     def test_snapshot_create_get_list_update_delete(self):
-        # Create a snapshot
-        snapshot = self.create_snapshot(self.volume_origin['id'])
+        # Create a snapshot with metadata
+        metadata = {"snap-meta1": "value1",
+                    "snap-meta2": "value2",
+                    "snap-meta3": "value3"}
+        snapshot = self.create_snapshot(self.volume_origin['id'],
+                                        metadata=metadata)
 
         # Get the snap and check for some of its details
         snap_get = self.snapshots_client.show_snapshot(
@@ -105,6 +111,10 @@
                          snap_get['volume_id'],
                          "Referred volume origin mismatch")
 
+        # Verify snapshot metadata
+        self.assertThat(snap_get['metadata'].items(),
+                        matchers.ContainsAll(metadata.items()))
+
         # Compare also with the output from the list action
         tracking_data = (snapshot['id'], snapshot[self.name_field])
         snaps_list = self.snapshots_client.list_snapshots()['snapshots']
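The metadata assertion added above uses testtools' ContainsAll matcher, which only requires the expected items to be a subset of what the API returns, so extra backend-supplied keys do not break the test. A standalone sketch of that semantics (both dictionaries are illustrative):

    from testtools import matchers

    expected = {'snap-meta1': 'value1', 'snap-meta2': 'value2'}
    # Simulated API response: the expected items plus an extra key.
    returned = {'snap-meta1': 'value1', 'snap-meta2': 'value2', 'extra': 'x'}

    # ContainsAll succeeds because every expected item is present; a plain
    # equality assertion would fail on the extra key. match() returns None
    # on success and a Mismatch object otherwise.
    mismatch = matchers.ContainsAll(expected.items()).match(returned.items())
    assert mismatch is None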
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 1e68848..9e44379 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -47,6 +47,7 @@
                           self.snapshots_client.create_snapshot,
                           volume_id=None, display_name=s_name)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('677863d1-34f9-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot_decreasing_size(self):
         # Creates a volume from a snapshot passing a size different from the source
@@ -61,6 +62,13 @@
                           size=src_size - 1,
                           snapshot_id=src_snap['id'])
 
+    @test.attr(type=['negative'])
+    @decorators.idempotent_id('8fd92339-e22f-4591-86b4-1e2215372a40')
+    def test_list_snapshot_invalid_param_limit(self):
+        self.assertRaises(lib_exc.BadRequest,
+                          self.snapshots_client.list_snapshots,
+                          limit='invalid')
+
 
 class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
     _api_version = 1
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 8b51e64..d2328c8 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -37,13 +37,12 @@
         super(VolumesV2ListTestJSON, cls).resource_setup()
 
         # Create 3 test volumes
-        metadata = {'Type': 'work'}
         # NOTE(zhufl): When using pre-provisioned credentials, the project
         # may have volumes other than those created below.
         existing_volumes = cls.volumes_client.list_volumes()['volumes']
         cls.volume_id_list = [vol['id'] for vol in existing_volumes]
         for _ in range(3):
-            volume = cls.create_volume(metadata=metadata)
+            volume = cls.create_volume()
             cls.volume_id_list.append(volume['id'])
 
     @decorators.idempotent_id('2a7064eb-b9c3-429b-b888-33928fc5edd3')
diff --git a/tempest/api/volume/v2/test_volumes_snapshots_negative.py b/tempest/api/volume/v2/test_volumes_snapshots_negative.py
new file mode 100644
index 0000000..e5581b9
--- /dev/null
+++ b/tempest/api/volume/v2/test_volumes_snapshots_negative.py
@@ -0,0 +1,46 @@
+# Copyright 2017 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.volume import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+
+CONF = config.CONF
+
+
+class VolumesV2SnapshotNegativeTest(base.BaseVolumeTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(VolumesV2SnapshotNegativeTest, cls).skip_checks()
+        if not CONF.volume_feature_enabled.snapshot:
+            raise cls.skipException("Cinder volume snapshots are disabled")
+
+    @test.attr(type=['negative'])
+    @decorators.idempotent_id('27b5f37f-bf69-4e8c-986e-c44f3d6819b8')
+    def test_list_snapshots_invalid_param_sort(self):
+        self.assertRaises(lib_exc.BadRequest,
+                          self.snapshots_client.list_snapshots,
+                          sort_key='invalid')
+
+    @test.attr(type=['negative'])
+    @decorators.idempotent_id('b68deeda-ca79-4a32-81af-5c51179e553a')
+    def test_list_snapshots_invalid_param_marker(self):
+        self.assertRaises(lib_exc.NotFound,
+                          self.snapshots_client.list_snapshots,
+                          marker=data_utils.rand_uuid())
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 55bc93e..99da983 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -124,8 +124,9 @@
                   'imageRef': image_id,
                   'size': CONF.volume.volume_size}
         volume = volumes_client.create_volume(**params)
-        waiters.wait_for_volume_status(volumes_client,
-                                       volume['volume']['id'], 'available')
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                volume['volume']['id'],
+                                                'available')
 
         bd_map_v2 = [{
             'uuid': volume['volume']['id'],
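For context, the bd_map_v2 being built here is a standard Nova block_device_mapping_v2 entry that boots the server from the volume just created. Only the 'uuid' key is visible in this hunk; the remaining fields below are the usual BDMv2 keys and should be read as an assumption about the rest of the helper:

    # Sketch of a block_device_mapping_v2 entry for booting from an
    # existing volume; values other than the key names are illustrative.
    bd_map_v2 = [{
        'uuid': 'VOLUME-ID',           # the volume created above
        'source_type': 'volume',       # boot source is an existing volume
        'destination_type': 'volume',  # keep it attached as a volume
        'boot_index': 0,               # make it the boot device
        'delete_on_termination': True,
    }]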
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 15619f4..3e5600c 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -10,7 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
+import re
 import time
 
 from oslo_log import log as logging
@@ -179,25 +179,33 @@
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_status(client, volume_id, status):
-    """Waits for a Volume to reach a given status."""
-    body = client.show_volume(volume_id)['volume']
-    volume_status = body['status']
+def wait_for_volume_resource_status(client, resource_id, status):
+    """Waits for a volume resource to reach a given status.
+
+    This waiter is shared by the volume, snapshot and backup resources. It
+    derives the name of the resource to poll from the class name of the
+    client that is passed in.
+    """
+    resource_name = re.findall(r'(Volume|Snapshot|Backup)',
+                               client.__class__.__name__)[0].lower()
+    show_resource = getattr(client, 'show_' + resource_name)
+    resource_status = show_resource(resource_id)[resource_name]['status']
     start = int(time.time())
 
-    while volume_status != status:
+    while resource_status != status:
         time.sleep(client.build_interval)
-        body = client.show_volume(volume_id)['volume']
-        volume_status = body['status']
-        if volume_status == 'error' and status != 'error':
-            raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
-        if volume_status == 'error_restoring':
-            raise exceptions.VolumeRestoreErrorException(volume_id=volume_id)
+        resource_status = show_resource(
+            resource_id)[resource_name]['status']
+        if resource_status == 'error' and resource_status != status:
+            raise exceptions.VolumeResourceBuildErrorException(
+                resource_name=resource_name, resource_id=resource_id)
+        if resource_name == 'volume' and resource_status == 'error_restoring':
+            raise exceptions.VolumeRestoreErrorException(volume_id=resource_id)
 
         if int(time.time()) - start >= client.build_timeout:
-            message = ('Volume %s failed to reach %s status (current %s) '
+            message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
-                       (volume_id, status, volume_status,
+                       (resource_name, resource_id, status, resource_status,
                         client.build_timeout))
             raise lib_exc.TimeoutException(message)
 
@@ -221,48 +229,6 @@
             raise lib_exc.TimeoutException(message)
 
 
-def wait_for_snapshot_status(client, snapshot_id, status):
-    """Waits for a Snapshot to reach a given status."""
-    body = client.show_snapshot(snapshot_id)['snapshot']
-    snapshot_status = body['status']
-    start = int(time.time())
-
-    while snapshot_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_snapshot(snapshot_id)['snapshot']
-        snapshot_status = body['status']
-        if snapshot_status == 'error':
-            raise exceptions.SnapshotBuildErrorException(
-                snapshot_id=snapshot_id)
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Snapshot %s failed to reach %s status (current %s) '
-                       'within the required time (%s s).' %
-                       (snapshot_id, status, snapshot_status,
-                        client.build_timeout))
-            raise lib_exc.TimeoutException(message)
-
-
-def wait_for_backup_status(client, backup_id, status):
-    """Waits for a Backup to reach a given status."""
-    body = client.show_backup(backup_id)['backup']
-    backup_status = body['status']
-    start = int(time.time())
-
-    while backup_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_backup(backup_id)['backup']
-        backup_status = body['status']
-        if backup_status == 'error' and backup_status != status:
-            raise lib_exc.VolumeBackupException(backup_id=backup_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Volume backup %s failed to reach %s status '
-                       '(current %s) within the required time (%s s).' %
-                       (backup_id, status, backup_status,
-                        client.build_timeout))
-            raise lib_exc.TimeoutException(message)
-
-
 def wait_for_qos_operations(client, qos_id, operation, args=None):
     """Waits for a qos operations to be completed.
 
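The consolidated waiter works because the Cinder service clients follow a naming convention (VolumesClient, SnapshotsClient, BackupsClient), so both the resource name and the matching show_* accessor can be derived from the client's class name. A self-contained sketch of that derivation, with a stub class standing in for the real clients:

    import re

    class SnapshotsClient(object):
        """Stub standing in for tempest's snapshots client."""
        def show_snapshot(self, snapshot_id):
            return {'snapshot': {'status': 'available'}}

    def resource_accessor(client):
        # Pull 'Volume', 'Snapshot' or 'Backup' out of the class name,
        # then resolve the matching show_<resource> method.
        name = re.findall(r'(Volume|Snapshot|Backup)',
                          client.__class__.__name__)[0].lower()
        return name, getattr(client, 'show_' + name)

    name, show = resource_accessor(SnapshotsClient())
    assert name == 'snapshot'
    assert show('some-id')['snapshot']['status'] == 'available'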
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 45bbc11..f48d7ac 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -37,18 +37,15 @@
     message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
 
 
-class VolumeBuildErrorException(exceptions.TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+class VolumeResourceBuildErrorException(exceptions.TempestException):
+    message = ("%(resource_name)s %(resource_id)s failed to build and is in "
+               "ERROR status")
 
 
 class VolumeRestoreErrorException(exceptions.TempestException):
     message = "Volume %(volume_id)s failed to restore and is in ERROR status"
 
 
-class SnapshotBuildErrorException(exceptions.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
 class StackBuildErrorException(exceptions.TempestException):
     message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
                "due to '%(stack_status_reason)s'")
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index f16ef88..adff244 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -19,6 +19,8 @@
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 
+from tempest.lib.api_schema.response.compute.v2_1 import \
+    security_groups as security_groups_schema
 from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
 from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
 from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
@@ -717,3 +719,16 @@
         http://developer.openstack.org/api-ref-compute-v2.1.html#removeFixedIp
         """
         return self.action(server_id, 'removeFixedIp', **kwargs)
+
+    def list_security_groups_by_server(self, server_id):
+        """Lists security groups for a server.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        http://developer.openstack.org/api-ref-compute-v2.1.html#listSecurityGroupsByServer
+        """
+        resp, body = self.get("servers/%s/os-security-groups" % server_id)
+        body = json.loads(body)
+        self.validate_response(security_groups_schema.list_security_groups,
+                               resp, body)
+        return rest_client.ResponseBody(resp, body)
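A hypothetical usage of the new call; 'clients' is assumed to be an initialized tempest clients manager and 'server_id' an existing server:

    # Returns the security groups currently associated with the server.
    body = clients.servers_client.list_security_groups_by_server(server_id)
    for sg in body['security_groups']:
        print(sg['name'], sg['id'])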
diff --git a/tempest/lib/services/image/v2/namespace_tags_client.py b/tempest/lib/services/image/v2/namespace_tags_client.py
index ac8b569..a7f8c39 100644
--- a/tempest/lib/services/image/v2/namespace_tags_client.py
+++ b/tempest/lib/services/image/v2/namespace_tags_client.py
@@ -115,5 +115,11 @@
         """
         url = 'metadefs/namespaces/%s/tags' % namespace
         resp, _ = self.delete(url)
-        self.expected_success(200, resp.status)
+
+        # NOTE(rosmaita): Bug 1656183 fixed the success response code for
+        # this call to make it consistent with the other metadefs delete
+        # calls.  Accept both codes in case tempest is being run against
+        # an old Glance.
+        self.expected_success([200, 204], resp.status)
+
         return rest_client.ResponseBody(resp)
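expected_success accepts either a single status code or a list of codes, which is what lets this client tolerate both the old 200 and the corrected 204. A trimmed-down sketch of that check (the real helper in rest_client also validates the codes themselves):

    def expected_success(expected_code, read_code):
        # Normalize to a list so one code and many codes read the same.
        expected = (expected_code if isinstance(expected_code, list)
                    else [expected_code])
        if read_code not in expected:
            raise AssertionError('Unexpected status %s, wanted one of %s'
                                 % (read_code, expected))

    expected_success([200, 204], 204)  # new Glance
    expected_success([200, 204], 200)  # old Glance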
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 16c8118..e58031b 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -241,8 +241,8 @@
             self.assertEqual(name, volume['display_name'])
         else:
             self.assertEqual(name, volume['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         # The volume retrieved on creation has a non-up-to-date status.
         # Retrieval after it becomes active ensures correct details.
         volume = self.volumes_client.show_volume(volume['id'])['volume']
@@ -481,8 +481,9 @@
                 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                 self.snapshots_client.delete_snapshot,
                                 snapshot_id)
-                waiters.wait_for_snapshot_status(self.snapshots_client,
-                                                 snapshot_id, 'available')
+                waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                        snapshot_id,
+                                                        'available')
         image_name = snapshot_image['name']
         self.assertEqual(name, image_name)
         LOG.debug("Created snapshot image %s for server %s",
@@ -494,16 +495,16 @@
             server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
 
         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']
 
     def nova_volume_detach(self, server, volume):
         self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual('available', volume['status'])
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f8e7742..1196659 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -39,7 +39,6 @@
     def setup_clients(cls):
         super(TestNetworkAdvancedServerOps, cls).setup_clients()
         cls.admin_servers_client = cls.os_adm.servers_client
-        cls.admin_hosts_client = cls.os_adm.hosts_client
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 8661217..716c0bf 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -64,10 +64,10 @@
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snapshot['id'], 'available')
         if 'display_name' in snapshot:
             self.assertEqual(snapshot_name, snapshot['display_name'])
         else:
@@ -88,6 +88,7 @@
                                           CONF.compute.build_interval):
             raise lib_exc.TimeoutException
 
+    @decorators.skip_because(bug="1664793")
     @decorators.idempotent_id('10fd234a-515c-41e5-b092-8323060598c5')
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 43dcf96..9c33b71 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -82,8 +82,8 @@
         self.addCleanup(
             self.snapshots_client.wait_for_resource_deletion, snap['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snap['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snap['id'], 'available')
 
         # NOTE(e0ne): Cinder API v2 uses name instead of display_name
         if 'display_name' in snap:
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index a1da343..6345728 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -48,10 +48,14 @@
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
                          compute_images_client=mock.DEFAULT,
+                         servers_client=mock.DEFAULT,
                          images=[], create=True)
     @mock.patch.object(waiters, 'wait_for_image_status')
+    @mock.patch.object(waiters, 'wait_for_server_status')
     def test_create_image_from_server_wait_until_active(self,
+                                                        wait_for_server_status,
                                                         wait_for_image_status,
+                                                        servers_client,
                                                         compute_images_client):
         """Tests create_image_from_server with wait_until='ACTIVE' kwarg."""
         # setup mocks
@@ -67,6 +71,35 @@
         # make our assertions
         wait_for_image_status.assert_called_once_with(
             compute_images_client, image_id, 'ACTIVE')
+        wait_for_server_status.assert_called_once_with(
+            servers_client, mock.sentinel.server_id, 'ACTIVE')
+        compute_images_client.show_image.assert_called_once_with(image_id)
+
+    @mock.patch.multiple(compute_base.BaseV2ComputeTest,
+                         compute_images_client=mock.DEFAULT,
+                         servers_client=mock.DEFAULT,
+                         images=[], create=True)
+    @mock.patch.object(waiters, 'wait_for_image_status')
+    @mock.patch.object(waiters, 'wait_for_server_status')
+    def test_create_image_from_server_wait_until_active_no_server_wait(
+            self, wait_for_server_status, wait_for_image_status,
+            servers_client, compute_images_client):
+        """Tests create_image_from_server with wait_until='ACTIVE' kwarg."""
+        # setup mocks
+        image_id = uuidutils.generate_uuid()
+        fake_image = mock.Mock(response={'location': image_id})
+        compute_images_client.create_image.return_value = fake_image
+        compute_images_client.show_image.return_value = (
+            {'image': fake_image})
+        # call the utility method
+        image = compute_base.BaseV2ComputeTest.create_image_from_server(
+            mock.sentinel.server_id, wait_until='ACTIVE',
+            wait_for_server=False)
+        self.assertEqual(fake_image, image)
+        # make our assertions
+        wait_for_image_status.assert_called_once_with(
+            compute_images_client, image_id, 'ACTIVE')
+        self.assertEqual(0, wait_for_server_status.call_count)
         compute_images_client.show_image.assert_called_once_with(image_id)
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 46f9526..c2f622c 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -66,7 +66,7 @@
         client.show_volume = mock_show
         volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
         self.assertRaises(exceptions.VolumeRestoreErrorException,
-                          waiters.wait_for_volume_status,
+                          waiters.wait_for_volume_resource_status,
                           client, volume_id, 'available')
         mock_show.assert_has_calls([mock.call(volume_id),
                                     mock.call(volume_id)])
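This unit test keeps working against the renamed waiter because mocks built with a spec report the spec'd class as their own; tempest's waiter tests typically construct the client as mock.Mock(spec=VolumesClient), so the class-name parsing still finds 'Volume'. That detail is an assumption about code outside this hunk; a small sketch:

    from unittest import mock  # tempest itself uses the external 'mock' lib

    class VolumesClient(object):
        """Stub standing in for tempest's volumes client."""
        def show_volume(self, volume_id):
            pass

    client = mock.Mock(spec=VolumesClient)
    # With a spec, the mock reports the spec'd class, which is what
    # wait_for_volume_resource_status parses for the resource name.
    assert client.__class__.__name__ == 'VolumesClient'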
diff --git a/tempest/tests/lib/services/compute/test_servers_client.py b/tempest/tests/lib/services/compute/test_servers_client.py
index adfaaf2..b563ab2 100644
--- a/tempest/tests/lib/services/compute/test_servers_client.py
+++ b/tempest/tests/lib/services/compute/test_servers_client.py
@@ -172,6 +172,14 @@
         "traceback": "fake-trace-back"
     }
 
+    FAKE_SECURITY_GROUPS = [{
+        "description": "default",
+        "id": "3fb26eb3-581b-4420-9963-b0879a026506",
+        "name": "default",
+        "rules": [],
+        "tenant_id": "openstack"
+    }]
+
     FAKE_INSTANCE_WITH_EVENTS = copy.deepcopy(FAKE_INSTANCE_ACTIONS)
     FAKE_INSTANCE_WITH_EVENTS['events'] = [FAKE_INSTANCE_ACTION_EVENTS]
 
@@ -1009,3 +1017,17 @@
             server_id=self.server_id,
             type='fake-console-type'
             )
+
+    def test_list_security_groups_by_server_with_str_body(self):
+        self._test_list_security_groups_by_server()
+
+    def test_list_security_groups_by_server_with_bytes_body(self):
+        self._test_list_security_groups_by_server(True)
+
+    def _test_list_security_groups_by_server(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_security_groups_by_server,
+            'tempest.lib.common.rest_client.RestClient.get',
+            {'security_groups': self.FAKE_SECURITY_GROUPS},
+            server_id=self.server_id,
+            )