Merge "Add missing @test.attr(type='negative') for negative tests"
diff --git a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml b/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
index 484d543..d07448a 100644
--- a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
+++ b/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
@@ -4,7 +4,7 @@
     plugins to declare and automatically register any service client defined
     in the plugin.
   - tempest.lib exposes a new stable interface, the clients module and
-    ServiceClients class, which provides a convinient way for plugin tests to
+    ServiceClients class, which provides a convenient way for plugin tests to
     access service clients defined in Tempest as well as service clients
     defined in all loaded plugins.
     The new ServiceClients class only exposes for now the service clients
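A quick sketch of the new stable interface described in this note (the module path matches the note; the constructor arguments shown are illustrative, so check the tempest.lib documentation for the exact signature):

    # Minimal sketch: obtain a ServiceClients manager from tempest.lib.
    # `credentials` and `identity_uri` are placeholders for real test inputs.
    from tempest.lib.services import clients

    service_clients = clients.ServiceClients(credentials, identity_uri)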
diff --git a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml b/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
index 543cf7b..52c04af 100644
--- a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
+++ b/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
@@ -2,4 +2,4 @@
 deprecations:
   - The ``call_until_true`` function is moved from the ``tempest.test`` module
    to the ``tempest.lib.common.utils.test_utils`` module. Backward
-   compatibilty is preserved until Ocata.
+   compatibility is preserved until Ocata.
diff --git a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml b/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
index 20f310d..813e47f 100644
--- a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
+++ b/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
@@ -1,5 +1,5 @@
 ---
 upgrade:
-  - the already depreacted tempest-cleanup standalone command has been
+  - the already deprecated tempest-cleanup standalone command has been
     removed. The corresponding functionality can be accessed through
     the unified `tempest` command (`tempest cleanup`).
diff --git a/releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
similarity index 100%
rename from releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
rename to releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
diff --git a/releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml b/releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
similarity index 100%
rename from releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml
rename to releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
diff --git a/releasenotes/notes/add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml b/releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
similarity index 100%
rename from releasenotes/notes/add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
rename to releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
diff --git a/releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml b/releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
similarity index 100%
rename from releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
rename to releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
diff --git a/releasenotes/notes/deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml b/releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
rename to releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
diff --git a/releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml b/releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
rename to releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
diff --git a/releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml b/releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
rename to releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
diff --git a/releasenotes/notes/jsonschema-validator-2377ba131e12d3c7.yaml b/releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
similarity index 100%
rename from releasenotes/notes/jsonschema-validator-2377ba131e12d3c7.yaml
rename to releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
diff --git a/releasenotes/notes/remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
diff --git a/releasenotes/notes/remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
diff --git a/releasenotes/notes/remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml b/releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
diff --git a/releasenotes/notes/remove-deprecated-network-config-options-f9ce276231578fe6.yaml b/releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
similarity index 100%
rename from releasenotes/notes/remove-deprecated-network-config-options-f9ce276231578fe6.yaml
rename to releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
diff --git a/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
new file mode 100644
index 0000000..5555949
--- /dev/null
+++ b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
@@ -0,0 +1,13 @@
+---
+prelude: >
+    This release marks the start of Ocata release support in Tempest
+other:
+  - |
+    OpenStack releases supported at this time are **Mitaka**, **Newton**,
+    and **Ocata**.
+
+    The release under current development as of this tag is Pike,
+    meaning that every Tempest commit is also tested against master during
+    the Pike cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Pike (or future release)
+    cloud.
diff --git a/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml b/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
new file mode 100644
index 0000000..67f9541
--- /dev/null
+++ b/releasenotes/notes/add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add the list security groups by server API to the servers_client
+    library. This feature allows listing the security groups applied
+    to a server instance.
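The new call is exercised by the compute test added later in this diff; in isolation the usage looks roughly like this (a sketch; servers_client stands for a Tempest servers client instance):

    # The response body is keyed by 'security_groups', as in the new test.
    groups = servers_client.list_security_groups_by_server(
        server_id)['security_groups']
    group_names = [sg['name'] for sg in groups]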
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 0a9db46..c3c88a5 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -101,7 +101,7 @@
             # check some extensions for the flavor create/show/detail response
             self.assertEqual(flavor['swap'], '')
             self.assertEqual(int(flavor['rxtx_factor']), 1)
-            self.assertEqual(int(flavor['OS-FLV-EXT-DATA:ephemeral']), 0)
+            self.assertEqual(flavor['OS-FLV-EXT-DATA:ephemeral'], 0)
             self.assertEqual(flavor['os-flavor-access:is_public'], True)
 
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 39797f7..3ffd238 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -45,7 +45,6 @@
     def setup_clients(cls):
         super(LiveBlockMigrationTestJSON, cls).setup_clients()
         cls.admin_hosts_client = cls.os_adm.hosts_client
-        cls.admin_servers_client = cls.os_adm.servers_client
         cls.admin_migration_client = cls.os_adm.migrations_client
 
     @classmethod
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 21f5c68..aa75348 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -31,7 +31,6 @@
         super(MigrationsAdminTest, cls).setup_clients()
         cls.client = cls.os_adm.migrations_client
         cls.flavors_admin_client = cls.os_adm.flavors_client
-        cls.admin_servers_client = cls.os_adm.servers_client
 
     @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
     def test_list_migrations(self):
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index b53ced9..5220c97 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -66,7 +66,7 @@
         flavor_id = self._get_unused_flavor_id()
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
-        ram = int(quota_set['ram'])
+        ram = quota_set['ram']
         if ram == -1:
             raise self.skipException("ram quota set is -1,"
                                      " cannot test overlimit")
@@ -94,7 +94,7 @@
         flavor_id = self._get_unused_flavor_id()
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
-        vcpus = int(quota_set['cores'])
+        vcpus = quota_set['cores']
         if vcpus == -1:
             raise self.skipException("cores quota set is -1,"
                                      " cannot test overlimit")
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 5f2444a..45472df 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -38,12 +38,6 @@
         if not CONF.compute_feature_enabled.swap_volume:
             raise cls.skipException("Swapping volumes is not supported.")
 
-    @classmethod
-    def setup_clients(cls):
-        super(TestVolumeSwap, cls).setup_clients()
-        # We need the admin client for performing the update (swap) volume call
-        cls.servers_admin_client = cls.os_adm.servers_client
-
     @decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
     @test.services('volume')
     def test_volume_swap(self):
@@ -58,12 +52,12 @@
         # Attach "volume1" to server
         self.attach_volume(server, volume1)
         # Swap volume from "volume1" to "volume2"
-        self.servers_admin_client.update_attached_volume(
+        self.admin_servers_client.update_attached_volume(
             server['id'], volume1['id'], volumeId=volume2['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume1['id'], 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume2['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume1['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'in-use')
         self.addCleanup(self.servers_client.detach_volume,
                         server['id'], volume2['id'])
         # Verify "volume2" is attached to the server
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 615785b..905bc3d 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -32,11 +32,6 @@
             raise cls.skipException(skip_msg)
 
     @classmethod
-    def setup_clients(cls):
-        super(VolumesAdminNegativeTest, cls).setup_clients()
-        cls.servers_admin_client = cls.os_adm.servers_client
-
-    @classmethod
     def resource_setup(cls):
         super(VolumesAdminNegativeTest, cls).resource_setup()
         cls.server = cls.create_test_server(wait_until='ACTIVE')
@@ -47,7 +42,7 @@
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
-                          self.servers_admin_client.update_attached_volume,
+                          self.admin_servers_client.update_attached_volume,
                           self.server['id'], nonexistent_volume,
                           volumeId=volume['id'])
 
@@ -60,6 +55,6 @@
 
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.BadRequest,
-                          self.servers_admin_client.update_attached_volume,
+                          self.admin_servers_client.update_attached_volume,
                           self.server['id'], volume['id'],
                           volumeId=nonexistent_volume)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index c636894..706b859 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -326,6 +326,10 @@
                     raise
             image = cls.compute_images_client.show_image(image_id)['image']
 
+            if kwargs['wait_until'] == 'ACTIVE':
+                if kwargs.get('wait_for_server', True):
+                    waiters.wait_for_server_status(cls.servers_client,
+                                                   server_id, 'ACTIVE')
         return image
 
     @classmethod
@@ -406,8 +410,8 @@
             kwargs['imageRef'] = image_ref
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], 'available')
         return volume
 
     @classmethod
@@ -446,15 +450,15 @@
         # On teardown detach the volume and wait for it to be available. This
         # is so we don't error out when trying to delete the volume during
         # teardown.
-        self.addCleanup(waiters.wait_for_volume_status,
+        self.addCleanup(waiters.wait_for_volume_resource_status,
                         self.volumes_client, volume['id'], 'available')
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.servers_client.detach_volume,
                         server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
 
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -468,6 +472,7 @@
         cls.availability_zone_admin_client = (
             cls.os_adm.availability_zone_client)
         cls.admin_flavors_client = cls.os_adm.flavors_client
+        cls.admin_servers_client = cls.os_adm.servers_client
 
     def create_flavor(self, ram, vcpus, disk, name=None,
                       is_public='True', **kwargs):
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index d9db0b5..a0c860a 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -60,6 +60,7 @@
         snapshot_name = data_utils.rand_name('test-snap')
         image = self.create_image_from_server(server['id'],
                                               name=snapshot_name,
-                                              wait_until='ACTIVE')
+                                              wait_until='ACTIVE',
+                                              wait_for_server=False)
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
diff --git a/tempest/api/compute/limits/test_absolute_limits_negative.py b/tempest/api/compute/limits/test_absolute_limits_negative.py
index b9ae0c6..21b4b1c 100644
--- a/tempest/api/compute/limits/test_absolute_limits_negative.py
+++ b/tempest/api/compute/limits/test_absolute_limits_negative.py
@@ -41,11 +41,11 @@
         max_meta = limits['absolute']['maxImageMeta']
 
         # No point in running this test if there is no limit.
-        if int(max_meta) == -1:
+        if max_meta == -1:
             raise self.skipException('no limit for maxImageMeta')
 
         # Creating a server should fail, since we exceed the metadata limit
-        max_meta_data = int(max_meta) + 1
+        max_meta_data = max_meta + 1
 
         meta_data = {}
         for xx in range(max_meta_data):
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index e070336..349bfda 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -144,3 +144,31 @@
                          ['security_group'])
         self.assertEqual(s_new_name, fetched_group['name'])
         self.assertEqual(s_new_des, fetched_group['description'])
+
+    @test.idempotent_id('79517d60-535a-438f-af3d-e6feab1cbea7')
+    @test.services('network')
+    def test_list_security_groups_by_server(self):
+        # Create a couple of security groups that we will use
+        # for the server resource this test creates
+        sg = self.create_security_group()
+        sg2 = self.create_security_group()
+        assigned_security_groups_ids = [sg['id'], sg2['id']]
+        # Create a server and add the security groups created
+        # above to it
+        server_id = self.create_test_server(wait_until='ACTIVE')['id']
+        # add security groups to server
+        self.servers_client.add_security_group(server_id, name=sg['name'])
+        self.servers_client.add_security_group(server_id, name=sg2['name'])
+
+        # list security groups for a server
+        fetched_groups = (
+            self.servers_client.list_security_groups_by_server(
+                server_id)['security_groups'])
+        fetched_security_groups_ids = [i['id'] for i in fetched_groups]
+        # verify the security group ids are present in the fetched list
+        missing_security_groups = [
+            p for p in assigned_security_groups_ids
+            if p not in fetched_security_groups_ids]
+        self.assertEmpty(missing_security_groups,
+                         "Failed to find security_groups %s in fetched list" %
+                         ', '.join(missing_security_groups))
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 9bba733..e0c8887 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -79,7 +79,6 @@
 
     def _check_interface(self, iface, port_id=None, network_id=None,
                          fixed_ip=None, mac_addr=None):
-        self.assertIn('port_state', iface)
         if port_id:
             self.assertEqual(iface['port_id'], port_id)
         if network_id:
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 83b2e1b..8ed55e0 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -115,8 +115,8 @@
 
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 04fe11f..3f6abab 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -1,4 +1,4 @@
-# Copyright 2016 OpenStack Foundation
+# Copyright 2016-2017 OpenStack Foundation
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,6 +26,11 @@
 
 CONF = config.CONF
 
+if six.PY2:
+    ord_func = ord
+else:
+    ord_func = int
+
 
 class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
 
@@ -60,14 +65,19 @@
         resp = urllib3.PoolManager().request('GET', vnc_url)
         # Make sure that the GET request was accepted by the novncproxy
         self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
-                         'initial call: ' + str(resp.status))
+                         'initial call: ' + six.text_type(resp.status))
         # Do some basic validation to make sure it is an expected HTML document
-        self.assertTrue('<html>' in resp.data and '</html>' in resp.data,
-                        'Not a valid html document in the response.')
+        resp_data = resp.data.decode()
+        self.assertIn('<html>', resp_data,
+                      'Not a valid html document in the response.')
+        self.assertIn('</html>', resp_data,
+                      'Not a valid html document in the response.')
         # Just try to make sure we got JavaScript back for noVNC, since we
         # won't actually use it since not inside of a browser
-        self.assertTrue('noVNC' in resp.data and '<script' in resp.data,
-                        'Not a valid noVNC javascript html document.')
+        self.assertIn('noVNC', resp_data,
+                      'Not a valid noVNC javascript html document.')
+        self.assertIn('<script', resp_data,
+                      'Not a valid noVNC javascript html document.')
 
     def _validate_rfb_negotiation(self):
         """Verify we can connect to novnc and do the websocket connection."""
@@ -82,14 +92,14 @@
                                    int(data[8:11], base=10)))
         self.assertTrue(version >= 3.3, 'Bad RFB Version: ' + str(version))
         # Send our RFB version to the server, which we will just go with 3.3
-        self._websocket.send_frame(str(data))
+        self._websocket.send_frame(data)
         # Get the server authentication type and make sure None is supported
         data = self._websocket.receive_frame()
         self.assertIsNotNone(data, 'Expected authentication type None.')
         self.assertGreaterEqual(
             len(data), 2, 'Expected authentication type None.')
         self.assertIn(
-            1, [ord(data[i + 1]) for i in range(ord(data[0]))],
+            1, [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
             'Expected authentication type None.')
         # Send to the server that we only support authentication type None
         self._websocket.send_frame(six.int2byte(1))
@@ -98,7 +108,7 @@
         self.assertEqual(
             len(data), 4, 'Server did not think security was successful.')
         self.assertEqual(
-            [ord(i) for i in data], [0, 0, 0, 0],
+            [ord_func(i) for i in data], [0, 0, 0, 0],
             'Server did not think security was successful.')
         # Say to leave the desktop as shared as part of client initialization
         self._websocket.send_frame(six.int2byte(1))
@@ -121,12 +131,12 @@
 
     def _validate_websocket_upgrade(self):
         self.assertTrue(
-            self._websocket.response.startswith('HTTP/1.1 101 Switching '
-                                                'Protocols\r\n'),
+            self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
+                                                b'Protocols\r\n'),
             'Did not get the expected 101 on the websockify call: '
-            + str(len(self._websocket.response)))
+            + six.text_type(self._websocket.response))
         self.assertTrue(
-            self._websocket.response.find('Server: WebSockify') > 0,
+            self._websocket.response.find(b'Server: WebSockify') > 0,
             'Did not get the expected WebSocket HTTP Response.')
 
     def _create_websocket(self, url):
@@ -187,8 +197,8 @@
             # frames less than 125 bytes here (for the negotiation) and
             # that only the 2nd byte contains the length, and since the
             # server doesn't do masking, we can just read the data length
-            if ord(header[1]) & 127 > 0:
-                return self._socket.recv(ord(header[1]) & 127)
+            if ord_func(header[1]) & 127 > 0:
+                return self._socket.recv(ord_func(header[1]) & 127)
 
     def send_frame(self, data):
         """Wrapper for sending data to add in the WebSocket frame format."""
@@ -205,7 +215,7 @@
             frame_bytes.append(mask[i])
         # Mask each of the actual data bytes that we are going to send
         for i in range(len(data)):
-            frame_bytes.append(ord(data[i]) ^ mask[i % 4])
+            frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
         # Convert our integer list to a binary array of bytes
         frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
         self._socket.sendall(frame_bytes)
@@ -233,9 +243,9 @@
         # We are choosing to use binary even though a browser may use Base64
         reqdata += 'Sec-WebSocket-Protocol: binary\r\n\r\n'
         # Send the HTTP GET request and get the response back
-        self._socket.sendall(reqdata)
+        self._socket.sendall(reqdata.encode('utf8'))
         self.response = data = self._socket.recv(4096)
         # Loop through & concatenate all of the data in the response body
-        while len(data) > 0 and self.response.find('\r\n\r\n') < 0:
+        while len(data) > 0 and self.response.find(b'\r\n\r\n') < 0:
             data = self._socket.recv(4096)
             self.response += data
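The ord_func shim introduced above exists because indexing or iterating a byte string yields a one-character str on Python 2 but an int on Python 3; a standalone illustration:

    import six

    # On PY2, b'\x01'[0] is '\x01' and needs ord(); on PY3 it is already 1.
    ord_func = ord if six.PY2 else int
    assert [ord_func(b) for b in b'\x01\x7f'] == [1, 127]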
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 957d24a..90b9da4 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -97,7 +97,7 @@
         max_file_limit = limits['absolute']['maxPersonality']
         if max_file_limit == -1:
             raise self.skipException("No limit for personality files")
-        for i in range(0, int(max_file_limit) + 1):
+        for i in range(0, max_file_limit + 1):
             path = 'etc/test' + str(i) + '.txt'
             personality.append({'path': path,
                                 'contents': base64.encode_as_text(
@@ -117,7 +117,7 @@
         if max_file_limit == -1:
             raise self.skipException("No limit for personality files")
         person = []
-        for i in range(0, int(max_file_limit)):
+        for i in range(0, max_file_limit):
             # NOTE(andreaf) The cirros disk image is blank before boot
             # so we can only inject safely to /
             path = '/test' + str(i) + '.txt'
diff --git a/tempest/api/compute/test_live_block_migration_negative.py b/tempest/api/compute/test_live_block_migration_negative.py
index 40d0746..01fd9ef 100644
--- a/tempest/api/compute/test_live_block_migration_negative.py
+++ b/tempest/api/compute/test_live_block_migration_negative.py
@@ -31,11 +31,6 @@
         if not CONF.compute_feature_enabled.live_migration:
             raise cls.skipException("Live migration is not enabled")
 
-    @classmethod
-    def setup_clients(cls):
-        super(LiveBlockMigrationNegativeTestJSON, cls).setup_clients()
-        cls.admin_servers_client = cls.os_adm.servers_client
-
     def _migrate_server_to(self, server_id, dest_host):
         bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         self.admin_servers_client.live_migrate_server(
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index cbe7178..5304944 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -64,8 +64,8 @@
     def _detach_volume(self, server_id, volume_id):
         try:
             self.servers_client.detach_volume(server_id, volume_id)
-            waiters.wait_for_volume_status(self.volumes_client,
-                                           volume_id, 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume_id, 'available')
         except lib_exc.NotFound:
             LOG.warning("Unable to detach volume %s from server %s "
                         "possibly it was already detached", volume_id,
@@ -78,8 +78,8 @@
             kwargs.update({'device': '/dev/%s' % device})
         attachment = self.servers_client.attach_volume(
             server_id, **kwargs)['volumeAttachment']
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
         self.addCleanup(self._detach_volume, server_id,
                         volume_id)
 
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index 3d5d23b..4b06867 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -54,9 +54,9 @@
             display_name=s_name)['snapshot']
 
         def delete_snapshot(snapshot_id):
-            waiters.wait_for_snapshot_status(self.snapshots_client,
-                                             snapshot_id,
-                                             'available')
+            waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                    snapshot_id,
+                                                    'available')
             # Delete snapshot
             self.snapshots_client.delete_snapshot(snapshot_id)
             self.snapshots_client.wait_for_resource_deletion(snapshot_id)
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 63c247e..0eaa359 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -57,7 +57,8 @@
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
         # Wait for volume status to become available
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client, volume['id'],
+                                                'available')
         # GET Volume
         fetched_volume = self.client.show_volume(volume['id'])['volume']
         # Verification of details of fetched Volume
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 56b3517..2812c68 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -164,7 +164,6 @@
         cls.client.store_image_file(image['id'], data=image_file)
         # Keep the data of one test image so it can be used to filter lists
         cls.test_data = image
-        cls.test_data['size'] = size
 
         return image['id']
 
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 1b97e4a..c3e904a 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -74,8 +74,8 @@
         else:
             cls.volume_id_list_without_prefix.append(
                 cls.volume['id'])
-        waiters.wait_for_volume_status(cls.admin_volume_client,
-                                       cls.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.admin_volume_client,
+                                                cls.volume['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 5a83ae3..83fca45 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -114,7 +114,7 @@
         volume_default = quota_set_default['volumes']
 
         self.admin_quotas_client.update_quota_set(
-            project_id, volumes=(int(volume_default) + 5))
+            project_id, volumes=(volume_default + 5))
 
         self.admin_quotas_client.delete_quota_set(project_id)
         quota_set_new = (self.admin_quotas_client.show_quota_set(project_id)
@@ -146,7 +146,7 @@
             transfer_id, auth_key=auth_key)['transfer']
 
         # Verify volume transferred is available
-        waiters.wait_for_volume_status(
+        waiters.wait_for_volume_resource_status(
             self.alt_client, volume['id'], 'available')
 
         # List of tenants quota usage post transfer
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 213723c..5d08416 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -58,14 +58,14 @@
                          "to the requested name")
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Update volume with new volume_type
         self.volumes_client.retype_volume(volume['id'],
                                           new_type=volume_types[1]['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Get volume details and Verify
         fetched_volume = self.volumes_client.show_volume(
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 1dce7e0..5f590bc 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -17,6 +17,7 @@
 from tempest.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
+from tempest import test
 
 
 class ExtraSpecsNegativeV2Test(base.BaseVolumeAdminTest):
@@ -27,6 +28,7 @@
         extra_specs = {"spec1": "val1"}
         cls.volume_type = cls.create_volume_type(extra_specs=extra_specs)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
@@ -35,6 +37,7 @@
             self.admin_volume_types_client.update_volume_type_extra_specs,
             self.volume_type['id'], "spec1", None)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
     def test_update_nonexistent_extra_spec_id(self):
         # Should not update volume type extra specs with nonexistent id.
@@ -45,6 +48,7 @@
             self.volume_type['id'], data_utils.rand_uuid(),
             extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
     def test_update_none_extra_spec_id(self):
         # Should not update volume type extra specs with none id.
@@ -54,6 +58,7 @@
             self.admin_volume_types_client.update_volume_type_extra_specs,
             self.volume_type['id'], None, extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
@@ -65,6 +70,7 @@
             self.volume_type['id'], list(extra_spec)[0],
             extra_spec)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
         # Should not create volume type extra spec for nonexistent volume
@@ -75,6 +81,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             data_utils.rand_uuid(), extra_specs)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
     def test_create_none_body(self):
         # Should not create volume type extra spec for none POST body.
@@ -83,6 +90,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             self.volume_type['id'], None)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
     def test_create_invalid_body(self):
         # Should not create volume type extra spec for invalid POST body.
@@ -91,6 +99,7 @@
             self.admin_volume_types_client.create_volume_type_extra_specs,
             self.volume_type['id'], extra_specs=['invalid'])
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
         # Should not delete volume type extra spec for nonexistent
@@ -100,6 +109,7 @@
             self.admin_volume_types_client.delete_volume_type_extra_specs,
             data_utils.rand_uuid(), "spec1")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
     def test_list_nonexistent_volume_type_id(self):
         # Should not list volume type extra spec for nonexistent type id.
@@ -108,6 +118,7 @@
             self.admin_volume_types_client.list_volume_types_extra_specs,
             data_utils.rand_uuid())
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
     def test_get_nonexistent_volume_type_id(self):
         # Should not get volume type extra spec for nonexistent type id.
@@ -116,6 +127,7 @@
             self.admin_volume_types_client.show_volume_type_extra_specs,
             data_utils.rand_uuid(), "spec1")
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
     def test_get_nonexistent_extra_spec_id(self):
         # Should not get volume type extra spec for nonexistent extra spec
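For context, tempest's test.attr decorator builds on testtools attributes; a minimal standalone sketch of the mechanism (assuming testtools' WithAttributes mixin, which appends the tags to the test id):

    import testtools
    from testtools.testcase import WithAttributes, attr

    class Demo(WithAttributes, testtools.TestCase):
        @attr('negative')
        def test_example(self):
            pass

    # id() gains a '[negative]' suffix, so a runner regex such as
    # '\[.*\bnegative\b.*\]' selects only the tagged tests.
    print(Demo('test_example').id())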
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 04d27ea..13b7384 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -94,8 +94,9 @@
         self.addCleanup(self._delete_backup, new_id)
         self.assertIn("id", import_backup)
         self.assertEqual(new_id, import_backup['id'])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')
 
         # Verify Import Backup
         backups = self.admin_backups_client.list_backups(
@@ -108,14 +109,16 @@
         self.addCleanup(self.admin_volume_client.delete_volume,
                         restore['volume_id'])
         self.assertEqual(backup['id'], restore['backup_id'])
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       restore['volume_id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                restore['volume_id'],
+                                                'available')
 
         # Verify the restored volume is in the volume list
         volumes = self.admin_volume_client.list_volumes()['volumes']
         self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       import_backup['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                import_backup['id'],
+                                                'available')
 
     @decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
     def test_volume_backup_reset_status(self):
@@ -131,8 +134,8 @@
         # Reset backup status to error
         self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
                                                       status="error")
-        waiters.wait_for_backup_status(self.admin_backups_client,
-                                       backup['id'], 'error')
+        waiters.wait_for_volume_resource_status(self.admin_backups_client,
+                                                backup['id'], 'error')
 
 
 class VolumesBackupsAdminV1Test(VolumesBackupsAdminV2Test):
diff --git a/tempest/api/volume/admin/v2/test_snapshot_manage.py b/tempest/api/volume/admin/v2/test_snapshot_manage.py
index 1114924..eed7dd1 100644
--- a/tempest/api/volume/admin/v2/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/v2/test_snapshot_manage.py
@@ -65,9 +65,9 @@
                         self.admin_snapshots_client, new_snapshot['id'])
 
         # Wait for the snapshot to be available after manage operation
-        waiters.wait_for_snapshot_status(self.admin_snapshots_client,
-                                         new_snapshot['id'],
-                                         'available')
+        waiters.wait_for_volume_resource_status(self.admin_snapshots_client,
+                                                new_snapshot['id'],
+                                                'available')
 
         # Verify the managed snapshot has the expected parent volume
         self.assertEqual(new_snapshot['volume_id'], volume['id'])
diff --git a/tempest/api/volume/admin/v2/test_volumes_list.py b/tempest/api/volume/admin/v2/test_volumes_list.py
index b0a37fb..6bab373 100644
--- a/tempest/api/volume/admin/v2/test_volumes_list.py
+++ b/tempest/api/volume/admin/v2/test_volumes_list.py
@@ -45,8 +45,8 @@
         # Create a volume in admin tenant
         adm_vol = self.admin_volume_client.create_volume(
             size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_status(self.admin_volume_client,
-                                       adm_vol['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.admin_volume_client,
+                                                adm_vol['id'], 'available')
         self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
         params = {'all_tenants': 1,
                   'project_id': self.volumes_client.tenant_id}
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 98e050e..f8c435f 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -131,8 +131,8 @@
 
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.volumes.append(volume)
-        waiters.wait_for_volume_status(cls.volumes_client, volume['id'],
-                                       wait_until)
+        waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                volume['id'], wait_until)
         return volume
 
     @classmethod
@@ -146,8 +146,8 @@
         snapshot = cls.snapshots_client.create_snapshot(
             volume_id=volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
-        waiters.wait_for_snapshot_status(cls.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(cls.snapshots_client,
+                                                snapshot['id'], 'available')
         return snapshot
 
     def create_backup(self, volume_id, backup_client=None, **kwargs):
@@ -158,8 +158,8 @@
         backup = backup_client.create_backup(
             volume_id=volume_id, **kwargs)['backup']
         self.addCleanup(backup_client.delete_backup, backup['id'])
-        waiters.wait_for_backup_status(backup_client, backup['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(backup_client, backup['id'],
+                                                'available')
         return backup
 
     # NOTE(afazekas): these create_* and clean_* could be defined
@@ -182,10 +182,10 @@
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
-                        volume_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_id, 'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status,
+                        self.volumes_client, volume_id, 'available')
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)
 
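Note that the cleanup ordering in attach_volume above relies on addCleanup running callbacks LIFO: the 'available' waiter is registered before detach_volume, so the detach runs first at teardown. A toy demonstration:

    import unittest

    class CleanupOrder(unittest.TestCase):
        def test_order(self):
            # Registered first, runs last.
            self.addCleanup(print, 'runs second: wait for available')
            # Registered last, runs first.
            self.addCleanup(print, 'runs first: detach volume')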
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 5477770..9f63b14 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -43,8 +43,8 @@
             volume_id=volume['id'])['transfer']
         transfer_id = transfer['id']
         auth_key = transfer['auth_key']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')
 
         # Get a volume transfer
         body = self.client.show_volume_transfer(transfer_id)['transfer']
@@ -58,8 +58,8 @@
         # Accept a volume transfer by alt_tenant
         body = self.alt_client.accept_volume_transfer(
             transfer_id, auth_key=auth_key)['transfer']
-        waiters.wait_for_volume_status(self.alt_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.alt_client,
+                                                volume['id'], 'available')
 
     @decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
@@ -71,8 +71,8 @@
         body = self.client.create_volume_transfer(
             volume_id=volume['id'])['transfer']
         transfer_id = body['id']
-        waiters.wait_for_volume_status(self.client,
-                                       volume['id'], 'awaiting-transfer')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'awaiting-transfer')
 
         # List all volume transfers (looking for the one we created)
         body = self.client.list_volume_transfers()['transfers']
@@ -84,7 +84,8 @@
 
         # Delete a volume transfer
         self.client.delete_volume_transfer(transfer_id)
-        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(
+            self.client, volume['id'], 'available')
 
 
 class VolumesV1TransfersTest(VolumesV2TransfersTest):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c0cc74d..0a6901c 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -60,11 +60,11 @@
                                   instance_uuid=server['id'],
                                   mountpoint='/dev/%s' %
                                              CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'in-use')
         self.client.detach_volume(self.volume['id'])
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')
 
     @decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
     def test_volume_bootable(self):
@@ -91,11 +91,10 @@
                                   instance_uuid=server['id'],
                                   mountpoint='/dev/%s' %
                                              CONF.compute.volume_device_name)
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'in-use')
-        self.addCleanup(waiters.wait_for_volume_status, self.client,
-                        self.volume['id'],
-                        'available')
+        waiters.wait_for_volume_resource_status(self.client, self.volume['id'],
+                                                'in-use')
+        self.addCleanup(waiters.wait_for_volume_resource_status, self.client,
+                        self.volume['id'], 'available')
         self.addCleanup(self.client.detach_volume, self.volume['id'])
         volume = self.client.show_volume(self.volume['id'])['volume']
         self.assertIn('attachments', volume)
@@ -124,8 +123,8 @@
                         self.image_client.delete_image,
                         image_id)
         waiters.wait_for_image_status(self.image_client, image_id, 'active')
-        waiters.wait_for_volume_status(self.client,
-                                       self.volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.client,
+                                                self.volume['id'], 'available')
 
     @decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
     def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 939f1ac..e664ff7 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -40,11 +40,11 @@
         self.addCleanup(self.volumes_client.delete_volume,
                         restored_volume['volume_id'])
         self.assertEqual(backup_id, restored_volume['backup_id'])
-        waiters.wait_for_backup_status(self.backups_client,
-                                       backup_id, 'available')
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       restored_volume['volume_id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup_id, 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                restored_volume['volume_id'],
+                                                'available')
         return restored_volume
 
     @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
@@ -60,8 +60,8 @@
                                     name=backup_name,
                                     description=description)
         self.assertEqual(backup_name, backup['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         # Get a given backup
         backup = self.backups_client.show_backup(backup['id'])['backup']
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 2378790..3df9b00 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -27,8 +27,8 @@
         extend_size = volume['size'] + 1
         self.volumes_client.extend_volume(volume['id'],
                                           new_size=extend_size)
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual(volume['size'], extend_size)
 
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index d1a1c2f..a3e46a8 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -41,8 +41,8 @@
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.assertIn('id', volume)
         self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client, volume['id'],
-                                       'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         self.assertIn(name_field, volume)
         self.assertEqual(volume[name_field], v_name,
                          "The created volume name is not equal "
@@ -106,8 +106,8 @@
         self.assertIn('id', new_volume)
         self.addCleanup(self.delete_volume, self.volumes_client,
                         new_volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       new_volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                new_volume['id'], 'available')
 
         params = {name_field: volume[name_field],
                   descrip_field: volume[descrip_field]}
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 1e68848..a3d91b0 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -47,6 +47,7 @@
                           self.snapshots_client.create_snapshot,
                           volume_id=None, display_name=s_name)
 
+    @test.attr(type=['negative'])
     @decorators.idempotent_id('677863d1-34f9-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot_decreasing_size(self):
         # Creates a volume from a snapshot, passing a size different from the source
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 55bc93e..99da983 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -124,8 +124,9 @@
                   'imageRef': image_id,
                   'size': CONF.volume.volume_size}
         volume = volumes_client.create_volume(**params)
-        waiters.wait_for_volume_status(volumes_client,
-                                       volume['volume']['id'], 'available')
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                volume['volume']['id'],
+                                                'available')
 
         bd_map_v2 = [{
             'uuid': volume['volume']['id'],
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 15619f4..3e5600c 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -10,7 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
+import re
 import time
 
 from oslo_log import log as logging
@@ -179,25 +179,33 @@
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_status(client, volume_id, status):
-    """Waits for a Volume to reach a given status."""
-    body = client.show_volume(volume_id)['volume']
-    volume_status = body['status']
+def wait_for_volume_resource_status(client, resource_id, status):
+    """Waits for a volume resource to reach a given status.
+
+    This waiter is shared by volume, snapshot and backup resources. It
+    derives the resource name from the client's class name (for example,
+    SnapshotsClient yields 'snapshot') and calls the matching show_* method.
+    """
+    resource_name = re.findall(r'(Volume|Snapshot|Backup)',
+                               client.__class__.__name__)[0].lower()
+    show_resource = getattr(client, 'show_' + resource_name)
+    resource_status = show_resource(resource_id)[resource_name]['status']
     start = int(time.time())
 
-    while volume_status != status:
+    while resource_status != status:
         time.sleep(client.build_interval)
-        body = client.show_volume(volume_id)['volume']
-        volume_status = body['status']
-        if volume_status == 'error' and status != 'error':
-            raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
-        if volume_status == 'error_restoring':
-            raise exceptions.VolumeRestoreErrorException(volume_id=volume_id)
+        resource_status = show_resource(resource_id)[
+            resource_name]['status']
+        if resource_status == 'error' and resource_status != status:
+            raise exceptions.VolumeResourceBuildErrorException(
+                resource_name=resource_name, resource_id=resource_id)
+        if resource_name == 'volume' and resource_status == 'error_restoring':
+            raise exceptions.VolumeRestoreErrorException(volume_id=resource_id)
 
         if int(time.time()) - start >= client.build_timeout:
-            message = ('Volume %s failed to reach %s status (current %s) '
+            message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
-                       (volume_id, status, volume_status,
+                       (resource_name, resource_id, status, resource_status,
                         client.build_timeout))
             raise lib_exc.TimeoutException(message)
 
@@ -221,48 +229,6 @@
             raise lib_exc.TimeoutException(message)
 
 
-def wait_for_snapshot_status(client, snapshot_id, status):
-    """Waits for a Snapshot to reach a given status."""
-    body = client.show_snapshot(snapshot_id)['snapshot']
-    snapshot_status = body['status']
-    start = int(time.time())
-
-    while snapshot_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_snapshot(snapshot_id)['snapshot']
-        snapshot_status = body['status']
-        if snapshot_status == 'error':
-            raise exceptions.SnapshotBuildErrorException(
-                snapshot_id=snapshot_id)
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Snapshot %s failed to reach %s status (current %s) '
-                       'within the required time (%s s).' %
-                       (snapshot_id, status, snapshot_status,
-                        client.build_timeout))
-            raise lib_exc.TimeoutException(message)
-
-
-def wait_for_backup_status(client, backup_id, status):
-    """Waits for a Backup to reach a given status."""
-    body = client.show_backup(backup_id)['backup']
-    backup_status = body['status']
-    start = int(time.time())
-
-    while backup_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_backup(backup_id)['backup']
-        backup_status = body['status']
-        if backup_status == 'error' and backup_status != status:
-            raise lib_exc.VolumeBackupException(backup_id=backup_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Volume backup %s failed to reach %s status '
-                       '(current %s) within the required time (%s s).' %
-                       (backup_id, status, backup_status,
-                        client.build_timeout))
-            raise lib_exc.TimeoutException(message)
-
-
 def wait_for_qos_operations(client, qos_id, operation, args=None):
     """Waits for a QoS operation to be completed.
 
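
A note for reviewers on the class-name dispatch introduced above: re.findall
takes the first of Volume/Snapshot/Backup found in the client's class name,
and the waiter then calls the matching show_* accessor. A minimal,
self-contained sketch of that resolution (the class names below match
tempest's volume service clients; the loop itself is illustrative only):

    import re

    for class_name in ('VolumesClient', 'SnapshotsClient', 'BackupsClient'):
        resource_name = re.findall(r'(Volume|Snapshot|Backup)',
                                   class_name)[0].lower()
        # 'VolumesClient' -> resource 'volume' -> accessor 'show_volume'
        print('%s uses %s' % (class_name, 'show_' + resource_name))
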
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 45bbc11..f48d7ac 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -37,18 +37,15 @@
     message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
 
 
-class VolumeBuildErrorException(exceptions.TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+class VolumeResourceBuildErrorException(exceptions.TempestException):
+    message = ("%(resource_name)s %(resource_id)s failed to build and is in "
+               "ERROR status")
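
An aside on the new exception: like the classes it replaces, it interpolates
its keyword arguments into the message template, and the waiter passes the
lowercased resource name, so messages read "snapshot <id> failed to build
and is in ERROR status". A sketch of that rendering (assuming
TempestException applies kwargs to message, as the %(...)s placeholders
imply):

    from tempest import exceptions

    try:
        raise exceptions.VolumeResourceBuildErrorException(
            resource_name='snapshot', resource_id='1bee9cb4')
    except exceptions.VolumeResourceBuildErrorException as exc:
        print(exc)
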
 
 
 class VolumeRestoreErrorException(exceptions.TempestException):
     message = "Volume %(volume_id)s failed to restore and is in ERROR status"
 
 
-class SnapshotBuildErrorException(exceptions.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
 class StackBuildErrorException(exceptions.TempestException):
     message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
                "due to '%(stack_status_reason)s'")
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index f16ef88..adff244 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -19,6 +19,8 @@
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 
+from tempest.lib.api_schema.response.compute.v2_1 import \
+    security_groups as security_groups_schema
 from tempest.lib.api_schema.response.compute.v2_1 import servers as schema
 from tempest.lib.api_schema.response.compute.v2_16 import servers as schemav216
 from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
@@ -717,3 +719,16 @@
         http://developer.openstack.org/api-ref-compute-v2.1.html#removeFixedIp
         """
         return self.action(server_id, 'removeFixedIp', **kwargs)
+
+    def list_security_groups_by_server(self, server_id):
+        """Lists security groups for a server.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        http://developer.openstack.org/api-ref-compute-v2.1.html#listSecurityGroupsByServer
+        """
+        resp, body = self.get("servers/%s/os-security-groups" % server_id)
+        body = json.loads(body)
+        self.validate_response(security_groups_schema.list_security_groups,
+                               resp, body)
+        return rest_client.ResponseBody(resp, body)
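
A usage sketch for the new method, as it would look from a test that already
holds a servers_client fixture (server_id is assumed to reference an existing
server):

    body = self.servers_client.list_security_groups_by_server(server_id)
    for group in body['security_groups']:
        # entries carry the usual security group fields
        print(group['id'], group['name'])
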
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6014c8c..e670216 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -241,8 +241,8 @@
             self.assertEqual(name, volume['display_name'])
         else:
             self.assertEqual(name, volume['name'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
         # The volume body returned on creation may carry a stale status;
         # retrieval after it becomes active ensures correct details.
         volume = self.volumes_client.show_volume(volume['id'])['volume']
@@ -481,8 +481,9 @@
                 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                 self.snapshots_client.delete_snapshot,
                                 snapshot_id)
-                waiters.wait_for_snapshot_status(self.snapshots_client,
-                                                 snapshot_id, 'available')
+                waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                        snapshot_id,
+                                                        'available')
         image_name = snapshot_image['name']
         self.assertEqual(name, image_name)
         LOG.debug("Created snapshot image %s for server %s",
@@ -494,16 +495,16 @@
             server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
 
         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']
 
     def nova_volume_detach(self, server, volume):
         self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
 
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual('available', volume['status'])
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 8661217..ef9664d 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -64,10 +64,10 @@
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
-        waiters.wait_for_volume_status(self.volumes_client,
-                                       volume['id'], 'available')
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snapshot['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snapshot['id'], 'available')
         if 'display_name' in snapshot:
             self.assertEqual(snapshot_name, snapshot['display_name'])
         else:
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 43dcf96..9c33b71 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -82,8 +82,8 @@
         self.addCleanup(
             self.snapshots_client.wait_for_resource_deletion, snap['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
-        waiters.wait_for_snapshot_status(self.snapshots_client,
-                                         snap['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snap['id'], 'available')
 
         # NOTE(e0ne): Cinder API v2 uses name instead of display_name
         if 'display_name' in snap:
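
The cleanup registration above (wait_for_resource_deletion first,
delete_snapshot second) leans on addCleanup running callbacks LIFO: the
delete fires first, then the wait confirms the snapshot is gone. A minimal
illustration of that ordering with plain unittest (names hypothetical):

    import unittest

    class CleanupOrder(unittest.TestCase):
        def test_lifo(self):
            calls = []
            self.addCleanup(calls.append, 'registered first, runs second')
            self.addCleanup(calls.append, 'registered second, runs first')
            self.doCleanups()
            self.assertEqual(['registered second, runs first',
                              'registered first, runs second'], calls)
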
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index a1da343..6345728 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -48,10 +48,14 @@
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
                          compute_images_client=mock.DEFAULT,
+                         servers_client=mock.DEFAULT,
                          images=[], create=True)
     @mock.patch.object(waiters, 'wait_for_image_status')
+    @mock.patch.object(waiters, 'wait_for_server_status')
     def test_create_image_from_server_wait_until_active(self,
+                                                        wait_for_server_status,
                                                         wait_for_image_status,
+                                                        servers_client,
                                                         compute_images_client):
         """Tests create_image_from_server with wait_until='ACTIVE' kwarg."""
         # setup mocks
@@ -67,6 +71,35 @@
         # make our assertions
         wait_for_image_status.assert_called_once_with(
             compute_images_client, image_id, 'ACTIVE')
+        wait_for_server_status.assert_called_once_with(
+            servers_client, mock.sentinel.server_id, 'ACTIVE')
+        compute_images_client.show_image.assert_called_once_with(image_id)
+
+    @mock.patch.multiple(compute_base.BaseV2ComputeTest,
+                         compute_images_client=mock.DEFAULT,
+                         servers_client=mock.DEFAULT,
+                         images=[], create=True)
+    @mock.patch.object(waiters, 'wait_for_image_status')
+    @mock.patch.object(waiters, 'wait_for_server_status')
+    def test_create_image_from_server_wait_until_active_no_server_wait(
+            self, wait_for_server_status, wait_for_image_status,
+            servers_client, compute_images_client):
+        """Tests create_image_from_server with wait_for_server=False."""
+        # setup mocks
+        image_id = uuidutils.generate_uuid()
+        fake_image = mock.Mock(response={'location': image_id})
+        compute_images_client.create_image.return_value = fake_image
+        compute_images_client.show_image.return_value = (
+            {'image': fake_image})
+        # call the utility method
+        image = compute_base.BaseV2ComputeTest.create_image_from_server(
+            mock.sentinel.server_id, wait_until='ACTIVE',
+            wait_for_server=False)
+        self.assertEqual(fake_image, image)
+        # make our assertions
+        wait_for_image_status.assert_called_once_with(
+            compute_images_client, image_id, 'ACTIVE')
+        self.assertEqual(0, wait_for_server_status.call_count)
         compute_images_client.show_image.assert_called_once_with(image_id)
 
     @mock.patch.multiple(compute_base.BaseV2ComputeTest,
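
A detail of the mock setup worth calling out: mock.patch.multiple with
create=True is what lets these tests hang compute_images_client,
servers_client and images off BaseV2ComputeTest even though those attributes
do not exist on the class at decoration time, and only names bound to
mock.DEFAULT are injected into the test signature. A condensed sketch of that
behavior (Target and helper are hypothetical; 'mock' is the third-party
library tempest imports):

    import mock

    class Target(object):
        pass

    @mock.patch.multiple(Target, helper=mock.DEFAULT, items=[], create=True)
    def check(helper):
        # 'helper' (bound to mock.DEFAULT) is injected; 'items' is patched
        # on Target but not passed in.
        Target.helper.ping()
        helper.ping.assert_called_once_with()
        assert Target.items == []

    check()
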
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 46f9526..c2f622c 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -66,7 +66,7 @@
         client.show_volume = mock_show
         volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
         self.assertRaises(exceptions.VolumeRestoreErrorException,
-                          waiters.wait_for_volume_status,
+                          waiters.wait_for_volume_resource_status,
                           client, volume_id, 'available')
         mock_show.assert_has_calls([mock.call(volume_id),
                                     mock.call(volume_id)])
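
One subtlety behind this rename: wait_for_volume_resource_status introspects
client.__class__.__name__, so a bare mock.Mock() (class name 'Mock') would
make the re.findall lookup raise IndexError before the waiter ever polls.
The client used here therefore needs a spec so __class__ reports a matching
name; roughly (the exact volumes client module path is an assumption):

    import mock
    from tempest.lib.services.volume.v2 import volumes_client

    client = mock.Mock(spec=volumes_client.VolumesClient,
                       build_interval=1, build_timeout=1)
    # With spec set, client.__class__ reports VolumesClient, so the waiter
    # resolves resource_name 'volume' and calls client.show_volume.
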
diff --git a/tempest/tests/lib/services/compute/test_servers_client.py b/tempest/tests/lib/services/compute/test_servers_client.py
index adfaaf2..b563ab2 100644
--- a/tempest/tests/lib/services/compute/test_servers_client.py
+++ b/tempest/tests/lib/services/compute/test_servers_client.py
@@ -172,6 +172,14 @@
         "traceback": "fake-trace-back"
     }
 
+    FAKE_SECURITY_GROUPS = [{
+        "description": "default",
+        "id": "3fb26eb3-581b-4420-9963-b0879a026506",
+        "name": "default",
+        "rules": [],
+        "tenant_id": "openstack"
+    }]
+
     FAKE_INSTANCE_WITH_EVENTS = copy.deepcopy(FAKE_INSTANCE_ACTIONS)
     FAKE_INSTANCE_WITH_EVENTS['events'] = [FAKE_INSTANCE_ACTION_EVENTS]
 
@@ -1009,3 +1017,17 @@
             server_id=self.server_id,
             type='fake-console-type'
             )
+
+    def test_list_security_groups_by_server_with_str_body(self):
+        self._test_list_security_groups_by_server()
+
+    def test_list_security_groups_by_server_with_bytes_body(self):
+        self._test_list_security_groups_by_server(True)
+
+    def _test_list_security_groups_by_server(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_security_groups_by_server,
+            'tempest.lib.common.rest_client.RestClient.get',
+            {'security_groups': self.FAKE_SECURITY_GROUPS},
+            bytes_body,
+            server_id=self.server_id)
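
On the str/bytes pair that closes the file: check_service_client_function's
fourth positional argument (to_utf in tempest's BaseServiceTest helper)
controls whether the canned response body is handed to the mocked GET as a
str or as UTF-8 bytes, which is why _test_list_security_groups_by_server
forwards bytes_body as above. A sketch of what the helper is assumed to do
with the canned body:

    import json

    def make_body(payload, bytes_body=False):
        serialized = json.dumps(payload)
        if bytes_body:
            # what passing to_utf=True is assumed to simulate
            serialized = serialized.encode('utf-8')
        return serialized

    print(make_body({'security_groups': []}, bytes_body=True))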