Merge "Properly preserve trace on error during wait"
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 65085b9..2ec1017 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -24,8 +24,8 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseDataProcessingTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseDataProcessingTest, cls).resource_setup()
         if not CONF.service_available.sahara:
             raise cls.skipException('Sahara support is required')
 
@@ -43,7 +43,7 @@
         cls._jobs = []
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
                               cls.client.delete_cluster_template)
         cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
@@ -56,7 +56,7 @@
         cls.cleanup_resources(getattr(cls, '_data_sources', []),
                               cls.client.delete_data_source)
         cls.clear_isolated_creds()
-        super(BaseDataProcessingTest, cls).tearDownClass()
+        super(BaseDataProcessingTest, cls).resource_cleanup()
 
     @staticmethod
     def cleanup_resources(resource_id_list, method):
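
Nearly every hunk above, and in the api/ diffs that follow, applies the same mechanical change: the unittest-level setUpClass/tearDownClass hooks become Tempest's resource_setup/resource_cleanup, and the @test.safe_setup decorator is dropped wherever it appeared. The framework keeps ownership of setUpClass and delegates to resource_setup, so a failure while creating class resources can be cleaned up and re-raised with its original traceback, which is what makes safe_setup redundant. A minimal standalone sketch of that mechanism (illustrative only, not Tempest's actual BaseTestCase):

    import unittest


    class ResourceTestCase(unittest.TestCase):

        @classmethod
        def setUpClass(cls):
            super(ResourceTestCase, cls).setUpClass()
            try:
                cls.resource_setup()
            except Exception:
                cls.resource_cleanup()  # undo any partial setup
                raise                   # original traceback is preserved

        @classmethod
        def tearDownClass(cls):
            cls.resource_cleanup()
            super(ResourceTestCase, cls).tearDownClass()

        @classmethod
        def resource_setup(cls):
            cls.resources = []  # subclasses append what they create

        @classmethod
        def resource_cleanup(cls):
            for resource in getattr(cls, 'resources', []):
                pass  # best-effort delete of each created resource
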
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index ff67c1c..537f90c 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -22,9 +22,8 @@
     sahara/restapi/rest_api_v1.0.html#cluster-templates
     """
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ClusterTemplateTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ClusterTemplateTest, cls).resource_setup()
         # create node group template
         node_group_template = {
             'name': data_utils.rand_name('sahara-ng-template'),
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index aae56c4..3650751 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -19,8 +19,8 @@
 
 class DataSourceTest(dp_base.BaseDataProcessingTest):
     @classmethod
-    def setUpClass(cls):
-        super(DataSourceTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(DataSourceTest, cls).resource_setup()
         cls.swift_data_source_with_creds = {
             'url': 'swift://sahara-container.sahara/input-source',
             'description': 'Test data source',
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index 15ee145..d006991 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -22,9 +22,8 @@
     sahara/restapi/rest_api_v1.1_EDP.html#job-binaries
     """
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(JobBinaryTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(JobBinaryTest, cls).resource_setup()
         cls.swift_job_binary_with_extra = {
             'url': 'swift://sahara-container.sahara/example.jar',
             'description': 'Test job binary',
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 45e1140..7e99867 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -22,8 +22,8 @@
     sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
     """
     @classmethod
-    def setUpClass(cls):
-        super(JobBinaryInternalTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(JobBinaryInternalTest, cls).resource_setup()
         cls.job_binary_internal_data = 'Some script may be data'
 
     def _create_job_binary_internal(self, binary_name=None):
diff --git a/tempest/api/data_processing/test_jobs.py b/tempest/api/data_processing/test_jobs.py
index 8591dbd..5af2eef 100644
--- a/tempest/api/data_processing/test_jobs.py
+++ b/tempest/api/data_processing/test_jobs.py
@@ -22,9 +22,8 @@
     sahara/restapi/rest_api_v1.1_EDP.html#jobs
     """
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(JobTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(JobTest, cls).resource_setup()
         # create job binary
         job_binary = {
             'name': data_utils.rand_name('sahara-job-binary'),
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index c2c0075..f3f59fc 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -19,8 +19,8 @@
 
 class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
     @classmethod
-    def setUpClass(cls):
-        super(NodeGroupTemplateTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(NodeGroupTemplateTest, cls).resource_setup()
         cls.node_group_template = {
             'description': 'Test node group template',
             'plugin_name': 'vanilla',
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index 6e0f431..c9f16ca 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -27,8 +27,8 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseDatabaseTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseDatabaseTest, cls).resource_setup()
         if not CONF.service_available.trove:
             skip_msg = ("%s skipped as trove is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
diff --git a/tempest/api/database/flavors/test_flavors.py b/tempest/api/database/flavors/test_flavors.py
index a5c8caa..aed1abe 100644
--- a/tempest/api/database/flavors/test_flavors.py
+++ b/tempest/api/database/flavors/test_flavors.py
@@ -20,8 +20,8 @@
 class DatabaseFlavorsTest(base.BaseDatabaseTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(DatabaseFlavorsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(DatabaseFlavorsTest, cls).resource_setup()
         cls.client = cls.database_flavors_client
 
     @test.attr(type='smoke')
diff --git a/tempest/api/database/flavors/test_flavors_negative.py b/tempest/api/database/flavors/test_flavors_negative.py
index 202dc48..9f14cce 100644
--- a/tempest/api/database/flavors/test_flavors_negative.py
+++ b/tempest/api/database/flavors/test_flavors_negative.py
@@ -21,8 +21,8 @@
 class DatabaseFlavorsNegativeTest(base.BaseDatabaseTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(DatabaseFlavorsNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(DatabaseFlavorsNegativeTest, cls).resource_setup()
         cls.client = cls.database_flavors_client
 
     @test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/database/versions/test_versions.py b/tempest/api/database/versions/test_versions.py
index 453abe7..80fcecf 100644
--- a/tempest/api/database/versions/test_versions.py
+++ b/tempest/api/database/versions/test_versions.py
@@ -21,8 +21,8 @@
     _interface = 'json'
 
     @classmethod
-    def setUpClass(cls):
-        super(DatabaseVersionsTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(DatabaseVersionsTest, cls).resource_setup()
         cls.client = cls.database_versions_client
 
     @test.attr(type='smoke')
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index c875b2f..08767e3 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -31,9 +31,9 @@
     """Base test class for Image API tests."""
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(BaseImageTest, cls).setUpClass()
+        super(BaseImageTest, cls).resource_setup()
         cls.created_images = []
         cls._interface = 'json'
         cls.isolated_creds = isolated_creds.IsolatedCreds(
@@ -47,7 +47,7 @@
             cls.os = clients.Manager()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for image_id in cls.created_images:
             try:
                 cls.client.delete_image(image_id)
@@ -57,7 +57,7 @@
         for image_id in cls.created_images:
                 cls.client.wait_for_resource_deletion(image_id)
         cls.isolated_creds.clear_isolated_creds()
-        super(BaseImageTest, cls).tearDownClass()
+        super(BaseImageTest, cls).resource_cleanup()
 
     @classmethod
     def create_image(cls, **kwargs):
@@ -79,8 +79,8 @@
 class BaseV1ImageTest(BaseImageTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseV1ImageTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV1ImageTest, cls).resource_setup()
         cls.client = cls.os.image_client
         if not CONF.image_feature_enabled.api_v1:
             msg = "Glance API v1 not supported"
@@ -89,8 +89,8 @@
 
 class BaseV1ImageMembersTest(BaseV1ImageTest):
     @classmethod
-    def setUpClass(cls):
-        super(BaseV1ImageMembersTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV1ImageMembersTest, cls).resource_setup()
         if CONF.compute.allow_tenant_isolation:
             cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
         else:
@@ -113,8 +113,8 @@
 class BaseV2ImageTest(BaseImageTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseV2ImageTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV2ImageTest, cls).resource_setup()
         cls.client = cls.os.image_client_v2
         if not CONF.image_feature_enabled.api_v2:
             msg = "Glance API v2 not supported"
@@ -124,8 +124,8 @@
 class BaseV2MemberImageTest(BaseV2ImageTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(BaseV2MemberImageTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(BaseV2MemberImageTest, cls).resource_setup()
         if CONF.compute.allow_tenant_isolation:
             creds = cls.isolated_creds.get_alt_creds()
             cls.os_alt = clients.Manager(creds)
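
Note the shape of BaseImageTest.resource_cleanup above: it issues every delete first, tolerating images that are already gone, and only then waits for each deletion, so the server-side deletes overlap instead of serializing one delete-plus-wait round trip per image. A hedged sketch of that pattern using the client methods named in the diff (the real code narrows the except clause; bare Exception here is a simplification):

    def cleanup_images(client, image_ids):
        for image_id in image_ids:
            try:
                client.delete_image(image_id)
            except Exception:
                pass  # already gone; cleanup stays best-effort
        for image_id in image_ids:
            client.wait_for_resource_deletion(image_id)
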
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index bf55b89..38a623a 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -106,9 +106,8 @@
     """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ListImagesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ListImagesTest, cls).resource_setup()
         # We add a few images here to test the listing functionality of
         # the images API
         img1 = cls._create_remote_image('one', 'bare', 'raw')
@@ -235,8 +234,7 @@
 
 class ListSnapshotImagesTest(base.BaseV1ImageTest):
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
+    def resource_setup(cls):
         # This test class only uses nova v3 api to create snapshot
         # as the similar test which uses nova v2 api already exists
         # in nova v2 compute images api tests.
@@ -246,7 +244,7 @@
             skip_msg = ("%s skipped as nova v3 api is not available" %
                         cls.__name__)
             raise cls.skipException(skip_msg)
-        super(ListSnapshotImagesTest, cls).setUpClass()
+        super(ListSnapshotImagesTest, cls).resource_setup()
         cls.servers_client = cls.os.servers_v3_client
         cls.servers = []
         # We add a few images here to test the listing functionality of
@@ -265,10 +263,10 @@
         cls.client.wait_for_image_status(image['id'], 'active')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for server in getattr(cls, "servers", []):
             cls.servers_client.delete_server(server['id'])
-        super(ListSnapshotImagesTest, cls).tearDownClass()
+        super(ListSnapshotImagesTest, cls).resource_cleanup()
 
     @classmethod
     def _create_snapshot(cls, name, image_id, flavor, **kwargs):
@@ -329,8 +327,8 @@
 
 class UpdateImageMetaTest(base.BaseV1ImageTest):
     @classmethod
-    def setUpClass(cls):
-        super(UpdateImageMetaTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(UpdateImageMetaTest, cls).resource_setup()
         cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
 
     @classmethod
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index a974ebb..7e018e5 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -125,9 +125,8 @@
     """
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ListImagesTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ListImagesTest, cls).resource_setup()
         # We add a few images here to test the listing functionality of
         # the images API
         cls._create_standard_image('bare', 'raw')
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index a143659..6a5fd3d 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -28,9 +28,9 @@
 class BaseObjectTest(tempest.test.BaseTestCase):
 
     @classmethod
-    def setUpClass(cls):
+    def resource_setup(cls):
         cls.set_network_resources()
-        super(BaseObjectTest, cls).setUpClass()
+        super(BaseObjectTest, cls).resource_setup()
         if not CONF.service_available.swift:
             skip_msg = ("%s skipped as swift is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
@@ -72,10 +72,10 @@
         cls.data = SwiftDataGenerator(cls.identity_admin_client)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.data.teardown_all()
         cls.isolated_creds.clear_isolated_creds()
-        super(BaseObjectTest, cls).tearDownClass()
+        super(BaseObjectTest, cls).resource_cleanup()
 
     @classmethod
     def delete_containers(cls, containers, container_client=None,
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index c1eb897..97e9195 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -26,9 +26,8 @@
 class AccountQuotasTest(base.BaseObjectTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(AccountQuotasTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(AccountQuotasTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name="TestContainer")
         cls.container_client.create_container(cls.container_name)
 
@@ -71,10 +70,10 @@
         super(AccountQuotasTest, self).tearDown()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        super(AccountQuotasTest, cls).tearDownClass()
+        super(AccountQuotasTest, cls).resource_cleanup()
 
     @test.attr(type="smoke")
     @test.requires_ext(extension='account_quotas', service='object')
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 7324c2e..6c1fb5a 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,9 +27,8 @@
 class AccountQuotasNegativeTest(base.BaseObjectTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(AccountQuotasNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(AccountQuotasNegativeTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name="TestContainer")
         cls.container_client.create_container(cls.container_name)
 
@@ -71,10 +70,10 @@
         super(AccountQuotasNegativeTest, self).tearDown()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        super(AccountQuotasNegativeTest, cls).tearDownClass()
+        super(AccountQuotasNegativeTest, cls).resource_cleanup()
 
     @test.attr(type=["negative", "smoke"])
     @test.requires_ext(extension='account_quotas', service='object')
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 69cba1e..a0436ee 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -32,9 +32,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(AccountTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(AccountTest, cls).resource_setup()
         for i in moves.xrange(ord('a'), ord('f') + 1):
             name = data_utils.rand_name(name='%s-' % chr(i))
             cls.container_client.create_container(name)
@@ -42,9 +41,9 @@
         cls.containers_count = len(cls.containers)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers(cls.containers)
-        super(AccountTest, cls).tearDownClass()
+        super(AccountTest, cls).resource_cleanup()
 
     @test.attr(type='smoke')
     def test_list_containers(self):
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index a7d45be..e816a9f 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -21,8 +21,8 @@
 
 class ObjectTestACLs(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectTestACLs, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTestACLs, cls).resource_setup()
         cls.data.setup_test_user()
         test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 1a21ecc..9b49db3 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -23,8 +23,8 @@
 
 class ObjectACLsNegativeTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectACLsNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectACLsNegativeTest, cls).resource_setup()
         cls.data.setup_test_user()
         test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 28bde24..966a08d 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,9 +23,8 @@
 class StaticWebTest(base.BaseObjectTest):
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(StaticWebTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(StaticWebTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name="TestContainer")
 
         # This header should be posted on the container before every test
@@ -45,10 +44,10 @@
             metadata_prefix="X-Container-")
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         if hasattr(cls, "container_name"):
             cls.delete_containers([cls.container_name])
-        super(StaticWebTest, cls).tearDownClass()
+        super(StaticWebTest, cls).resource_cleanup()
 
     @test.requires_ext(extension='staticweb', service='object')
     @test.attr('gate')
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 3e6d58c..aebcb5c 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -35,9 +35,8 @@
     clients = {}
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ContainerSyncTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ContainerSyncTest, cls).resource_setup()
         cls.containers = []
         cls.objects = []
 
@@ -62,10 +61,10 @@
             cls.containers.append(cont_name)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for client in cls.clients.values():
             cls.delete_containers(cls.containers, client[0], client[1])
-        super(ContainerSyncTest, cls).tearDownClass()
+        super(ContainerSyncTest, cls).resource_cleanup()
 
     @test.attr(type='slow')
     @test.skip_because(bug='1317133')
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index ad7e068..f6d1fb9 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -22,8 +22,8 @@
 class CrossdomainTest(base.BaseObjectTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(CrossdomainTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(CrossdomainTest, cls).resource_setup()
 
         cls.xml_start = '<?xml version="1.0"?>\n' \
                         '<!DOCTYPE cross-domain-policy SYSTEM ' \
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index e27c7ef..a1138e6 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -23,8 +23,8 @@
 class HealthcheckTest(base.BaseObjectTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(HealthcheckTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(HealthcheckTest, cls).resource_setup()
 
     def setUp(self):
         super(HealthcheckTest, self).setUp()
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 73b4f3b..8cec2fc 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -23,8 +23,8 @@
 
 class ObjectExpiryTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectExpiryTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectExpiryTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
 
@@ -36,9 +36,9 @@
                                                    self.object_name, '')
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers([cls.container_name])
-        super(ObjectExpiryTest, cls).tearDownClass()
+        super(ObjectExpiryTest, cls).resource_cleanup()
 
     def _test_object_expiry(self, metadata):
         # update object metadata
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index a0fb708..05c8ff2 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -30,9 +30,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ObjectFormPostTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectFormPostTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.object_name = data_utils.rand_name(name='ObjectTemp')
 
@@ -56,10 +55,10 @@
             self.key)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
         cls.delete_containers(cls.containers)
-        super(ObjectFormPostTest, cls).tearDownClass()
+        super(ObjectFormPostTest, cls).resource_cleanup()
 
     def get_multipart_form(self, expires=600):
         path = "%s/%s/%s" % (
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 103bc8e..32f5917 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -30,9 +30,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ObjectFormPostNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectFormPostNegativeTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.object_name = data_utils.rand_name(name='ObjectTemp')
 
@@ -56,10 +55,10 @@
             self.key)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
         cls.delete_containers(cls.containers)
-        super(ObjectFormPostNegativeTest, cls).tearDownClass()
+        super(ObjectFormPostNegativeTest, cls).resource_cleanup()
 
     def get_multipart_form(self, expires=600):
         path = "%s/%s/%s" % (
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 8b74b7e..56ab1fb 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -30,16 +30,16 @@
 
 class ObjectTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ObjectTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTest, cls).resource_setup()
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
         cls.containers = [cls.container_name]
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers(cls.containers)
-        super(ObjectTest, cls).tearDownClass()
+        super(ObjectTest, cls).resource_cleanup()
 
     def _create_object(self, metadata=None):
         # setup object
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index f5ebce7..e70bd9a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -30,8 +30,8 @@
 class ObjectTempUrlTest(base.BaseObjectTest):
 
     @classmethod
-    def setUpClass(cls):
-        super(ObjectTempUrlTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTempUrlTest, cls).resource_setup()
         # create a container
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
@@ -52,14 +52,14 @@
                                         cls.object_name, cls.content)
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         for metadata in cls.metadatas:
             cls.account_client.delete_account_metadata(
                 metadata=metadata)
 
         cls.delete_containers(cls.containers)
 
-        super(ObjectTempUrlTest, cls).tearDownClass()
+        super(ObjectTempUrlTest, cls).resource_cleanup()
 
     def setUp(self):
         super(ObjectTempUrlTest, self).setUp()
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 28173fe..b752348 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -31,9 +31,8 @@
     containers = []
 
     @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ObjectTempUrlNegativeTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ObjectTempUrlNegativeTest, cls).resource_setup()
 
         cls.container_name = data_utils.rand_name(name='TestContainer')
         cls.container_client.create_container(cls.container_name)
@@ -47,13 +46,13 @@
             cls.account_client.list_account_metadata()
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         resp, _ = cls.account_client.delete_account_metadata(
             metadata=cls.metadata)
 
         cls.delete_containers(cls.containers)
 
-        super(ObjectTempUrlNegativeTest, cls).tearDownClass()
+        super(ObjectTempUrlNegativeTest, cls).resource_cleanup()
 
     def setUp(self):
         super(ObjectTempUrlNegativeTest, self).setUp()
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 971449d..5fe4fc8 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -25,14 +25,14 @@
 
 class ContainerTest(base.BaseObjectTest):
     @classmethod
-    def setUpClass(cls):
-        super(ContainerTest, cls).setUpClass()
+    def resource_setup(cls):
+        super(ContainerTest, cls).resource_setup()
         cls.containers = []
 
     @classmethod
-    def tearDownClass(cls):
+    def resource_cleanup(cls):
         cls.delete_containers(cls.containers)
-        super(ContainerTest, cls).tearDownClass()
+        super(ContainerTest, cls).resource_cleanup()
 
     def assertContainer(self, container, count, byte, versioned):
         resp, _ = self.container_client.list_container_metadata(container)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index aa2d686..2d7bc24 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -73,6 +73,7 @@
         )
         cls.admin_manager = clients.Manager(cls.admin_credentials())
         # Clients (in alphabetical order)
+        cls.flavors_client = cls.manager.flavors_client
         cls.floating_ips_client = cls.manager.floating_ips_client
         # Glance image client v1
         cls.image_client = cls.manager.image_client
@@ -146,7 +147,7 @@
     def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
                              cleanup_callable, cleanup_args=None,
                              cleanup_kwargs=None, ignore_error=True):
-        """Adds wait for ansyc resource deletion at the end of cleanups
+        """Adds wait for async resource deletion at the end of cleanups
 
         @param waiter_callable: callable to wait for the resource to delete
         @param thing_id: the id of the resource to be cleaned-up
@@ -321,8 +322,9 @@
         if isinstance(server_or_ip, six.string_types):
             ip = server_or_ip
         else:
-            network_name_for_ssh = CONF.compute.network_for_ssh
-            ip = server_or_ip.networks[network_name_for_ssh][0]
+            addr = server_or_ip['addresses'][CONF.compute.network_for_ssh][0]
+            ip = addr['addr']
+
         if username is None:
             username = CONF.scenario.ssh_user
         if private_key is None:
@@ -439,6 +441,35 @@
         _, volume = self.volumes_client.get_volume(self.volume['id'])
         self.assertEqual('available', volume['status'])
 
+    def rebuild_server(self, server_id, image=None,
+                       preserve_ephemeral=False, wait=True,
+                       rebuild_kwargs=None):
+        if image is None:
+            image = CONF.compute.image_ref
+
+        rebuild_kwargs = rebuild_kwargs or {}
+
+        LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
+                  server_id, image, preserve_ephemeral)
+        self.servers_client.rebuild(server_id=server_id, image_ref=image,
+                                    preserve_ephemeral=preserve_ephemeral,
+                                    **rebuild_kwargs)
+        if wait:
+            self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+    def ping_ip_address(self, ip_address, should_succeed=True):
+        cmd = ['ping', '-c1', '-w1', ip_address]
+
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.communicate()
+            return (proc.returncode == 0) == should_succeed
+
+        return tempest.test.call_until_true(
+            ping, CONF.compute.ping_timeout, 1)
+
 
 # TODO(yfried): change this class name to NetworkScenarioTest once client
 # migration is complete
@@ -617,19 +648,6 @@
         self.assertIsNone(floating_ip.port_id)
         return floating_ip
 
-    def _ping_ip_address(self, ip_address, should_succeed=True):
-        cmd = ['ping', '-c1', '-w1', ip_address]
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.wait()
-            return (proc.returncode == 0) == should_succeed
-
-        return tempest.test.call_until_true(
-            ping, CONF.compute.ping_timeout, 1)
-
     def _check_vm_connectivity(self, ip_address,
                                username=None,
                                private_key=None,
@@ -649,8 +667,8 @@
             msg = "Timed out waiting for %s to become reachable" % ip_address
         else:
             msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self._ping_ip_address(ip_address,
-                                              should_succeed=should_connect),
+        self.assertTrue(self.ping_ip_address(ip_address,
+                                             should_succeed=should_connect),
                         msg=msg)
         if should_connect:
             # no need to check ssh for negative connectivity
@@ -878,6 +896,41 @@
 
         return rules
 
+    def _create_pool(self, lb_method, protocol, subnet_id):
+        """Wrapper utility that returns a test pool."""
+        client = self.network_client
+        name = data_utils.rand_name('pool')
+        _, resp_pool = client.create_pool(protocol=protocol, name=name,
+                                          subnet_id=subnet_id,
+                                          lb_method=lb_method)
+        pool = net_resources.DeletablePool(client=client, **resp_pool['pool'])
+        self.assertEqual(pool['name'], name)
+        self.addCleanup(self.delete_wrapper, pool.delete)
+        return pool
+
+    def _create_member(self, address, protocol_port, pool_id):
+        """Wrapper utility that returns a test member."""
+        client = self.network_client
+        _, resp_member = client.create_member(protocol_port=protocol_port,
+                                              pool_id=pool_id,
+                                              address=address)
+        member = net_resources.DeletableMember(client=client,
+                                               **resp_member['member'])
+        self.addCleanup(self.delete_wrapper, member.delete)
+        return member
+
+    def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
+        """Wrapper utility that returns a test vip."""
+        client = self.network_client
+        name = data_utils.rand_name('vip')
+        _, resp_vip = client.create_vip(protocol=protocol, name=name,
+                                        subnet_id=subnet_id, pool_id=pool_id,
+                                        protocol_port=protocol_port)
+        vip = net_resources.DeletableVip(client=client, **resp_vip['vip'])
+        self.assertEqual(vip['name'], name)
+        self.addCleanup(self.delete_wrapper, vip.delete)
+        return vip
+
     def _ssh_to_server(self, server, private_key):
         ssh_login = CONF.compute.image_ssh_user
         return self.get_remote_client(server,
@@ -1481,7 +1534,7 @@
     ERROR = 'error'
 
 
-class BaremetalScenarioTest(OfficialClientTest):
+class BaremetalScenarioTest(ScenarioTest):
     @classmethod
     def setUpClass(cls):
         super(BaremetalScenarioTest, cls).setUpClass()
@@ -1492,12 +1545,13 @@
             raise cls.skipException(msg)
 
         # use an admin client manager for baremetal client
-        admin_creds = cls.admin_credentials()
-        manager = clients.OfficialClientManager(credentials=admin_creds)
+        manager = clients.Manager(
+            credentials=cls.admin_credentials()
+        )
         cls.baremetal_client = manager.baremetal_client
 
         # allow any issues obtaining the node list to raise early
-        cls.baremetal_client.node.list()
+        cls.baremetal_client.list_nodes()
 
     def _node_state_timeout(self, node_id, state_attr,
                             target_states, timeout=10, interval=1):
@@ -1506,7 +1560,7 @@
 
         def check_state():
             node = self.get_node(node_id=node_id)
-            if getattr(node, state_attr) in target_states:
+            if node.get(state_attr) in target_states:
                 return True
             return False
 
@@ -1546,14 +1600,20 @@
 
     def get_node(self, node_id=None, instance_id=None):
         if node_id:
-            return self.baremetal_client.node.get(node_id)
+            _, body = self.baremetal_client.show_node(node_id)
+            return body
         elif instance_id:
-            return self.baremetal_client.node.get_by_instance_uuid(instance_id)
+            _, body = self.baremetal_client.show_node_by_instance_uuid(
+                instance_id)
+            if body['nodes']:
+                return body['nodes'][0]
 
-    def get_ports(self, node_id):
+    def get_ports(self, node_uuid):
         ports = []
-        for port in self.baremetal_client.node.list_ports(node_id):
-            ports.append(self.baremetal_client.port.get(port.uuid))
+        _, body = self.baremetal_client.list_node_ports(node_uuid)
+        for port in body['ports']:
+            _, p = self.baremetal_client.show_port(port['uuid'])
+            ports.append(p)
         return ports
 
     def add_keypair(self):
@@ -1568,42 +1628,37 @@
 
     def boot_instance(self):
         create_kwargs = {
-            'key_name': self.keypair.id
+            'key_name': self.keypair['name']
         }
         self.instance = self.create_server(
             wait_on_boot=False, create_kwargs=create_kwargs)
 
-        self.addCleanup_with_wait(self.compute_client.servers,
-                                  self.instance.id,
-                                  cleanup_callable=self.delete_wrapper,
-                                  cleanup_args=[self.instance])
+        self.wait_node(self.instance['id'])
+        self.node = self.get_node(instance_id=self.instance['id'])
 
-        self.wait_node(self.instance.id)
-        self.node = self.get_node(instance_id=self.instance.id)
-
-        self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_ON)
+        self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
 
         self.wait_provisioning_state(
-            self.node.uuid,
+            self.node['uuid'],
             [BaremetalProvisionStates.DEPLOYWAIT,
              BaremetalProvisionStates.ACTIVE],
             timeout=15)
 
-        self.wait_provisioning_state(self.node.uuid,
+        self.wait_provisioning_state(self.node['uuid'],
                                      BaremetalProvisionStates.ACTIVE,
                                      timeout=CONF.baremetal.active_timeout)
 
-        self.status_timeout(
-            self.compute_client.servers, self.instance.id, 'ACTIVE')
-
-        self.node = self.get_node(instance_id=self.instance.id)
-        self.instance = self.compute_client.servers.get(self.instance.id)
+        self.servers_client.wait_for_server_status(self.instance['id'],
+                                                   'ACTIVE')
+        self.node = self.get_node(instance_id=self.instance['id'])
+        _, self.instance = self.servers_client.get_server(self.instance['id'])
 
     def terminate_instance(self):
-        self.instance.delete()
-        self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
+        self.servers_client.delete_server(self.instance['id'])
+        self.wait_power_state(self.node['uuid'],
+                              BaremetalPowerStates.POWER_OFF)
         self.wait_provisioning_state(
-            self.node.uuid,
+            self.node['uuid'],
             BaremetalProvisionStates.NOSTATE,
             timeout=CONF.baremetal.unprovision_timeout)
 
@@ -1825,19 +1880,6 @@
         self.assertIsNone(floating_ip.port_id)
         return floating_ip
 
-    def _ping_ip_address(self, ip_address, should_succeed=True):
-        cmd = ['ping', '-c1', '-w1', ip_address]
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.wait()
-            return (proc.returncode == 0) == should_succeed
-
-        return tempest.test.call_until_true(
-            ping, CONF.compute.ping_timeout, 1)
-
     def _create_pool(self, lb_method, protocol, subnet_id):
         """Wrapper utility that returns a test pool."""
         name = data_utils.rand_name('pool-')
@@ -1909,8 +1951,8 @@
             msg = "Timed out waiting for %s to become reachable" % ip_address
         else:
             msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self._ping_ip_address(ip_address,
-                                              should_succeed=should_connect),
+        self.assertTrue(self.ping_ip_address(ip_address,
+                                             should_succeed=should_connect),
                         msg=msg)
         if should_connect:
             # no need to check ssh for negative connectivity
@@ -2260,19 +2302,6 @@
         return next((o['output_value'] for o in stack['outputs']
                     if o['output_key'] == output_key), None)
 
-    def _ping_ip_address(self, ip_address, should_succeed=True):
-        cmd = ['ping', '-c1', '-w1', ip_address]
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.wait()
-            return (proc.returncode == 0) == should_succeed
-
-        return tempest.test.call_until_true(
-            ping, CONF.orchestration.build_timeout, 1)
-
 
 class SwiftScenarioTest(ScenarioTest):
     """
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
deleted file mode 100644
index 8894106..0000000
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-
-import heatclient.exc as heat_exceptions
-
-from tempest import config
-from tempest.scenario import manager
-from tempest import test
-
-CONF = config.CONF
-
-
-class AutoScalingTest(manager.OrchestrationScenarioTest):
-
-    def setUp(self):
-        super(AutoScalingTest, self).setUp()
-        if not CONF.orchestration.image_ref:
-            raise self.skipException("No image available to test")
-        self.client = self.orchestration_client
-
-    def assign_keypair(self):
-        self.stack_name = self._stack_rand_name()
-        if CONF.orchestration.keypair_name:
-            self.keypair_name = CONF.orchestration.keypair_name
-        else:
-            self.keypair = self.create_keypair()
-            self.keypair_name = self.keypair.id
-
-    def launch_stack(self):
-        net = self._get_default_network()
-        self.parameters = {
-            'KeyName': self.keypair_name,
-            'InstanceType': CONF.orchestration.instance_type,
-            'ImageId': CONF.orchestration.image_ref,
-            'StackStart': str(time.time()),
-            'Subnet': net['subnets'][0]
-        }
-
-        # create the stack
-        self.template = self._load_template(__file__, 'test_autoscaling.yaml')
-        self.client.stacks.create(
-            stack_name=self.stack_name,
-            template=self.template,
-            parameters=self.parameters)
-
-        self.stack = self.client.stacks.get(self.stack_name)
-        self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
-
-        # if a keypair was set, do not delete the stack on exit to allow
-        # for manual post-mortums
-        if not CONF.orchestration.keypair_name:
-            self.addCleanup(self.client.stacks.delete, self.stack)
-
-    @test.skip_because(bug="1257575")
-    @test.attr(type='slow')
-    @test.services('orchestration', 'compute')
-    def test_scale_up_then_down(self):
-
-        self.assign_keypair()
-        self.launch_stack()
-
-        sid = self.stack_identifier
-        timeout = CONF.orchestration.build_timeout
-        interval = 10
-
-        self.assertEqual('CREATE', self.stack.action)
-        # wait for create to complete.
-        self.status_timeout(self.client.stacks, sid, 'COMPLETE',
-                            error_status='FAILED')
-
-        self.stack.get()
-        self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
-
-        # the resource SmokeServerGroup is implemented as a nested
-        # stack, so servers can be counted by counting the resources
-        # inside that nested stack
-        resource = self.client.resources.get(sid, 'SmokeServerGroup')
-        nested_stack_id = resource.physical_resource_id
-
-        def server_count():
-            # the number of servers is the number of resources
-            # in the nested stack
-            self.server_count = len(
-                self.client.resources.list(nested_stack_id))
-            return self.server_count
-
-        def assertScale(from_servers, to_servers):
-            test.call_until_true(lambda: server_count() == to_servers,
-                                 timeout, interval)
-            self.assertEqual(to_servers, self.server_count,
-                             'Failed scaling from %d to %d servers. '
-                             'Current server count: %s' % (
-                                 from_servers, to_servers,
-                                 self.server_count))
-
-        # he marched them up to the top of the hill
-        assertScale(1, 2)
-        assertScale(2, 3)
-
-        # and he marched them down again
-        assertScale(3, 2)
-        assertScale(2, 1)
-
-        # delete stack on completion
-        self.stack.delete()
-        self.status_timeout(self.client.stacks, sid, 'COMPLETE',
-                            error_status='FAILED',
-                            not_found_exception=heat_exceptions.NotFound)
-
-        try:
-            self.stack.get()
-            self.assertEqual('DELETE_COMPLETE', self.stack.stack_status)
-        except heat_exceptions.NotFound:
-            pass
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
deleted file mode 100644
index 4651284..0000000
--- a/tempest/scenario/orchestration/test_autoscaling.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
-  Template which tests autoscaling and load balancing
-Parameters:
-  KeyName:
-    Type: String
-  InstanceType:
-    Type: String
-  ImageId:
-    Type: String
-  Subnet:
-    Type: String
-  StackStart:
-    Description: Epoch seconds when the stack was launched
-    Type: Number
-  ConsumeStartSeconds:
-    Description: Seconds after invocation when memory should be consumed
-    Type: Number
-    Default: '60'
-  ConsumeStopSeconds:
-    Description: Seconds after StackStart when memory should be released
-    Type: Number
-    Default: '420'
-  ScaleUpThreshold:
-    Description: Memory percentage threshold to scale up on
-    Type: String
-    Default: '70'
-  ScaleDownThreshold:
-    Description: Memory percentage threshold to scale down on
-    Type: String
-    Default: '60'
-  ConsumeMemoryLimit:
-    Description: Memory percentage threshold to consume
-    Type: Number
-    Default: '71'
-Resources:
-  SmokeServerGroup:
-    Type: AWS::AutoScaling::AutoScalingGroup
-    Properties:
-      AvailabilityZones: {'Fn::GetAZs': ''}
-      LaunchConfigurationName: {Ref: LaunchConfig}
-      MinSize: '1'
-      MaxSize: '3'
-      VPCZoneIdentifier: [{Ref: Subnet}]
-  SmokeServerScaleUpPolicy:
-    Type: AWS::AutoScaling::ScalingPolicy
-    Properties:
-      AdjustmentType: ChangeInCapacity
-      AutoScalingGroupName: {Ref: SmokeServerGroup}
-      Cooldown: '60'
-      ScalingAdjustment: '1'
-  SmokeServerScaleDownPolicy:
-    Type: AWS::AutoScaling::ScalingPolicy
-    Properties:
-      AdjustmentType: ChangeInCapacity
-      AutoScalingGroupName: {Ref: SmokeServerGroup}
-      Cooldown: '60'
-      ScalingAdjustment: '-1'
-  MEMAlarmHigh:
-    Type: AWS::CloudWatch::Alarm
-    Properties:
-      AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
-      MetricName: MemoryUtilization
-      Namespace: system/linux
-      Statistic: Average
-      Period: '10'
-      EvaluationPeriods: '1'
-      Threshold: {Ref: ScaleUpThreshold}
-      AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
-      Dimensions:
-      - Name: AutoScalingGroupName
-        Value: {Ref: SmokeServerGroup}
-      ComparisonOperator: GreaterThanThreshold
-  MEMAlarmLow:
-    Type: AWS::CloudWatch::Alarm
-    Properties:
-      AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
-      MetricName: MemoryUtilization
-      Namespace: system/linux
-      Statistic: Average
-      Period: '10'
-      EvaluationPeriods: '1'
-      Threshold: {Ref: ScaleDownThreshold}
-      AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
-      Dimensions:
-      - Name: AutoScalingGroupName
-        Value: {Ref: SmokeServerGroup}
-      ComparisonOperator: LessThanThreshold
-  CfnUser:
-    Type: AWS::IAM::User
-  SmokeKeys:
-    Type: AWS::IAM::AccessKey
-    Properties:
-      UserName: {Ref: CfnUser}
-  SmokeSecurityGroup:
-    Type: AWS::EC2::SecurityGroup
-    Properties:
-      GroupDescription: Standard firewall rules
-      SecurityGroupIngress:
-      - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
-      - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
-  LaunchConfig:
-    Type: AWS::AutoScaling::LaunchConfiguration
-    Metadata:
-      AWS::CloudFormation::Init:
-        config:
-          files:
-            /etc/cfn/cfn-credentials:
-              content:
-                Fn::Replace:
-                - $AWSAccessKeyId: {Ref: SmokeKeys}
-                  $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
-                - |
-                  AWSAccessKeyId=$AWSAccessKeyId
-                  AWSSecretKey=$AWSSecretKey
-              mode: '000400'
-              owner: root
-              group: root
-            /root/watch_loop:
-              content:
-                Fn::Replace:
-                - _hi_: {Ref: MEMAlarmHigh}
-                  _lo_: {Ref: MEMAlarmLow}
-                - |
-                  #!/bin/bash
-                  while :
-                  do
-                    /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
-                    /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
-                    sleep 4
-                  done
-              mode: '000700'
-              owner: root
-              group: root
-            /root/consume_memory:
-              content:
-                Fn::Replace:
-                - StackStart: {Ref: StackStart}
-                  ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
-                  ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
-                  ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
-                - |
-                  #!/usr/bin/env python
-                  import psutil
-                  import time
-                  import datetime
-                  import sys
-                  a = []
-                  sleep_until_consume = ConsumeStartSeconds
-                  stack_start = StackStart
-                  consume_stop_time = stack_start + ConsumeStopSeconds
-                  memory_limit = ConsumeMemoryLimit
-                  if sleep_until_consume > 0:
-                      sys.stdout.flush()
-                      time.sleep(sleep_until_consume)
-                  while psutil.virtual_memory().percent < memory_limit:
-                      sys.stdout.flush()
-                      a.append(' ' * 10**5)
-                      time.sleep(0.1)
-                  sleep_until_exit = consume_stop_time - time.time()
-                  if sleep_until_exit > 0:
-                      time.sleep(sleep_until_exit)
-              mode: '000700'
-              owner: root
-              group: root
-    Properties:
-      ImageId: {Ref: ImageId}
-      InstanceType: {Ref: InstanceType}
-      KeyName: {Ref: KeyName}
-      SecurityGroups: [{Ref: SmokeSecurityGroup}]
-      UserData:
-        Fn::Base64:
-          Fn::Replace:
-          - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
-            ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
-            ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
-          - |
-            #!/bin/bash -v
-            /opt/aws/bin/cfn-init
-            # report on memory consumption every 4 seconds
-            /root/watch_loop &
-            # wait ConsumeStartSeconds then ramp up memory consumption
-            # until it is over ConsumeMemoryLimit%
-            # then exits ConsumeStopSeconds seconds after stack launch
-            /root/consume_memory > /root/consume_memory.log &
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index 4e85429..dd7e7d4 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -83,7 +83,7 @@
         server_ip =\
             server['addresses'][CONF.compute.network_for_ssh][0]['addr']
 
-        if not self._ping_ip_address(server_ip):
+        if not self.ping_ip_address(server_ip):
             self._log_console_output(servers=[server])
             self.fail(
                 "Timed out waiting for %s to become reachable" % server_ip)
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index efbf4ce..35571c6 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -41,26 +41,23 @@
           expected state transitions
     """
     def rebuild_instance(self, preserve_ephemeral=False):
-        self.rebuild_server(self.instance,
+        self.rebuild_server(server_id=self.instance['id'],
                             preserve_ephemeral=preserve_ephemeral,
                             wait=False)
 
-        node = self.get_node(instance_id=self.instance.id)
-        self.instance = self.compute_client.servers.get(self.instance.id)
-
-        self.addCleanup_with_wait(self.compute_client.servers,
-                                  self.instance.id,
-                                  cleanup_callable=self.delete_wrapper,
-                                  cleanup_args=[self.instance])
+        node = self.get_node(instance_id=self.instance['id'])
 
         # We should remain on the same node
-        self.assertEqual(self.node.uuid, node.uuid)
+        self.assertEqual(self.node['uuid'], node['uuid'])
         self.node = node
 
-        self.status_timeout(self.compute_client.servers, self.instance.id,
-                            'REBUILD')
-        self.status_timeout(self.compute_client.servers, self.instance.id,
-                            'ACTIVE')
+        self.servers_client.wait_for_server_status(
+            server_id=self.instance['id'],
+            status='REBUILD',
+            ready_wait=False)
+        self.servers_client.wait_for_server_status(
+            server_id=self.instance['id'],
+            status='ACTIVE')
 
     def create_remote_file(self, client, filename):
         """Create a file on the remote client connection.
@@ -99,23 +96,26 @@
 
     def get_flavor_ephemeral_size(self):
         """Returns size of the ephemeral partition in GiB."""
-        f_id = self.instance.flavor['id']
-        ephemeral = self.compute_client.flavors.get(f_id).ephemeral
-        if ephemeral != 'N/A':
-            return int(ephemeral)
-        return None
+        f_id = self.instance['flavor']['id']
+        _, flavor = self.flavors_client.get_flavor_details(f_id)
+        ephemeral = flavor.get('OS-FLV-EXT-DATA:ephemeral')
+        if not ephemeral or ephemeral == 'N/A':
+            return None
+        return int(ephemeral)
 
     def add_floating_ip(self):
-        floating_ip = self.compute_client.floating_ips.create()
-        self.instance.add_floating_ip(floating_ip)
-        return floating_ip.ip
+        _, floating_ip = self.floating_ips_client.create_floating_ip()
+        self.floating_ips_client.associate_floating_ip_to_server(
+            floating_ip['ip'], self.instance['id'])
+        return floating_ip['ip']
 
     def validate_ports(self):
-        for port in self.get_ports(self.node.uuid):
-            n_port_id = port.extra['vif_port_id']
-            n_port = self.network_client.show_port(n_port_id)['port']
-            self.assertEqual(n_port['device_id'], self.instance.id)
-            self.assertEqual(n_port['mac_address'], port.address)
+        for port in self.get_ports(self.node['uuid']):
+            n_port_id = port['extra']['vif_port_id']
+            _, body = self.network_client.show_port(n_port_id)
+            n_port = body['port']
+            self.assertEqual(n_port['device_id'], self.instance['id'])
+            self.assertEqual(n_port['mac_address'], port['address'])
 
     @test.services('baremetal', 'compute', 'image', 'network')
     def test_baremetal_server_ops(self):
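
The baremetal hunk above also shows the wider migration pattern in this change: away from novaclient objects (attribute access plus status_timeout) and onto tempest's own JSON clients, which return plain dicts and carry their own waiters. A minimal sketch of the resulting rebuild sequence, assuming an already configured servers client; the helper name and image ref are illustrative, not part of the patch:

    def rebuild_and_wait(servers_client, server_id, image_ref):
        # Tempest JSON clients return (resp, body) tuples of plain
        # dicts, so fields are read with ['key'] instead of attributes.
        servers_client.rebuild(server_id, image_ref)
        # REBUILD is transient: skip the extra readiness polling so the
        # waiter does not miss it, then block until the server settles.
        servers_client.wait_for_server_status(
            server_id, 'REBUILD', ready_wait=False)
        servers_client.wait_for_server_status(server_id, 'ACTIVE')
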
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 72cc8b0..4fcc70a 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -69,7 +69,6 @@
         response = self.opener.open(CONF.dashboard.dashboard_url)
         self.assertIn('Overview', response.read())
 
-    @test.skip_because(bug="1345955")
     @test.services('dashboard')
     def test_basic_scenario(self):
         self.check_login_page()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 35e50e8..5e83ff9 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -28,7 +28,7 @@
 config = config.CONF
 
 
-class TestLoadBalancerBasic(manager.NetworkScenarioTest):
+class TestLoadBalancerBasic(manager.NeutronScenarioTest):
 
     """
     This test checks basic load balancing.
@@ -72,7 +72,7 @@
         super(TestLoadBalancerBasic, self).setUp()
         self.server_ips = {}
         self.server_fixed_ips = {}
-        self._create_security_group()
+        self._create_security_group_for_test()
         self._set_net_and_subnet()
 
     def _set_net_and_subnet(self):
@@ -103,8 +103,8 @@
             subnet = self._list_subnets(network_id=self.network['id'])[0]
             self.subnet = net_common.AttributeDict(subnet)
 
-    def _create_security_group(self):
-        self.security_group = self._create_security_group_neutron(
+    def _create_security_group_for_test(self):
+        self.security_group = self._create_security_group(
             tenant_id=self.tenant_id)
         self._create_security_group_rules_for_port(self.port1)
         self._create_security_group_rules_for_port(self.port2)
@@ -117,35 +117,35 @@
             'port_range_max': port,
         }
         self._create_security_group_rule(
-            client=self.network_client,
             secgroup=self.security_group,
             tenant_id=self.tenant_id,
             **rule)
 
     def _create_server(self, name):
-        keypair = self.create_keypair(name='keypair-%s' % name)
-        security_groups = [self.security_group.name]
+        keypair = self.create_keypair()
+        security_groups = [self.security_group]
         create_kwargs = {
             'nics': [
                 {'net-id': self.network['id']},
             ],
-            'key_name': keypair.name,
+            'key_name': keypair['name'],
             'security_groups': security_groups,
         }
-        server = self.create_server(name=name,
-                                    create_kwargs=create_kwargs)
-        self.servers_keypairs[server.id] = keypair
         net_name = self.network['name']
+        server = self.create_server(name=name, create_kwargs=create_kwargs)
+        self.servers_keypairs[server['id']] = keypair
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             public_network_id = config.network.public_network_id
             floating_ip = self._create_floating_ip(
                 server, public_network_id)
             self.floating_ips[floating_ip] = server
-            self.server_ips[server.id] = floating_ip.floating_ip_address
+            self.server_ips[server['id']] = floating_ip.floating_ip_address
         else:
-            self.server_ips[server.id] = server.networks[net_name][0]
-        self.server_fixed_ips[server.id] = server.networks[net_name][0]
+            self.server_ips[server['id']] =\
+                server['addresses'][net_name][0]['addr']
+        self.server_fixed_ips[server['id']] =\
+            server['addresses'][net_name][0]['addr']
         self.assertTrue(self.servers_keypairs)
         return server
 
@@ -162,8 +162,8 @@
         2. Start two http backends listening on ports 80 and 88 respectively
         """
         for server_id, ip in self.server_ips.iteritems():
-            private_key = self.servers_keypairs[server_id].private_key
-            server_name = self.compute_client.servers.get(server_id).name
+            private_key = self.servers_keypairs[server_id]['private_key']
+            server_name = self.servers_client.get_server(server_id)[1]['name']
             username = config.scenario.ssh_user
             ssh_client = self.get_remote_client(
                 server_or_ip=ip,
@@ -269,11 +269,7 @@
                                     protocol_port=80,
                                     subnet_id=self.subnet.id,
                                     pool_id=self.pool.id)
-        self.status_timeout(NeutronRetriever(self.network_client,
-                                             self.network_client.vip_path,
-                                             net_common.DeletableVip),
-                            self.vip.id,
-                            expected_status='ACTIVE')
+        self.vip.wait_for_status('ACTIVE')
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             self._assign_floating_ip_to_vip(self.vip)
@@ -286,8 +282,8 @@
         # vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
         # However the linuxbridge-agent does, and it is necessary to add a
         # security group with a rule that allows tcp port 80 to the vip port.
-        body = {'port': {'security_groups': [self.security_group.id]}}
-        self.network_client.update_port(self.vip.port_id, body)
+        self.network_client.update_port(
+            self.vip.port_id, security_groups=[self.security_group.id])
 
     def _check_load_balancing(self):
         """
@@ -318,27 +314,3 @@
         self._start_servers()
         self._create_load_balancer()
         self._check_load_balancing()
-
-
-class NeutronRetriever(object):
-    """
-    Helper class to make possible handling neutron objects returned by GET
-    requests as attribute dicts.
-
-    Whet get() method is called, the returned dictionary is wrapped into
-    a corresponding DeletableResource class which provides attribute access
-    to dictionary values.
-
-    Usage:
-        This retriever is used to allow using status_timeout from
-        tempest.manager with Neutron objects.
-    """
-
-    def __init__(self, network_client, path, resource):
-        self.network_client = network_client
-        self.path = path
-        self.resource = resource
-
-    def get(self, thing_id):
-        obj = self.network_client.get(self.path % thing_id)
-        return self.resource(client=self.network_client, **obj.values()[0])
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 4783273..f2c3dcd 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -15,7 +15,6 @@
 
 import time
 
-from cinderclient import exceptions as cinder_exceptions
 import testtools
 
 from tempest.common.utils import data_utils
@@ -30,7 +29,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestStampPattern(manager.OfficialClientTest):
+class TestStampPattern(manager.ScenarioTest):
     """
     This test is for snapshotting an instance/volume and attaching the volume
     created from snapshot to the instance booted from snapshot.
@@ -59,13 +58,13 @@
             raise cls.skipException("Cinder volume snapshots are disabled")
 
     def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
-        self.status_timeout(self.volume_client.volume_snapshots,
-                            volume_snapshot.id, status)
+        self.snapshots_client.wait_for_snapshot_status(volume_snapshot['id'],
+                                                       status)
 
     def _boot_image(self, image_id):
-        security_groups = [self.security_group.name]
+        security_groups = [self.security_group]
         create_kwargs = {
-            'key_name': self.keypair.name,
+            'key_name': self.keypair['name'],
             'security_groups': security_groups
         }
         return self.create_server(image=image_id, create_kwargs=create_kwargs)
@@ -74,53 +73,54 @@
         self.keypair = self.create_keypair()
 
     def _create_floating_ip(self):
-        floating_ip = self.compute_client.floating_ips.create()
-        self.addCleanup(self.delete_wrapper, floating_ip)
+        _, floating_ip = self.floating_ips_client.create_floating_ip()
+        self.addCleanup(self.delete_wrapper,
+                        self.floating_ips_client.delete_floating_ip,
+                        floating_ip['id'])
         return floating_ip
 
     def _add_floating_ip(self, server, floating_ip):
-        server.add_floating_ip(floating_ip)
+        self.floating_ips_client.associate_floating_ip_to_server(
+            floating_ip['ip'], server['id'])
 
     def _ssh_to_server(self, server_or_ip):
         return self.get_remote_client(server_or_ip)
 
     def _create_volume_snapshot(self, volume):
         snapshot_name = data_utils.rand_name('scenario-snapshot-')
-        volume_snapshots = self.volume_client.volume_snapshots
-        snapshot = volume_snapshots.create(
-            volume.id, display_name=snapshot_name)
+        _, snapshot = self.snapshots_client.create_snapshot(
+            volume['id'], display_name=snapshot_name)
 
         def cleaner():
-            volume_snapshots.delete(snapshot)
+            self.snapshots_client.delete_snapshot(snapshot['id'])
             try:
-                while volume_snapshots.get(snapshot.id):
+                while self.snapshots_client.get_snapshot(snapshot['id']):
                     time.sleep(1)
-            except cinder_exceptions.NotFound:
+            except exceptions.NotFound:
                 pass
         self.addCleanup(cleaner)
         self._wait_for_volume_status(volume, 'available')
-        self._wait_for_volume_snapshot_status(snapshot, 'available')
-        self.assertEqual(snapshot_name, snapshot.display_name)
+        self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
+                                                       'available')
+        self.assertEqual(snapshot_name, snapshot['display_name'])
         return snapshot
 
     def _wait_for_volume_status(self, volume, status):
-        self.status_timeout(
-            self.volume_client.volumes, volume.id, status)
+        self.volumes_client.wait_for_volume_status(volume['id'], status)
 
     def _create_volume(self, snapshot_id=None):
         return self.create_volume(snapshot_id=snapshot_id)
 
     def _attach_volume(self, server, volume):
-        attach_volume_client = self.compute_client.volumes.create_server_volume
-        attached_volume = attach_volume_client(server.id,
-                                               volume.id,
-                                               '/dev/vdb')
-        self.assertEqual(volume.id, attached_volume.id)
+        # TODO(andreaf) we should use device from config instead of vdb
+        _, attached_volume = self.servers_client.attach_volume(
+            server['id'], volume['id'], device='/dev/vdb')
+        attached_volume = attached_volume['volumeAttachment']
+        self.assertEqual(volume['id'], attached_volume['id'])
         self._wait_for_volume_status(attached_volume, 'in-use')
 
     def _detach_volume(self, server, volume):
-        detach_volume_client = self.compute_client.volumes.delete_server_volume
-        detach_volume_client(server.id, volume.id)
+        self.servers_client.detach_volume(server['id'], volume['id'])
         self._wait_for_volume_status(volume, 'available')
 
     def _wait_for_volume_available_on_the_system(self, server_or_ip):
@@ -157,7 +157,7 @@
     def test_stamp_pattern(self):
         # prepare for booting an instance
         self._add_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
 
         # boot an instance and create a timestamp file in it
         volume = self._create_volume()
@@ -167,7 +167,7 @@
         if CONF.compute.use_floatingip_for_ssh:
             floating_ip_for_server = self._create_floating_ip()
             self._add_floating_ip(server, floating_ip_for_server)
-            ip_for_server = floating_ip_for_server.ip
+            ip_for_server = floating_ip_for_server['ip']
         else:
             ip_for_server = server
 
@@ -184,17 +184,17 @@
 
         # create second volume from the snapshot(volume2)
         volume_from_snapshot = self._create_volume(
-            snapshot_id=volume_snapshot.id)
+            snapshot_id=volume_snapshot['id'])
 
         # boot second instance from the snapshot(instance2)
-        server_from_snapshot = self._boot_image(snapshot_image.id)
+        server_from_snapshot = self._boot_image(snapshot_image['id'])
 
         # create and add floating IP to server_from_snapshot
         if CONF.compute.use_floatingip_for_ssh:
             floating_ip_for_snapshot = self._create_floating_ip()
             self._add_floating_ip(server_from_snapshot,
                                   floating_ip_for_snapshot)
-            ip_for_snapshot = floating_ip_for_snapshot.ip
+            ip_for_snapshot = floating_ip_for_snapshot['ip']
         else:
             ip_for_snapshot = server_from_snapshot
 
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 88b68d3..947ba7a 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -175,11 +175,12 @@
         return resp, body
 
     def wait_for_server_status(self, server_id, status, extra_timeout=0,
-                               raise_on_error=True):
+                               raise_on_error=True, ready_wait=True):
         """Waits for a server to reach a given status."""
         return waiters.wait_for_server_status(self, server_id, status,
                                               extra_timeout=extra_timeout,
-                                              raise_on_error=raise_on_error)
+                                              raise_on_error=raise_on_error,
+                                              ready_wait=ready_wait)
 
     def wait_for_server_termination(self, server_id, ignore_error=False):
         """Waits for server to reach termination."""
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 94ba5aa..5ad5f37 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -13,6 +13,7 @@
 import time
 import urllib
 
+from tempest.common.utils import misc
 from tempest import config
 from tempest import exceptions
 
@@ -227,3 +228,39 @@
         except exceptions.NotFound:
             return True
         return False
+
+    def wait_for_resource_status(self, fetch, status, interval=None,
+                                 timeout=None):
+        """
+        @summary: Waits for a network resource to reach a status
+        @param fetch: the callable to be used to query the resource status
+        @type fetch: callable that takes no parameters and returns the resource
+        @param status: the status that the resource has to reach
+        @type status: String
+        @param interval: the number of seconds to wait between each status
+          query
+        @type interval: Integer
+        @param timeout: the maximum number of seconds to wait for the resource
+          to reach the desired status
+        @type timeout: Integer
+        """
+        if not interval:
+            interval = self.build_interval
+        if not timeout:
+            timeout = self.build_timeout
+        start_time = time.time()
+
+        while time.time() - start_time <= timeout:
+            resource = fetch()
+            if resource['status'] == status:
+                return
+            time.sleep(interval)
+
+        # At this point, the wait has timed out
+        message = 'Resource %s' % (str(resource))
+        message += ' failed to reach status %s' % status
+        message += ' within the required time %s' % timeout
+        caller = misc.find_test_caller()
+        if caller:
+            message = '(%s) %s' % (caller, message)
+        raise exceptions.TimeoutException(message)
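
wait_for_resource_status takes a zero-argument fetch callable that returns the resource dict, so any GET on the client can be adapted with a lambda. A usage sketch against the patched client (the port id is hypothetical):

    # Poll the port until it reports ACTIVE, using the client's own
    # build_interval/build_timeout defaults.
    client.wait_for_resource_status(
        fetch=lambda: client.show_port(port_id)[1]['port'],
        status='ACTIVE')
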
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index b2feb87..2b182d0 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -51,9 +51,19 @@
     def delete(self):
         return
 
+    @abc.abstractmethod
+    def show(self):
+        return
+
     def __hash__(self):
         return hash(self.id)
 
+    def wait_for_status(self, status):
+        if not hasattr(self, 'status'):
+            return
+
+        return self.client.wait_for_resource_status(self.show, status)
+
 
 class DeletableNetwork(DeletableResource):
 
@@ -161,3 +171,8 @@
 
     def delete(self):
         self.client.delete_vip(self.id)
+
+    def show(self):
+        _, result = self.client.show_vip(self.id)
+        super(DeletableVip, self).update(**result['vip'])
+        return self
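
show() deliberately returns self after refreshing the wrapped values: DeletableResource is dict-like, so the refreshed object still answers resource['status'] lookups, which lets wait_for_status() hand self.show to wait_for_resource_status as the fetch callable. That is what collapses the old NeutronRetriever/status_timeout plumbing in the load balancer scenario into a single call:

    # As used in test_load_balancer_basic.py above: each poll invokes
    # self.vip.show(), which re-reads the vip and updates it in place.
    self.vip.wait_for_status('ACTIVE')
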
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index 478cd07..d78112c 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -30,7 +30,7 @@
         proc = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
-        proc.wait()
+        proc.communicate()
         success = proc.returncode == 0
         return success
 
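
Replacing proc.wait() with proc.communicate() here, and again in tempest/tests/test_wrappers.py below, avoids a classic subprocess deadlock: with stdout/stderr set to PIPE, wait() can block forever once the child fills an OS pipe buffer, while communicate() drains both pipes and then reaps the child. The pattern in isolation:

    import subprocess

    proc = subprocess.Popen(['ping', '-c', '3', '127.0.0.1'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # communicate() reads stdout/stderr to EOF and waits for the exit,
    # so returncode is safe to inspect afterwards.
    out, err = proc.communicate()
    success = proc.returncode == 0
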
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index 3f4ac7d..0fd41f9 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -62,14 +62,11 @@
         p = subprocess.Popen(
             "bash %s" % cmd, shell=True,
             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        # wait in the general case is dangerous, however the amount of
-        # data coming back on those pipes is small enough it shouldn't be
-        # a problem.
-        p.wait()
+        out, err = p.communicate()
 
         self.assertEqual(
             p.returncode, expected,
-            "Stdout: %s; Stderr: %s" % (p.stdout.read(), p.stderr.read()))
+            "Stdout: %s; Stderr: %s" % (out, err))
 
     def test_pretty_tox(self):
         # Git init is required for the pbr testr command. pbr requires a git