Merge "Add sec-group rules for ping6 and ssh -6"
diff --git a/tempest/api/baremetal/admin/test_api_discovery.py b/tempest/api/baremetal/admin/test_api_discovery.py
index 7368b3e..09788f2 100644
--- a/tempest/api/baremetal/admin/test_api_discovery.py
+++ b/tempest/api/baremetal/admin/test_api_discovery.py
@@ -19,10 +19,8 @@
 
     @test.attr(type='smoke')
     def test_api_versions(self):
-        resp, descr = self.client.get_api_description()
-        self.assertEqual('200', resp['status'])
+        _, descr = self.client.get_api_description()
         expected_versions = ('v1',)
-
         versions = [version['id'] for version in descr['versions']]
 
         for v in expected_versions:
@@ -30,16 +28,13 @@
 
     @test.attr(type='smoke')
     def test_default_version(self):
-        resp, descr = self.client.get_api_description()
-        self.assertEqual('200', resp['status'])
+        _, descr = self.client.get_api_description()
         default_version = descr['default_version']
-
         self.assertEqual(default_version['id'], 'v1')
 
     @test.attr(type='smoke')
     def test_version_1_resources(self):
-        resp, descr = self.client.get_version_description(version='v1')
-        self.assertEqual('200', resp['status'])
+        _, descr = self.client.get_version_description(version='v1')
         expected_resources = ('nodes', 'chassis',
                               'ports', 'links', 'media_types')
 
diff --git a/tempest/api/baremetal/admin/test_chassis.py b/tempest/api/baremetal/admin/test_chassis.py
index c306c34..254a969 100644
--- a/tempest/api/baremetal/admin/test_chassis.py
+++ b/tempest/api/baremetal/admin/test_chassis.py
@@ -35,8 +35,7 @@
     @test.attr(type='smoke')
     def test_create_chassis(self):
         descr = data_utils.rand_name('test-chassis-')
-        resp, chassis = self.create_chassis(description=descr)
-        self.assertEqual('201', resp['status'])
+        _, chassis = self.create_chassis(description=descr)
         self.assertEqual(chassis['description'], descr)
 
     @test.attr(type='smoke')
@@ -44,40 +43,35 @@
         # Use a unicode string for testing:
         # 'We ♡ OpenStack in Ukraine'
         descr = u'В Україні ♡ OpenStack!'
-        resp, chassis = self.create_chassis(description=descr)
-        self.assertEqual('201', resp['status'])
+        _, chassis = self.create_chassis(description=descr)
         self.assertEqual(chassis['description'], descr)
 
     @test.attr(type='smoke')
     def test_show_chassis(self):
-        resp, chassis = self.client.show_chassis(self.chassis['uuid'])
-        self.assertEqual('200', resp['status'])
+        _, chassis = self.client.show_chassis(self.chassis['uuid'])
         self._assertExpected(self.chassis, chassis)
 
     @test.attr(type="smoke")
     def test_list_chassis(self):
-        resp, body = self.client.list_chassis()
-        self.assertEqual('200', resp['status'])
+        _, body = self.client.list_chassis()
         self.assertIn(self.chassis['uuid'],
                       [i['uuid'] for i in body['chassis']])
 
     @test.attr(type='smoke')
     def test_delete_chassis(self):
-        resp, body = self.create_chassis()
+        _, body = self.create_chassis()
         uuid = body['uuid']
 
-        resp = self.delete_chassis(uuid)
-        self.assertEqual('204', resp['status'])
+        self.delete_chassis(uuid)
         self.assertRaises(exc.NotFound, self.client.show_chassis, uuid)
 
     @test.attr(type='smoke')
     def test_update_chassis(self):
-        resp, body = self.create_chassis()
+        _, body = self.create_chassis()
         uuid = body['uuid']
 
         new_description = data_utils.rand_name('new-description-')
-        resp, body = (self.client.update_chassis(uuid,
-                      description=new_description))
-        self.assertEqual('200', resp['status'])
-        resp, chassis = self.client.show_chassis(uuid)
+        _, body = (self.client.update_chassis(uuid,
+                   description=new_description))
+        _, chassis = self.client.show_chassis(uuid)
         self.assertEqual(chassis['description'], new_description)
diff --git a/tempest/api/baremetal/admin/test_drivers.py b/tempest/api/baremetal/admin/test_drivers.py
index 649886b..9e215dc 100644
--- a/tempest/api/baremetal/admin/test_drivers.py
+++ b/tempest/api/baremetal/admin/test_drivers.py
@@ -29,13 +29,11 @@
 
     @test.attr(type="smoke")
     def test_list_drivers(self):
-        resp, drivers = self.client.list_drivers()
-        self.assertEqual('200', resp['status'])
+        _, drivers = self.client.list_drivers()
         self.assertIn(self.driver_name,
                       [d['name'] for d in drivers['drivers']])
 
     @test.attr(type="smoke")
     def test_show_driver(self):
-        resp, driver = self.client.show_driver(self.driver_name)
-        self.assertEqual('200', resp['status'])
+        _, driver = self.client.show_driver(self.driver_name)
         self.assertEqual(self.driver_name, driver['name'])
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index fc67854..43ea1e6 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -40,30 +40,25 @@
                   'storage': '10240',
                   'memory': '1024'}
 
-        resp, body = self.create_node(self.chassis['uuid'], **params)
-        self.assertEqual('201', resp['status'])
+        _, body = self.create_node(self.chassis['uuid'], **params)
         self._assertExpected(params, body['properties'])
 
     @test.attr(type='smoke')
     def test_delete_node(self):
-        resp, node = self.create_node(self.chassis['uuid'])
-        self.assertEqual('201', resp['status'])
+        _, node = self.create_node(self.chassis['uuid'])
 
-        resp = self.delete_node(node['uuid'])
+        self.delete_node(node['uuid'])
 
-        self.assertEqual(resp['status'], '204')
         self.assertRaises(exc.NotFound, self.client.show_node, node['uuid'])
 
     @test.attr(type='smoke')
     def test_show_node(self):
-        resp, loaded_node = self.client.show_node(self.node['uuid'])
-        self.assertEqual('200', resp['status'])
+        _, loaded_node = self.client.show_node(self.node['uuid'])
         self._assertExpected(self.node, loaded_node)
 
     @test.attr(type='smoke')
     def test_list_nodes(self):
-        resp, body = self.client.list_nodes()
-        self.assertEqual('200', resp['status'])
+        _, body = self.client.list_nodes()
         self.assertIn(self.node['uuid'],
                       [i['uuid'] for i in body['nodes']])
 
@@ -74,24 +69,20 @@
                  'storage': '10',
                  'memory': '128'}
 
-        resp, node = self.create_node(self.chassis['uuid'], **props)
-        self.assertEqual('201', resp['status'])
+        _, node = self.create_node(self.chassis['uuid'], **props)
 
         new_p = {'cpu_arch': 'x86',
                  'cpu_num': '1',
                  'storage': '10000',
                  'memory': '12300'}
 
-        resp, body = self.client.update_node(node['uuid'], properties=new_p)
-        self.assertEqual('200', resp['status'])
-        resp, node = self.client.show_node(node['uuid'])
-        self.assertEqual('200', resp['status'])
+        _, body = self.client.update_node(node['uuid'], properties=new_p)
+        _, node = self.client.show_node(node['uuid'])
         self._assertExpected(new_p, node['properties'])
 
     @test.attr(type='smoke')
     def test_validate_driver_interface(self):
-        resp, body = self.client.validate_driver_interface(self.node['uuid'])
-        self.assertEqual('200', resp['status'])
+        _, body = self.client.validate_driver_interface(self.node['uuid'])
         core_interfaces = ['power', 'deploy']
         for interface in core_interfaces:
             self.assertIn(interface, body)
diff --git a/tempest/api/baremetal/admin/test_nodestates.py b/tempest/api/baremetal/admin/test_nodestates.py
index f24f490..76f47f9 100644
--- a/tempest/api/baremetal/admin/test_nodestates.py
+++ b/tempest/api/baremetal/admin/test_nodestates.py
@@ -24,8 +24,8 @@
     @classmethod
     def setUpClass(cls):
         super(TestNodeStates, cls).setUpClass()
-        resp, cls.chassis = cls.create_chassis()
-        resp, cls.node = cls.create_node(cls.chassis['uuid'])
+        _, cls.chassis = cls.create_chassis()
+        _, cls.node = cls.create_node(cls.chassis['uuid'])
 
     def _validate_power_state(self, node_uuid, power_state):
         # Validate that power state is set within timeout
@@ -34,8 +34,7 @@
         start = timeutils.utcnow()
         while timeutils.delta_seconds(
                 start, timeutils.utcnow()) < self.power_timeout:
-            resp, node = self.client.show_node(node_uuid)
-            self.assertEqual(200, resp.status)
+            _, node = self.client.show_node(node_uuid)
             if node['power_state'] == power_state:
                 return
         message = ('Failed to set power state within '
@@ -44,20 +43,16 @@
 
     @test.attr(type='smoke')
     def test_list_nodestates(self):
-        resp, nodestates = self.client.list_nodestates(self.node['uuid'])
-        self.assertEqual('200', resp['status'])
+        _, nodestates = self.client.list_nodestates(self.node['uuid'])
         for key in nodestates:
             self.assertEqual(nodestates[key], self.node[key])
 
     @test.attr(type='smoke')
     def test_set_node_power_state(self):
-        resp, node = self.create_node(self.chassis['uuid'])
-        self.assertEqual('201', resp['status'])
+        _, node = self.create_node(self.chassis['uuid'])
         states = ["power on", "rebooting", "power off"]
         for state in states:
             # Set power state
-            resp, _ = self.client.set_node_power_state(node['uuid'],
-                                                       state)
-            self.assertEqual('202', resp['status'])
+            self.client.set_node_power_state(node['uuid'], state)
             # Check power state after state is set
             self._validate_power_state(node['uuid'], state)
diff --git a/tempest/api/baremetal/admin/test_ports.py b/tempest/api/baremetal/admin/test_ports.py
index d4adba9..b3f9b7f 100644
--- a/tempest/api/baremetal/admin/test_ports.py
+++ b/tempest/api/baremetal/admin/test_ports.py
@@ -39,12 +39,10 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address)
 
-        resp, body = self.client.show_port(port['uuid'])
+        _, body = self.client.show_port(port['uuid'])
 
-        self.assertEqual(200, resp.status)
         self._assertExpected(port, body)
 
     @test.attr(type='smoke')
@@ -53,12 +51,10 @@
         address = data_utils.rand_mac_address()
         uuid = data_utils.rand_uuid()
 
-        resp, port = self.create_port(node_id=node_id,
-                                      address=address, uuid=uuid)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id,
+                                   address=address, uuid=uuid)
 
-        resp, body = self.client.show_port(uuid)
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(uuid)
         self._assertExpected(port, body)
 
     @test.attr(type='smoke')
@@ -67,44 +63,37 @@
         address = data_utils.rand_mac_address()
         extra = {'key': 'value'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
 
-        resp, body = self.client.show_port(port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(port['uuid'])
         self._assertExpected(port, body)
 
     @test.attr(type='smoke')
     def test_delete_port(self):
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address)
 
-        resp = self.delete_port(port['uuid'])
+        self.delete_port(port['uuid'])
 
-        self.assertEqual(204, resp.status)
         self.assertRaises(exc.NotFound, self.client.show_port, port['uuid'])
 
     @test.attr(type='smoke')
     def test_show_port(self):
-        resp, port = self.client.show_port(self.port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, port = self.client.show_port(self.port['uuid'])
         self._assertExpected(self.port, port)
 
     @test.attr(type='smoke')
     def test_show_port_with_links(self):
-        resp, port = self.client.show_port(self.port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, port = self.client.show_port(self.port['uuid'])
         self.assertIn('links', port.keys())
         self.assertEqual(2, len(port['links']))
         self.assertIn(port['uuid'], port['links'][0]['href'])
 
     @test.attr(type='smoke')
     def test_list_ports(self):
-        resp, body = self.client.list_ports()
-        self.assertEqual(200, resp.status)
+        _, body = self.client.list_ports()
         self.assertIn(self.port['uuid'],
                       [i['uuid'] for i in body['ports']])
         # Verify self links.
@@ -114,8 +103,7 @@
 
     @test.attr(type='smoke')
     def test_list_with_limit(self):
-        resp, body = self.client.list_ports(limit=3)
-        self.assertEqual(200, resp.status)
+        _, body = self.client.list_ports(limit=3)
 
         next_marker = body['ports'][-1]['uuid']
         self.assertIn(next_marker, body['next'])
@@ -128,8 +116,7 @@
                              address=data_utils.rand_mac_address())
             [1]['uuid'] for i in range(0, 5)]
 
-        resp, body = self.client.list_ports_detail()
-        self.assertEqual(200, resp.status)
+        _, body = self.client.list_ports_detail()
 
         ports_dict = dict((port['uuid'], port) for port in body['ports']
                           if port['uuid'] in uuids)
@@ -153,8 +140,7 @@
             self.create_port(node_id=node_id,
                              address=data_utils.rand_mac_address())
 
-        resp, body = self.client.list_ports_detail(address=address)
-        self.assertEqual(200, resp.status)
+        _, body = self.client.list_ports_detail(address=address)
         self.assertEqual(1, len(body['ports']))
         self.assertEqual(address, body['ports'][0]['address'])
 
@@ -164,9 +150,8 @@
         address = data_utils.rand_mac_address()
         extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
 
         new_address = data_utils.rand_mac_address()
         new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
@@ -185,11 +170,9 @@
                   'op': 'replace',
                   'value': new_extra['key3']}]
 
-        resp, _ = self.client.update_port(port['uuid'], patch)
-        self.assertEqual(200, resp.status)
+        self.client.update_port(port['uuid'], patch)
 
-        resp, body = self.client.show_port(port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(port['uuid'])
         self.assertEqual(new_address, body['address'])
         self.assertEqual(new_extra, body['extra'])
 
@@ -199,26 +182,21 @@
         address = data_utils.rand_mac_address()
         extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
 
         # Removing one item from the collection
-        resp, _ = self.client.update_port(port['uuid'],
-                                          [{'path': '/extra/key2',
-                                           'op': 'remove'}])
-        self.assertEqual(200, resp.status)
+        self.client.update_port(port['uuid'],
+                                [{'path': '/extra/key2',
+                                 'op': 'remove'}])
         extra.pop('key2')
-        resp, body = self.client.show_port(port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(port['uuid'])
         self.assertEqual(extra, body['extra'])
 
         # Removing the collection
-        resp, _ = self.client.update_port(port['uuid'], [{'path': '/extra',
-                                                         'op': 'remove'}])
-        self.assertEqual(200, resp.status)
-        resp, body = self.client.show_port(port['uuid'])
-        self.assertEqual(200, resp.status)
+        self.client.update_port(port['uuid'], [{'path': '/extra',
+                                               'op': 'remove'}])
+        _, body = self.client.show_port(port['uuid'])
         self.assertEqual({}, body['extra'])
 
         # Assert nothing else was changed
@@ -230,8 +208,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address)
 
         extra = {'key1': 'value1', 'key2': 'value2'}
 
@@ -242,11 +219,9 @@
                   'op': 'add',
                   'value': extra['key2']}]
 
-        resp, _ = self.client.update_port(port['uuid'], patch)
-        self.assertEqual(200, resp.status)
+        self.client.update_port(port['uuid'], patch)
 
-        resp, body = self.client.show_port(port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(port['uuid'])
         self.assertEqual(extra, body['extra'])
 
     @test.attr(type='smoke')
@@ -255,9 +230,8 @@
         address = data_utils.rand_mac_address()
         extra = {'key1': 'value1', 'key2': 'value2'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual(201, resp.status)
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
 
         new_address = data_utils.rand_mac_address()
         new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
@@ -274,10 +248,8 @@
                   'op': 'add',
                   'value': new_extra['key3']}]
 
-        resp, _ = self.client.update_port(port['uuid'], patch)
-        self.assertEqual(200, resp.status)
+        self.client.update_port(port['uuid'], patch)
 
-        resp, body = self.client.show_port(port['uuid'])
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(port['uuid'])
         self.assertEqual(new_address, body['address'])
         self.assertEqual(new_extra, body['extra'])
diff --git a/tempest/api/baremetal/admin/test_ports_negative.py b/tempest/api/baremetal/admin/test_ports_negative.py
index 7646677..ead3799 100644
--- a/tempest/api/baremetal/admin/test_ports_negative.py
+++ b/tempest/api/baremetal/admin/test_ports_negative.py
@@ -22,11 +22,8 @@
     def setUp(self):
         super(TestPortsNegative, self).setUp()
 
-        resp, self.chassis = self.create_chassis()
-        self.assertEqual('201', resp['status'])
-
-        resp, self.node = self.create_node(self.chassis['uuid'])
-        self.assertEqual('201', resp['status'])
+        _, self.chassis = self.create_chassis()
+        _, self.node = self.create_node(self.chassis['uuid'])
 
     @test.attr(type=['negative', 'smoke'])
     def test_create_port_malformed_mac(self):
@@ -137,13 +134,11 @@
         address = data_utils.rand_mac_address()
         extra = {'key': 'value'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
         port_id = port['uuid']
 
-        resp, body = self.client.delete_port(port_id)
-        self.assertEqual('204', resp['status'])
+        _, body = self.client.delete_port(port_id)
 
         patch = [{'path': '/extra/key',
                   'op': 'replace',
@@ -169,8 +164,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -182,8 +176,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -196,8 +189,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -209,8 +201,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         patch = [{'path': '/node_uuid',
@@ -225,11 +216,9 @@
         address1 = data_utils.rand_mac_address()
         address2 = data_utils.rand_mac_address()
 
-        resp, port1 = self.create_port(node_id=node_id, address=address1)
-        self.assertEqual('201', resp['status'])
+        _, port1 = self.create_port(node_id=node_id, address=address1)
 
-        resp, port2 = self.create_port(node_id=node_id, address=address2)
-        self.assertEqual('201', resp['status'])
+        _, port2 = self.create_port(node_id=node_id, address=address2)
         port_id = port2['uuid']
 
         patch = [{'path': '/address',
@@ -243,8 +232,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         patch = [{'path': '/node_uuid',
@@ -258,8 +246,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         patch = [{'path': '/address',
@@ -275,9 +262,8 @@
         address = data_utils.rand_mac_address()
         extra = {'key': 'value'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
         port_id = port['uuid']
 
         patch = [{'path': '/extra/key',
@@ -291,8 +277,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         patch = [{'path': '/extra',
@@ -307,8 +292,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         patch = [{'path': '/nonexistent', ' op': 'replace', 'value': 'value'}]
@@ -321,8 +305,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -333,8 +316,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -345,8 +327,7 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        resp, port = self.create_port(node_id=node_id, address=address)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address)
         port_id = port['uuid']
 
         self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -366,9 +347,8 @@
         address = data_utils.rand_mac_address()
         extra = {'key1': 'value1', 'key2': 'value2'}
 
-        resp, port = self.create_port(node_id=node_id, address=address,
-                                      extra=extra)
-        self.assertEqual('201', resp['status'])
+        _, port = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)
         port_id = port['uuid']
 
         new_address = data_utils.rand_mac_address()
@@ -393,7 +373,6 @@
                           patch)
 
         # patch should not be applied
-        resp, body = self.client.show_port(port_id)
-        self.assertEqual(200, resp.status)
+        _, body = self.client.show_port(port_id)
         self.assertEqual(address, body['address'])
         self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f684a5a..fd6df3e 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -259,6 +259,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting not available, backup not possible.')
     @test.attr(type='gate')
+    @test.services('image')
     def test_create_backup(self):
         # Positive test:create backup successfully and rotate backups correctly
         # create the first and the second backup
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index ab98d88..b737888 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -54,7 +54,6 @@
 
         # Server for positive tests
         resp, server = cls.create_test_server(wait_until='BUILD')
-        resp, resc_server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
         cls.password = server['adminPass']
         cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
diff --git a/tempest/api/database/flavors/test_flavors.py b/tempest/api/database/flavors/test_flavors.py
index 64d71b9..7d30f26 100644
--- a/tempest/api/database/flavors/test_flavors.py
+++ b/tempest/api/database/flavors/test_flavors.py
@@ -55,6 +55,7 @@
                 self.assertNotIn(name, db_flavor)
 
     @test.attr(type='smoke')
+    @test.services('compute')
     def test_compare_db_flavors_with_os(self):
         resp, db_flavors = self.client.list_db_flavors()
         self.assertEqual(200, resp.status)
diff --git a/tempest/api/network/common.py b/tempest/api/network/common.py
index 97e120f..5ac8b5a 100644
--- a/tempest/api/network/common.py
+++ b/tempest/api/network/common.py
@@ -13,6 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import abc
+
+import six
+
 
 class AttributeDict(dict):
 
@@ -27,6 +31,7 @@
         return super(AttributeDict, self).__getattribute__(name)
 
 
+@six.add_metaclass(abc.ABCMeta)
 class DeletableResource(AttributeDict):
 
     """
@@ -42,8 +47,9 @@
         return '<%s id="%s" name="%s">' % (self.__class__.__name__,
                                            self.id, self.name)
 
+    @abc.abstractmethod
     def delete(self):
-        raise NotImplemented()
+        return
 
     def __hash__(self):
         return id(self)
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index ac3a072..e21aad7 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -75,6 +75,7 @@
         self.assertEqual('201', resp['status'])
         network = body['network']
         net_id = network['id']
+        self.assertEqual('ACTIVE', network['status'])
         # Verify network update
         new_name = "New_network"
         resp, body = self.client.update_network(net_id, name=new_name)
@@ -312,7 +313,7 @@
         # Creates 2 networks in one request
         network_names = [data_utils.rand_name('network-'),
                          data_utils.rand_name('network-')]
-        resp, body = self.client.create_bulk_network(2, network_names)
+        resp, body = self.client.create_bulk_network(network_names)
         created_networks = body['networks']
         self.assertEqual('201', resp['status'])
         self.addCleanup(self._delete_networks, created_networks)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index cfebc2c..a091ce1 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -86,8 +86,11 @@
                 pass
 
         for stack_identifier in cls.stacks:
-            cls.client.wait_for_stack_status(
-                stack_identifier, 'DELETE_COMPLETE')
+            try:
+                cls.client.wait_for_stack_status(
+                    stack_identifier, 'DELETE_COMPLETE')
+            except exceptions.NotFound:
+                pass
 
     @classmethod
     def _create_keypair(cls, name_start='keypair-heat-'):
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index dae0cf8..c27b12e 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -79,10 +79,6 @@
     def test_flavor_list(self):
         self.assertNotEqual("", self.nova_manage('flavor list'))
 
-    def test_db_archive_deleted_rows(self):
-        # make sure command doesn't error out
-        self.nova_manage('db archive_deleted_rows 50')
-
     def test_db_sync(self):
         # make sure command doesn't error out
         self.nova_manage('db sync')
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index f711f2f..d5e49db 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -162,9 +162,11 @@
         email = data_utils.rand_name(root) + suffix + "@example.com"
         user = self._create_user(username, self.password,
                                  tenant, email)
-        # NOTE(andrey-mp): user needs this role to create containers in swift
-        swift_operator_role = CONF.object_storage.operator_role
-        self._assign_user_role(tenant, user, swift_operator_role)
+        if CONF.service_available.swift:
+            # NOTE(andrey-mp): user needs this role to create containers
+            # in swift
+            swift_operator_role = CONF.object_storage.operator_role
+            self._assign_user_role(tenant, user, swift_operator_role)
         if admin:
             self._assign_user_role(tenant, user, CONF.identity.admin_role)
         return self._get_credentials(user, tenant)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 12f65eb..856b751 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -54,16 +54,39 @@
 
 
 class ScenarioTest(tempest.test.BaseTestCase):
+    """Replaces the OfficialClientTest base class.
+
+    Uses tempest own clients as opposed to OfficialClients.
+
+    Common differences:
+    - replace resource.attribute with resource['attribute']
+    - replace resource.delete with delete_callable(resource['id'])
+    - replace local waiters with common / rest_client waiters
+    """
 
     @classmethod
     def setUpClass(cls):
         super(ScenarioTest, cls).setUpClass()
+        # Using tempest client for isolated credentials as well
         cls.isolated_creds = isolated_creds.IsolatedCreds(
             cls.__name__, tempest_client=True,
             network_resources=cls.network_resources)
         cls.manager = clients.Manager(
             credentials=cls.credentials()
         )
+        cls.admin_manager = clients.Manager(cls.admin_credentials())
+        # Clients (in alphabetical order)
+        cls.floating_ips_client = cls.manager.floating_ips_client
+        # Glance image client v1
+        cls.image_client = cls.manager.image_client
+        # Compute image client
+        cls.images_client = cls.manager.images_client
+        cls.keypairs_client = cls.manager.keypairs_client
+        cls.networks_client = cls.admin_manager.networks_client
+        # Nova security groups client
+        cls.security_groups_client = cls.manager.security_groups_client
+        cls.servers_client = cls.manager.servers_client
+        cls.volumes_client = cls.manager.volumes_client
 
     @classmethod
     def _get_credentials(cls, get_creds, ctype):
@@ -83,6 +106,312 @@
         return cls._get_credentials(cls.isolated_creds.get_admin_creds,
                                     'identity_admin')
 
+    # ## Methods to handle sync and async deletes
+
+    def setUp(self):
+        super(ScenarioTest, self).setUp()
+        self.cleanup_waits = []
+        # NOTE(mtreinish) This is safe to do in setUp instead of setUpClass
+        # because scenario tests in the same test class should not share
+        # resources. If resources were shared between test cases then it
+        # should be a single scenario test instead of multiples.
+
+        # NOTE(yfried): this list is cleaned at the end of test_methods and
+        # not at the end of the class
+        self.addCleanup(self._wait_for_cleanups)
+
+    def delete_wrapper(self, delete_thing, thing_id):
+        """Ignores NotFound exceptions for delete operations.
+
+        @param delete_thing: delete method of a resource
+        @param thing_id: id of the resource to be deleted
+        """
+        try:
+            # Tempest clients return dicts, so there is no common delete
+            # method available. Using a callable instead
+            delete_thing(thing_id)
+        except exceptions.NotFound:
+            # If the resource is already missing, mission accomplished.
+            pass
+
+    def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
+                             cleanup_callable, cleanup_args=[],
+                             cleanup_kwargs={}, ignore_error=True):
+        """Adds wait for ansyc resource deletion at the end of cleanups
+
+        @param waiter_callable: callable to wait for the resource to delete
+        @param thing_id: the id of the resource to be cleaned-up
+        @param thing_id_param: the name of the id param in the waiter
+        @param cleanup_callable: method to load pass to self.addCleanup with
+            the following *cleanup_args, **cleanup_kwargs.
+            usually a delete method.
+        """
+        self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
+        wait_dict = {
+            'waiter_callable': waiter_callable,
+            thing_id_param: thing_id
+        }
+        self.cleanup_waits.append(wait_dict)
+
+    def _wait_for_cleanups(self):
+        """To handle async delete actions, a list of waits is added
+        which will be iterated over as the last step of clearing the
+        cleanup queue. That way all the delete calls are made up front
+        and the tests won't succeed unless the deletes are eventually
+        successful. This is the same basic approach used in the api tests to
+        limit cleanup execution time except here it is multi-resource,
+        because of the nature of the scenario tests.
+        """
+        for wait in self.cleanup_waits:
+            waiter_callable = wait.pop('waiter_callable')
+            waiter_callable(**wait)
+
+    # ## Test functions library
+    #
+    # The create_[resource] functions only return body and discard the
+    # resp part which is not used in scenario tests
+
+    def create_keypair(self):
+        name = data_utils.rand_name(self.__class__.__name__)
+        # We don't need to create a keypair by pubkey in scenario
+        resp, body = self.keypairs_client.create_keypair(name)
+        self.addCleanup(self.keypairs_client.delete_keypair, name)
+        return body
+
+    def create_server(self, name=None, image=None, flavor=None,
+                      wait_on_boot=True, wait_on_delete=True,
+                      create_kwargs={}):
+        """Creates VM instance.
+
+        @param image: image from which to create the instance
+        @param wait_on_boot: wait for status ACTIVE before continue
+        @param wait_on_delete: force synchronous delete on cleanup
+        @param create_kwargs: additional details for instance creation
+        @return: server dict
+        """
+        if name is None:
+            name = data_utils.rand_name(self.__class__.__name__)
+        if image is None:
+            image = CONF.compute.image_ref
+        if flavor is None:
+            flavor = CONF.compute.flavor_ref
+
+        fixed_network_name = CONF.compute.fixed_network_name
+        if 'nics' not in create_kwargs and fixed_network_name:
+            _, networks = self.networks_client.list_networks()
+            # If several networks found, set the NetID on which to connect the
+            # server to avoid the following error "Multiple possible networks
+            # found, use a Network ID to be more specific."
+            # See Tempest #1250866
+            if len(networks) > 1:
+                for network in networks:
+                    if network['label'] == fixed_network_name:
+                        create_kwargs['nics'] = [{'net-id': network['id']}]
+                        break
+                # If we didn't find the network we were looking for :
+                else:
+                    msg = ("The network on which the NIC of the server must "
+                           "be connected can not be found : "
+                           "fixed_network_name=%s. Starting instance without "
+                           "specifying a network.") % fixed_network_name
+                    LOG.info(msg)
+
+        LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
+                  name, image, flavor)
+        _, server = self.servers_client.create_server(name, image, flavor,
+                                                      **create_kwargs)
+        if wait_on_delete:
+            self.addCleanup(self.servers_client.wait_for_server_termination,
+                            server['id'])
+        self.addCleanup_with_wait(
+            waiter_callable=self.servers_client.wait_for_server_termination,
+            thing_id=server['id'], thing_id_param='server_id',
+            cleanup_callable=self.delete_wrapper,
+            cleanup_args=[self.servers_client.delete_server, server['id']])
+        if wait_on_boot:
+            self.servers_client.wait_for_server_status(server_id=server['id'],
+                                                       status='ACTIVE')
+        # The instance retrieved on creation is missing network
+        # details, necessitating retrieval after it becomes active to
+        # ensure correct details.
+        _, server = self.servers_client.get_server(server['id'])
+        self.assertEqual(server['name'], name)
+        return server
+
+    def create_volume(self, size=1, name=None, snapshot_id=None,
+                      imageRef=None, volume_type=None, wait_on_delete=True):
+        if name is None:
+            name = data_utils.rand_name(self.__class__.__name__)
+        _, volume = self.volumes_client.create_volume(
+            size=size, display_name=name, snapshot_id=snapshot_id,
+            imageRef=imageRef, volume_type=volume_type)
+        if wait_on_delete:
+            self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+                            volume['id'])
+        self.addCleanup_with_wait(
+            waiter_callable=self.volumes_client.wait_for_resource_deletion,
+            thing_id=volume['id'], thing_id_param='id',
+            cleanup_callable=self.delete_wrapper,
+            cleanup_args=[self.volumes_client.delete_volume, volume['id']])
+
+        self.assertEqual(name, volume['display_name'])
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        _, volume = self.volumes_client.get_volume(volume['id'])
+        return volume
+
+    def _create_loginable_secgroup_rule_nova(self, secgroup_id=None):
+        _client = self.security_groups_client
+        if secgroup_id is None:
+            _, sgs = _client.list_security_groups()
+            for sg in sgs:
+                if sg['name'] == 'default':
+                    secgroup_id = sg['id']
+
+        # These rules are intended to permit inbound ssh and icmp
+        # traffic from all sources, so no group_id is provided.
+        # Setting a group_id would only permit traffic from ports
+        # belonging to the same security group.
+        rulesets = [
+            {
+                # ssh
+                'ip_proto': 'tcp',
+                'from_port': 22,
+                'to_port': 22,
+                'cidr': '0.0.0.0/0',
+            },
+            {
+                # ping
+                'ip_proto': 'icmp',
+                'from_port': -1,
+                'to_port': -1,
+                'cidr': '0.0.0.0/0',
+            }
+        ]
+        rules = list()
+        for ruleset in rulesets:
+            _, sg_rule = _client.create_security_group_rule(secgroup_id,
+                                                            **ruleset)
+            self.addCleanup(self.delete_wrapper,
+                            _client.delete_security_group_rule,
+                            sg_rule['id'])
+            rules.append(sg_rule)
+        return rules
+
+    def _create_security_group_nova(self):
+        # Create security group
+        sg_name = data_utils.rand_name(self.__class__.__name__)
+        sg_desc = sg_name + " description"
+        _, secgroup = self.security_groups_client.create_security_group(
+            sg_name, sg_desc)
+        self.assertEqual(secgroup['name'], sg_name)
+        self.assertEqual(secgroup['description'], sg_desc)
+        self.addCleanup(self.delete_wrapper,
+                        self.security_groups_client.delete_security_group,
+                        secgroup['id'])
+
+        # Add rules to the security group
+        self._create_loginable_secgroup_rule_nova(secgroup['id'])
+
+        return secgroup
+
+    def get_remote_client(self, server_or_ip, username=None, private_key=None):
+        if isinstance(server_or_ip, six.string_types):
+            ip = server_or_ip
+        else:
+            network_name_for_ssh = CONF.compute.network_for_ssh
+            ip = server_or_ip.networks[network_name_for_ssh][0]
+        if username is None:
+            username = CONF.scenario.ssh_user
+        if private_key is None:
+            private_key = self.keypair['private_key']
+        linux_client = remote_client.RemoteClient(ip, username,
+                                                  pkey=private_key)
+        try:
+            linux_client.validate_authentication()
+        except exceptions.SSHTimeout:
+            LOG.exception('ssh connection to %s failed' % ip)
+            debug.log_net_debug()
+            raise
+
+        return linux_client
+
+    def _image_create(self, name, fmt, path, properties={}):
+        name = data_utils.rand_name('%s-' % name)
+        image_file = open(path, 'rb')
+        self.addCleanup(image_file.close)
+        params = {
+            'name': name,
+            'container_format': fmt,
+            'disk_format': fmt,
+            'is_public': 'False',
+        }
+        params.update(properties)
+        _, image = self.image_client.create_image(**params)
+        self.addCleanup(self.image_client.delete_image, image['id'])
+        self.assertEqual("queued", image['status'])
+        self.image_client.update_image(image['id'], data=image_file)
+        return image['id']
+
+    def glance_image_create(self):
+        qcow2_img_path = (CONF.scenario.img_dir + "/" +
+                          CONF.scenario.qcow2_img_file)
+        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
+        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
+        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
+        LOG.debug("paths: img: %s, ami: %s, ari: %s, aki: %s"
+                  % (qcow2_img_path, ami_img_path, ari_img_path, aki_img_path))
+        try:
+            self.image = self._image_create('scenario-img',
+                                            'bare',
+                                            qcow2_img_path,
+                                            properties={'disk_format':
+                                                        'qcow2'})
+        except IOError:
+            LOG.debug("A qcow2 image was not found. Try to get a uec image.")
+            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
+            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
+            properties = {
+                'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
+            }
+            self.image = self._image_create('scenario-ami', 'ami',
+                                            path=ami_img_path,
+                                            properties=properties)
+        LOG.debug("image:%s" % self.image)
+
+    def _log_console_output(self, servers=None):
+        if not servers:
+            _, servers = self.servers_client.list_servers()
+            servers = servers['servers']
+        for server in servers:
+            LOG.debug('Console output for %s', server['id'])
+            LOG.debug(self.servers_client.get_console_output(server['id'],
+                                                             length=None))
+
+    def create_server_snapshot(self, server, name=None):
+        # Glance client
+        _image_client = self.image_client
+        # Compute client
+        _images_client = self.images_client
+        if name is None:
+            name = data_utils.rand_name('scenario-snapshot-')
+        LOG.debug("Creating a snapshot image for server: %s", server['name'])
+        resp, image = _images_client.create_image(server['id'], name)
+        image_id = resp['location'].split('images/')[1]
+        _image_client.wait_for_image_status(image_id, 'active')
+        self.addCleanup_with_wait(
+            waiter_callable=_image_client.wait_for_resource_deletion,
+            thing_id=image_id, thing_id_param='id',
+            cleanup_callable=self.delete_wrapper,
+            cleanup_args=[_image_client.delete_image, image_id])
+        _, snapshot_image = _image_client.get_image_meta(image_id)
+        image_name = snapshot_image['name']
+        self.assertEqual(name, image_name)
+        LOG.debug("Created snapshot image %s for server %s",
+                  image_name, server['name'])
+        return snapshot_image
+
 
 class OfficialClientTest(tempest.test.BaseTestCase):
     """
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 29fdc74..7316674 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,6 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.common import custom_matchers
 from tempest.common import debug
 from tempest import config
 from tempest.openstack.common import log as logging
@@ -24,7 +25,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestMinimumBasicScenario(manager.OfficialClientTest):
+class TestMinimumBasicScenario(manager.ScenarioTest):
 
     """
     This is a basic minimum scenario test.
@@ -38,61 +39,69 @@
     """
 
     def _wait_for_server_status(self, status):
-        server_id = self.server.id
-        self.status_timeout(
-            self.compute_client.servers, server_id, status)
+        server_id = self.server['id']
+        # Raise on error defaults to True, which is consistent with the
+        # original function from scenario tests here
+        self.servers_client.wait_for_server_status(server_id, status)
 
     def nova_keypair_add(self):
         self.keypair = self.create_keypair()
 
     def nova_boot(self):
-        create_kwargs = {'key_name': self.keypair.name}
+        create_kwargs = {'key_name': self.keypair['name']}
         self.server = self.create_server(image=self.image,
                                          create_kwargs=create_kwargs)
 
     def nova_list(self):
-        servers = self.compute_client.servers.list()
-        LOG.debug("server_list:%s" % servers)
-        self.assertIn(self.server, servers)
+        _, servers = self.servers_client.list_servers()
+        # The list servers in the compute client is inconsistent...
+        servers = servers['servers']
+        self.assertIn(self.server['id'], [x['id'] for x in servers])
 
     def nova_show(self):
-        got_server = self.compute_client.servers.get(self.server)
-        LOG.debug("got server:%s" % got_server)
-        self.assertEqual(self.server, got_server)
+        _, got_server = self.servers_client.get_server(self.server['id'])
+        self.assertThat(
+            self.server, custom_matchers.MatchesDictExceptForKeys(
+                got_server, excluded_keys=['OS-EXT-AZ:availability_zone']))
 
     def cinder_create(self):
         self.volume = self.create_volume()
 
     def cinder_list(self):
-        volumes = self.volume_client.volumes.list()
-        self.assertIn(self.volume, volumes)
+        _, volumes = self.volumes_client.list_volumes()
+        self.assertIn(self.volume['id'], [x['id'] for x in volumes])
 
     def cinder_show(self):
-        volume = self.volume_client.volumes.get(self.volume.id)
+        _, volume = self.volumes_client.get_volume(self.volume['id'])
         self.assertEqual(self.volume, volume)
 
     def nova_volume_attach(self):
-        attach_volume_client = self.compute_client.volumes.create_server_volume
-        volume = attach_volume_client(self.server.id,
-                                      self.volume.id,
-                                      '/dev/vdb')
-        self.assertEqual(self.volume.id, volume.id)
-        self.wait_for_volume_status('in-use')
+        # TODO(andreaf) Device should be here CONF.compute.volume_device_name
+        _, volume_attachment = self.servers_client.attach_volume(
+            self.server['id'], self.volume['id'], '/dev/vdb')
+        volume = volume_attachment['volumeAttachment']
+        self.assertEqual(self.volume['id'], volume['id'])
+        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+        # Refresh the volume after the attachment
+        _, self.volume = self.volumes_client.get_volume(volume['id'])
 
     def nova_reboot(self):
-        self.server.reboot()
+        self.servers_client.reboot(self.server['id'], 'SOFT')
         self._wait_for_server_status('ACTIVE')
 
     def nova_floating_ip_create(self):
-        self.floating_ip = self.compute_client.floating_ips.create()
-        self.addCleanup(self.delete_wrapper, self.floating_ip)
+        _, self.floating_ip = self.floating_ips_client.create_floating_ip()
+        self.addCleanup(self.delete_wrapper,
+                        self.floating_ips_client.delete_floating_ip,
+                        self.floating_ip['id'])
 
     def nova_floating_ip_add(self):
-        self.server.add_floating_ip(self.floating_ip)
+        self.floating_ips_client.associate_floating_ip_to_server(
+            self.floating_ip['ip'], self.server['id'])
 
     def ssh_to_server(self):
         try:
-            self.linux_client = self.get_remote_client(self.floating_ip.ip)
+            self.linux_client = self.get_remote_client(self.floating_ip['ip'])
         except Exception as e:
             LOG.exception('ssh to server failed')
             self._log_console_output()
@@ -102,21 +111,24 @@
             raise
 
     def check_partitions(self):
+        # NOTE(andreaf) The device name may be different on different guest OS
         partitions = self.linux_client.get_partitions()
         self.assertEqual(1, partitions.count('vdb'))
 
     def nova_volume_detach(self):
-        detach_volume_client = self.compute_client.volumes.delete_server_volume
-        detach_volume_client(self.server.id, self.volume.id)
-        self.wait_for_volume_status('available')
+        self.servers_client.detach_volume(self.server['id'], self.volume['id'])
+        self.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                   'available')
 
-        volume = self.volume_client.volumes.get(self.volume.id)
-        self.assertEqual('available', volume.status)
+        _, volume = self.volumes_client.get_volume(self.volume['id'])
+        self.assertEqual('available', volume['status'])
 
     def create_and_add_security_group(self):
         secgroup = self._create_security_group_nova()
-        self.server.add_security_group(secgroup.name)
-        self.addCleanup(self.server.remove_security_group, secgroup.name)
+        self.servers_client.add_security_group(self.server['id'],
+                                               secgroup['name'])
+        self.addCleanup(self.servers_client.remove_security_group,
+                        self.server['id'], secgroup['name'])
 
     @test.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index ffdd006..d500065 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -25,7 +25,7 @@
 LOG = log.getLogger(__name__)
 
 
-class TestSnapshotPattern(manager.OfficialClientTest):
+class TestSnapshotPattern(manager.ScenarioTest):
     """
     This test is for snapshotting an instance and booting with it.
     The following is the scenario outline:
@@ -37,9 +37,9 @@
     """
 
     def _boot_image(self, image_id):
-        security_groups = [self.security_group.name]
+        security_groups = [self.security_group]
         create_kwargs = {
-            'key_name': self.keypair.name,
+            'key_name': self.keypair['name'],
             'security_groups': security_groups
         }
         return self.create_server(image=image_id, create_kwargs=create_kwargs)
@@ -66,12 +66,15 @@
         self.assertEqual(self.timestamp, got_timestamp)
 
     def _create_floating_ip(self):
-        floating_ip = self.compute_client.floating_ips.create()
-        self.addCleanup(self.delete_wrapper, floating_ip)
+        _, floating_ip = self.floating_ips_client.create_floating_ip()
+        self.addCleanup(self.delete_wrapper,
+                        self.floating_ips_client.delete_floating_ip,
+                        floating_ip['id'])
         return floating_ip
 
     def _set_floating_ip_to_server(self, server, floating_ip):
-        server.add_floating_ip(floating_ip)
+        self.floating_ips_client.associate_floating_ip_to_server(
+            floating_ip['ip'], server['id'])
 
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
@@ -86,7 +89,7 @@
         if CONF.compute.use_floatingip_for_ssh:
             fip_for_server = self._create_floating_ip()
             self._set_floating_ip_to_server(server, fip_for_server)
-            self._write_timestamp(fip_for_server.ip)
+            self._write_timestamp(fip_for_server['ip'])
         else:
             self._write_timestamp(server)
 
@@ -94,13 +97,13 @@
         snapshot_image = self.create_server_snapshot(server=server)
 
         # boot a second instance from the snapshot
-        server_from_snapshot = self._boot_image(snapshot_image.id)
+        server_from_snapshot = self._boot_image(snapshot_image['id'])
 
         # check the existence of the timestamp file in the second instance
         if CONF.compute.use_floatingip_for_ssh:
             fip_for_snapshot = self._create_floating_ip()
             self._set_floating_ip_to_server(server_from_snapshot,
                                             fip_for_snapshot)
-            self._check_timestamp(fip_for_snapshot.ip)
+            self._check_timestamp(fip_for_snapshot['ip'])
         else:
             self._check_timestamp(server_from_snapshot)
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index 86e0867..b5f3a07 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -25,7 +25,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestSwiftBasicOps(manager.OfficialClientTest):
+class TestSwiftBasicOps(manager.ScenarioTest):
     """
     Test swift with the follow operations:
      * get swift stat.
@@ -46,34 +46,37 @@
             skip_msg = ("%s skipped as swift is not available" %
                         cls.__name__)
             raise cls.skipException(skip_msg)
+        # Clients for Swift
+        cls.account_client = cls.manager.account_client
+        cls.container_client = cls.manager.container_client
+        cls.object_client = cls.manager.object_client
 
     def _get_swift_stat(self):
         """get swift status for our user account."""
-        self.object_storage_client.get_account()
+        self.account_client.list_account_containers()
         LOG.debug('Swift status information obtained successfully')
 
     def _create_container(self, container_name=None):
         name = container_name or data_utils.rand_name(
             'swift-scenario-container')
-        self.object_storage_client.put_container(name)
+        self.container_client.create_container(name)
         # look for the container to assure it is created
         self._list_and_check_container_objects(name)
         LOG.debug('Container %s created' % (name))
         return name
 
     def _delete_container(self, container_name):
-        self.object_storage_client.delete_container(container_name)
+        self.container_client.delete_container(container_name)
         LOG.debug('Container %s deleted' % (container_name))
 
     def _upload_object_to_container(self, container_name, obj_name=None):
         obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
-        self.object_storage_client.put_object(container_name, obj_name,
-                                              data_utils.rand_name('obj_data'),
-                                              content_type='text/plain')
+        self.object_client.create_object(container_name, obj_name,
+                                         data_utils.arbitrary_string())
         return obj_name
 
     def _delete_object(self, container_name, filename):
-        self.object_storage_client.delete_object(container_name, filename)
+        self.object_client.delete_object(container_name, filename)
         self._list_and_check_container_objects(container_name,
                                                not_present_obj=[filename])
 
@@ -83,10 +86,8 @@
         List objects for a given container and assert which are present and
         which are not.
         """
-        meta, response = self.object_storage_client.get_container(
+        _, object_list = self.container_client.list_container_contents(
             container_name)
-        # create a list with file name only
-        object_list = [obj['name'] for obj in response]
         if present_obj:
             for obj in present_obj:
                 self.assertIn(obj, object_list)
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index f98ecff..0b97f74 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -119,6 +119,7 @@
             uri += "?%s" % urllib.urlencode(kwargs)
 
         resp, body = self.get(uri)
+        self.expected_success(200, resp['status'])
 
         return resp, self.deserialize(body)
 
@@ -135,6 +136,7 @@
         else:
             uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
         resp, body = self.get(uri)
+        self.expected_success(200, resp['status'])
 
         return resp, self.deserialize(body)
 
@@ -153,6 +155,7 @@
         uri = self._get_uri(resource)
 
         resp, body = self.post(uri, body=body)
+        self.expected_success(201, resp['status'])
 
         return resp, self.deserialize(body)
 
@@ -168,6 +171,7 @@
         uri = self._get_uri(resource, uuid)
 
         resp, body = self.delete(uri)
+        self.expected_success(204, resp['status'])
         return resp, body
 
     def _patch_request(self, resource, uuid, patch_object):
@@ -184,6 +188,7 @@
         patch_body = json.dumps(patch_object)
 
         resp, body = self.patch(uri, body=patch_body)
+        self.expected_success(200, resp['status'])
         return resp, self.deserialize(body)
 
     @handle_errors
@@ -212,4 +217,5 @@
         put_body = json.dumps(put_object)
 
         resp, body = self.put(uri, body=put_body)
+        self.expected_success(202, resp['status'])
         return resp, body
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index d57b931..0522f37 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -525,7 +525,7 @@
     def __init__(self):
         super(V3TokenClientJSON, self).__init__(None)
         auth_url = CONF.identity.uri_v3
-        if not auth_url and CONF.identity_feature_enabled.api_v3:
+        if not auth_url:
             raise exceptions.InvalidConfiguration('you must specify a v3 uri '
                                                   'if using the v3 identity '
                                                   'api')
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index c2bd77e..5b761b3 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -520,7 +520,7 @@
     def __init__(self):
         super(V3TokenClientXML, self).__init__(None)
         auth_url = CONF.identity.uri_v3
-        if not auth_url and CONF.identity_feature_enabled.api_v3:
+        if not auth_url:
             raise exceptions.InvalidConfiguration('you must specify a v3 uri '
                                                   'if using the v3 identity '
                                                   'api')
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 4ee8302..e29f1b5 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -174,10 +174,8 @@
         raise AttributeError(name)
 
     # Common methods that are hard to automate
-    def create_bulk_network(self, count, names):
-        network_list = list()
-        for i in range(count):
-            network_list.append({'name': names[i]})
+    def create_bulk_network(self, names):
+        network_list = [{'name': name} for name in names]
         post_data = {'networks': network_list}
         body = self.serialize_list(post_data, "networks", "network")
         uri = self.get_uri("networks")
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index f6770ab..286e022 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -12,12 +12,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import abc
 import signal
 import sys
 
+import six
+
 from tempest.openstack.common import log as logging
 
 
+@six.add_metaclass(abc.ABCMeta)
 class StressAction(object):
 
     def __init__(self, manager, max_runs=None, stop_on_error=False):
@@ -83,6 +87,7 @@
                     self.tearDown()
                     sys.exit(1)
 
+    @abc.abstractmethod
     def run(self):
         """This method is where the stress test code runs."""
-        raise NotImplemented()
+        return
diff --git a/tox.ini b/tox.ini
index edcb901..a071d4b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -32,7 +32,6 @@
 
 [testenv:all]
 sitepackages = True
-setenv = VIRTUAL_ENV={envdir}
 commands =
   bash tools/pretty_tox.sh '{posargs}'