Merge "Don't output auth tokens with trace output"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 2fdbb7e..4a567e7 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -101,14 +101,32 @@
 # Options defined in tempest.config
 #
 
-# Catalog type of the baremetal provisioning service. (string
+# Catalog type of the baremetal provisioning service (string
 # value)
 #catalog_type=baremetal
 
+# Whether the Ironic nova-compute driver is enabled (boolean
+# value)
+#driver_enabled=false
+
 # The endpoint type to use for the baremetal provisioning
-# service. (string value)
+# service (string value)
 #endpoint_type=publicURL
 
+# Timeout for Ironic node to completely provision (integer
+# value)
+#active_timeout=300
+
+# Timeout for association of Nova instance and Ironic node
+# (integer value)
+#association_timeout=10
+
+# Timeout for Ironic power transitions. (integer value)
+#power_timeout=20
+
+# Timeout for unprovisioning an Ironic node. (integer value)
+#unprovision_timeout=20
+
 
 [boto]
 
@@ -193,7 +211,7 @@
 # admin credentials are known. (boolean value)
 #allow_tenant_isolation=false
 
-# Valid secondary image reference to be used in tests. (string
+# Valid primary image reference to be used in tests. (string
 # value)
 #image_ref={$IMAGE_ID}
 
@@ -232,6 +250,19 @@
 # Should the tests ssh to instances? (boolean value)
 #run_ssh=false
 
+# Auth method used to authenticate to the instance. Valid
+# choices are: keypair, configured, adminpass, disabled.
+# keypair: start the servers with an ssh keypair.
+# configured: use the configured user and password.
+# adminpass: use the injected adminPass. disabled: avoid
+# using ssh when it is an option.
+# (string value)
+#ssh_auth_method=keypair
+
+# How to connect to the instance. fixed: use the first IP
+# belonging to the fixed network. floating: create and use a
+# floating IP. (string value)
+#ssh_connect_method=fixed
+
 # User name used to authenticate to an instance. (string
 # value)
 #ssh_user=root
@@ -734,7 +765,7 @@
 
 # Timeout in seconds to wait for a stack to build. (integer
 # value)
-#build_timeout=600
+#build_timeout=1200
 
 # Instance type for tests. Needs to be big enough for a full
 # OS plus the test workload (string value)
@@ -960,6 +991,10 @@
 # value)
 #disk_format=raw
 
+# Default size in GB for volumes created by volumes tests
+# (integer value)
+#volume_size=1
+
 
 [volume-feature-enabled]
 
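The new compute options above (ssh_auth_method, ssh_connect_method) are read from CONF.compute in the base test class change later in this diff. The sketch below shows, for illustration only, how such options are typically registered with oslo.config; the actual definitions live in tempest/config.py, which this change does not touch, so the defaults and help text here are assumptions copied from the sample.

    # Illustrative sketch only -- the real definitions live in
    # tempest/config.py, which is not part of this diff.
    from oslo.config import cfg

    compute_group = cfg.OptGroup(name='compute', title='Compute options')

    ComputeOpts = [
        cfg.StrOpt('ssh_auth_method',
                   default='keypair',
                   help="Auth method used to authenticate to the instance: "
                        "keypair, configured, adminpass or disabled."),
        cfg.StrOpt('ssh_connect_method',
                   default='fixed',
                   help="How to connect to the instance: fixed (first IP on "
                        "the fixed network) or floating (create and use a "
                        "floating IP)."),
    ]

    CONF = cfg.CONF
    CONF.register_group(compute_group)
    CONF.register_opts(ComputeOpts, group=compute_group)

    # Tests can then branch on CONF.compute.ssh_auth_method, as
    # tempest/api/compute/base.py does further down in this change.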
diff --git a/requirements.txt b/requirements.txt
index 3521df0..fe3e5e5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-pbr>=0.6,<1.0
+pbr>=0.6,!=0.7,<1.0
 anyjson>=0.3.3
 httplib2>=0.7.5
 jsonschema>=2.0.0,<3.0.0
@@ -13,13 +13,14 @@
 python-neutronclient>=2.3.4,<3
 python-cinderclient>=1.0.6
 python-heatclient>=0.2.3
+python-ironicclient
 python-saharaclient>=0.6.0
 python-swiftclient>=1.6
 testresources>=0.2.4
-keyring>=1.6.1,<2.0,>=2.1
+keyring>=2.1
 testrepository>=0.0.18
 oslo.config>=1.2.0
-six>=1.5.2
+six>=1.6.0
 iso8601>=0.1.9
 fixtures>=0.3.14
 testscenarios>=0.4
diff --git a/setup.cfg b/setup.cfg
index a701572..f4aa3e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,6 +17,10 @@
     Programming Language :: Python :: 2
     Programming Language :: Python :: 2.7
 
+[entry_points]
+console_scripts =
+    verify-tempest-config = tempest.cmd.verify_tempest_config:main
+
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
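The [entry_points] section added above makes pbr/setuptools generate a verify-tempest-config console script that imports tempest.cmd.verify_tempest_config and calls its main(). A minimal sketch of the shape that module must have for the entry point to resolve is shown below; the body is a placeholder, not the actual verify_tempest_config implementation.

    # tempest/cmd/verify_tempest_config.py -- minimal shape required by the
    # console_scripts entry point; the body here is a placeholder only.
    import sys


    def main():
        # The generated 'verify-tempest-config' wrapper calls this function
        # and uses its return value as the process exit code.
        print("checking tempest configuration against the deployed cloud...")
        return 0


    if __name__ == '__main__':
        sys.exit(main())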
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 2e745f8..021adaf 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -106,17 +106,20 @@
 
     @classmethod
     @creates('port')
-    def create_port(cls, node_id, address=None):
+    def create_port(cls, node_id, address, extra=None, uuid=None):
         """
         Wrapper utility for creating test ports.
 
-        :param address: MAC address of the port. If not supplied, a random
-            value will be generated.
+        :param address: MAC address of the port.
+        :param extra: Meta data of the port. If not supplied, an empty
+            dictionary will be created.
+        :param uuid: UUID of the port.
         :return: Created port.
 
         """
-        address = address or data_utils.rand_mac_address()
-        resp, body = cls.client.create_port(address=address, node_id=node_id)
+        extra = extra or {}
+        resp, body = cls.client.create_port(address=address, node_id=node_id,
+                                            extra=extra, uuid=uuid)
 
         return {'port': body, 'response': resp}
 
@@ -170,3 +173,12 @@
             cls.created_objects['port'].remove(port_id)
 
         return resp
+
+    def validate_self_link(self, resource, uuid, link):
+        """Check whether the given self link formatted correctly."""
+        expected_link = "{base}/{pref}/{res}/{uuid}".format(
+                        base=self.client.base_url,
+                        pref=self.client.uri_prefix,
+                        res=resource,
+                        uuid=uuid)
+        self.assertEqual(expected_link, link)
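With the new create_port() signature the MAC address is mandatory while extra and uuid stay optional, and validate_self_link() checks the href the API hands back. A short usage sketch, mirroring how the port tests later in this change call these helpers (data_utils is tempest.common.utils.data_utils, imported by those tests):

    # Usage sketch inside a BaseBaremetalTest subclass (mirrors the port
    # tests further down in this change).
    address = data_utils.rand_mac_address()
    port = self.create_port(node_id=self.node['uuid'], address=address,
                            extra={'foo': 'bar'})['port']

    # The first link is expected to be the self link:
    # <base_url>/<uri_prefix>/ports/<uuid>
    self.validate_self_link('ports', port['uuid'],
                            port['links'][0]['href'])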
diff --git a/tempest/api/baremetal/test_drivers.py b/tempest/api/baremetal/test_drivers.py
new file mode 100644
index 0000000..445ca60
--- /dev/null
+++ b/tempest/api/baremetal/test_drivers.py
@@ -0,0 +1,26 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.baremetal import base
+from tempest import test
+
+
+class TestDrivers(base.BaseBaremetalTest):
+    """Tests for drivers."""
+
+    @test.attr(type="smoke")
+    def test_list_drivers(self):
+        resp, drivers = self.client.list_drivers()
+        self.assertEqual('200', resp['status'])
+        self.assertIn('fake', [d['name'] for d in drivers['drivers']])
diff --git a/tempest/api/baremetal/test_nodestates.py b/tempest/api/baremetal/test_nodestates.py
new file mode 100644
index 0000000..c658d7f
--- /dev/null
+++ b/tempest/api/baremetal/test_nodestates.py
@@ -0,0 +1,33 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.baremetal import base
+from tempest import test
+
+
+class TestNodeStates(base.BaseBaremetalTest):
+    """Tests for baremetal NodeStates."""
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestNodeStates, cls).setUpClass()
+        chassis = cls.create_chassis()['chassis']
+        cls.node = cls.create_node(chassis['uuid'])['node']
+
+    @test.attr(type='smoke')
+    def test_list_nodestates(self):
+        resp, nodestates = self.client.list_nodestates(self.node['uuid'])
+        self.assertEqual('200', resp['status'])
+        for key in nodestates:
+            self.assertEqual(nodestates[key], self.node[key])
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index fb2acc7..8b76811 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -30,54 +30,268 @@
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        port = self.create_port(node_id=node_id, address=address)['port']
+        result = self.create_port(node_id=node_id, address=address)
 
-        self.assertEqual(port['address'], address)
-        self.assertEqual(port['node_uuid'], node_id)
+        port = result['port']
+
+        resp, body = self.client.show_port(port['uuid'])
+
+        self.assertEqual(200, resp.status)
+        self.assertEqual(port['uuid'], body['uuid'])
+        self.assertEqual(address, body['address'])
+        self.assertEqual({}, body['extra'])
+        self.assertEqual(node_id, body['node_uuid'])
+
+    @test.attr(type='smoke')
+    def test_create_port_specifying_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        uuid = data_utils.rand_uuid()
+
+        self.create_port(node_id=node_id, address=address, uuid=uuid)
+
+        resp, body = self.client.show_port(uuid)
+
+        self.assertEqual(200, resp.status)
+        self.assertEqual(uuid, body['uuid'])
+        self.assertEqual(address, body['address'])
+        self.assertEqual({}, body['extra'])
+        self.assertEqual(node_id, body['node_uuid'])
+
+    @test.attr(type='smoke')
+    def test_create_port_with_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        result = self.create_port(node_id=node_id, address=address,
+                                  extra=extra)
+        port = result['port']
+
+        resp, body = self.client.show_port(port['uuid'])
+
+        self.assertEqual(200, resp.status)
+        self.assertEqual(port['uuid'], body['uuid'])
+        self.assertEqual(address, body['address'])
+        self.assertEqual(extra, body['extra'])
+        self.assertEqual(node_id, body['node_uuid'])
 
     @test.attr(type='smoke')
     def test_delete_port(self):
         node_id = self.node['uuid']
-        port_id = self.create_port(node_id=node_id)['port']['uuid']
+        address = data_utils.rand_mac_address()
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
 
         resp = self.delete_port(port_id)
 
-        self.assertEqual(resp['status'], '204')
+        self.assertEqual(204, resp.status)
         self.assertRaises(exc.NotFound, self.client.show_port, port_id)
 
     @test.attr(type='smoke')
     def test_show_port(self):
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
 
-        port_id = self.create_port(node_id=node_id,
-                                   address=address)['port']['uuid']
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
 
         resp, port = self.client.show_port(port_id)
 
-        self.assertEqual(port['uuid'], port_id)
-        self.assertEqual(port['address'], address)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(port_id, port['uuid'])
+        self.assertEqual(address, port['address'])
+        self.assertEqual(extra, port['extra'])
+
+    @test.attr(type='smoke')
+    def test_show_port_with_links(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+
+        resp, body = self.client.show_port(port_id)
+
+        self.assertEqual(200, resp.status)
+        self.assertIn('links', body.keys())
+        self.assertEqual(2, len(body['links']))
+        self.assertIn(port_id, body['links'][0]['href'])
 
     @test.attr(type='smoke')
     def test_list_ports(self):
         node_id = self.node['uuid']
 
-        uuids = [self.create_port(node_id=node_id)['port']['uuid']
-                 for i in range(0, 5)]
+        uuids = [self.create_port(node_id=node_id,
+                                  address=data_utils.rand_mac_address())
+                 ['port']['uuid'] for i in xrange(5)]
 
         resp, body = self.client.list_ports()
+        self.assertEqual(200, resp.status)
         loaded_uuids = [p['uuid'] for p in body['ports']]
 
-        for u in uuids:
-            self.assertIn(u, loaded_uuids)
+        for uuid in uuids:
+            self.assertIn(uuid, loaded_uuids)
+
+        # Verify self links.
+        for port in body['ports']:
+            self.validate_self_link('ports', port['uuid'],
+                                    port['links'][0]['href'])
 
     @test.attr(type='smoke')
-    def test_update_port(self):
+    def test_list_with_limit(self):
         node_id = self.node['uuid']
-        port_id = self.create_port(node_id=node_id)['port']['uuid']
+
+        for i in xrange(5):
+            self.create_port(node_id=node_id,
+                             address=data_utils.rand_mac_address())
+
+        resp, body = self.client.list_ports(limit=3)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(3, len(body['ports']))
+
+        next_marker = body['ports'][-1]['uuid']
+        self.assertIn(next_marker, body['next'])
+
+    def test_list_ports_details(self):
+        node_id = self.node['uuid']
+
+        uuids = [
+            self.create_port(node_id=node_id,
+                             address=data_utils.rand_mac_address())
+            ['port']['uuid'] for i in range(0, 5)]
+
+        resp, body = self.client.list_ports_detail()
+        self.assertEqual(200, resp.status)
+
+        ports_dict = {port['uuid']: port for port in body['ports']
+                      if port['uuid'] in uuids}
+
+        for uuid in uuids:
+            self.assertIn(uuid, ports_dict)
+            port = ports_dict[uuid]
+            self.assertIn('extra', port)
+            self.assertIn('node_uuid', port)
+            # never expose the node_id
+            self.assertNotIn('node_id', port)
+            # Verify self link.
+            self.validate_self_link('ports', port['uuid'],
+                                    port['links'][0]['href'])
+
+    @test.attr(type='smoke')
+    def test_update_port_replace(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
 
         new_address = data_utils.rand_mac_address()
-        self.client.update_port(port_id, address=new_address)
+        new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
+                     'key3': 'new-value3'}
+
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': new_address},
+                 {'path': '/extra/key1',
+                  'op': 'replace',
+                  'value': new_extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'replace',
+                  'value': new_extra['key2']},
+                 {'path': '/extra/key3',
+                  'op': 'replace',
+                  'value': new_extra['key3']}]
+
+        self.client.update_port(port_id, patch)
 
         resp, body = self.client.show_port(port_id)
-        self.assertEqual(body['address'], new_address)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_address, body['address'])
+        self.assertEqual(new_extra, body['extra'])
+
+    @test.attr(type='smoke')
+    def test_update_port_remove(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
+
+        # Removing one item from the collection
+        resp, _ = self.client.update_port(port_id, [{'path': '/extra/key2',
+                                                     'op': 'remove'}])
+        self.assertEqual(200, resp.status)
+        extra.pop('key2')
+        resp, body = self.client.show_port(port_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(extra, body['extra'])
+
+        # Removing the collection
+        resp, _ = self.client.update_port(port_id, [{'path': '/extra',
+                                                     'op': 'remove'}])
+        self.assertEqual(200, resp.status)
+        resp, body = self.client.show_port(port_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual({}, body['extra'])
+
+        # Assert nothing else was changed
+        self.assertEqual(node_id, body['node_uuid'])
+        self.assertEqual(address, body['address'])
+
+    @test.attr(type='smoke')
+    def test_update_port_add(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+
+        extra = {'key1': 'value1', 'key2': 'value2'}
+
+        patch = [{'path': '/extra/key1',
+                  'op': 'add',
+                  'value': extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'add',
+                  'value': extra['key2']}]
+
+        self.client.update_port(port_id, patch)
+
+        resp, body = self.client.show_port(port_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(extra, body['extra'])
+
+    @test.attr(type='smoke')
+    def test_update_port_mixed_ops(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
+
+        new_address = data_utils.rand_mac_address()
+        new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': new_address},
+                 {'path': '/extra/key1',
+                  'op': 'replace',
+                  'value': new_extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'remove'},
+                 {'path': '/extra/key3',
+                  'op': 'add',
+                  'value': new_extra['key3']}]
+
+        self.client.update_port(port_id, patch)
+
+        resp, body = self.client.show_port(port_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_address, body['address'])
+        self.assertEqual(new_extra, body['extra'])
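The update tests above send JSON-Patch style documents (a list of add/replace/remove operations) to update_port() and then assert on the resulting port. The snippet below is a self-contained illustration of the effect those operations are expected to have on the port's fields; the real patch handling happens server-side in Ironic, so this models the asserted outcome, not Ironic's implementation.

    # Illustrative only: models the result the tests assert on, not
    # Ironic's actual patch handling.
    def apply_simple_patch(port, patch):
        for op in patch:
            path = op['path'].lstrip('/').split('/')
            if op['op'] in ('replace', 'add'):
                target = port
                for key in path[:-1]:
                    target = target.setdefault(key, {})
                target[path[-1]] = op['value']
            elif op['op'] == 'remove':
                target = port
                for key in path[:-1]:
                    target = target[key]
                del target[path[-1]]
        return port


    port = {'address': 'aa:bb:cc:dd:ee:ff',
            'extra': {'key1': 'value1', 'key2': 'value2'}}
    patch = [{'path': '/extra/key1', 'op': 'replace', 'value': 'new-value1'},
             {'path': '/extra/key2', 'op': 'remove'},
             {'path': '/extra/key3', 'op': 'add', 'value': 'new-value3'}]
    apply_simple_patch(port, patch)
    assert port['extra'] == {'key1': 'new-value1', 'key3': 'new-value3'}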
diff --git a/tempest/api/baremetal/test_ports_negative.py b/tempest/api/baremetal/test_ports_negative.py
index 6cb8812..4cbe00e 100644
--- a/tempest/api/baremetal/test_ports_negative.py
+++ b/tempest/api/baremetal/test_ports_negative.py
@@ -25,16 +25,346 @@
         chassis = self.create_chassis()['chassis']
         self.node = self.create_node(chassis['uuid'])['node']
 
-    @test.attr(type='negative')
-    def test_create_port_invalid_mac(self):
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_mac(self):
         node_id = self.node['uuid']
-        address = 'not an uuid'
+        address = 'malformed:mac'
 
         self.assertRaises(exc.BadRequest,
                           self.create_port, node_id=node_id, address=address)
 
-    @test.attr(type='negative')
-    def test_create_port_wrong_node_id(self):
-        node_id = str(data_utils.rand_uuid())
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 0.123}
+        self.assertRaises(exc.BadRequest,
+                          self.create_port, node_id=node_id,
+                          address=address, extra=extra)
 
-        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id)
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_nonexistent_node_id(self):
+        node_id = str(data_utils.rand_uuid())
+        address = data_utils.rand_mac_address()
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+                          address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_show_port_malformed_uuid(self):
+        self.assertRaises(exc.BadRequest, self.client.show_port,
+                          'malformed:uuid')
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_show_port_nonexistent_uuid(self):
+        self.assertRaises(exc.NotFound, self.client.show_port,
+                          data_utils.rand_uuid())
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_show_port_by_mac_not_allowed(self):
+        self.assertRaises(exc.BadRequest, self.client.show_port,
+                          data_utils.rand_mac_address())
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_duplicated_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        uuid = data_utils.rand_uuid()
+
+        self.create_port(node_id=node_id, address=address, uuid=uuid)
+        self.assertRaises(exc.Conflict, self.create_port, node_id=node_id,
+                          address=address, uuid=uuid)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_no_mandatory_field_node_id(self):
+        address = data_utils.rand_mac_address()
+
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=None,
+                          address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_no_mandatory_field_mac(self):
+        node_id = self.node['uuid']
+
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+                          address=None)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        uuid = 'malformed:uuid'
+
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+                          address=address, uuid=uuid)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_node_id(self):
+        address = data_utils.rand_mac_address()
+        self.assertRaises(exc.BadRequest, self.create_port,
+                          node_id='malformed:nodeid', address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_duplicated_mac(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        self.create_port(node_id=node_id, address=address)
+        self.assertRaises(exc.Conflict,
+                          self.create_port, node_id=node_id,
+                          address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_by_mac_not_allowed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        self.create_port(node_id=node_id, address=address, extra=extra)
+
+        patch = [{'path': '/extra/key',
+                  'op': 'replace',
+                  'value': 'new-value'}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, address,
+                          patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_nonexistent(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
+        self.client.delete_port(port_id)
+
+        patch = [{'path': '/extra/key',
+                  'op': 'replace',
+                  'value': 'new-value'}]
+        self.assertRaises(exc.NotFound,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_malformed_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        self.create_port(node_id=node_id, address=address)
+
+        new_address = data_utils.rand_mac_address()
+        self.assertRaises(exc.BadRequest, self.client.update_port,
+                          uuid='malformed:uuid',
+                          patch=[{'path': '/address', 'op': 'replace',
+                                  'value': new_address}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_add_malformed_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/extra/key', 'op': 'add',
+                            'value': 0.123}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_add_whole_malformed_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/extra',
+                            'op': 'add',
+                            'value': [1, 2, 3, 4, 'a']}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_add_nonexistent_property(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/nonexistent', 'op': 'add',
+                            'value': 'value'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_node_id_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+
+        patch = [{'path': '/node_uuid',
+                  'op': 'replace',
+                  'value': 'malformed:node_uuid'}]
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_mac_with_duplicated(self):
+        node_id = self.node['uuid']
+        address1 = data_utils.rand_mac_address()
+        address2 = data_utils.rand_mac_address()
+
+        self.create_port(node_id=node_id, address=address1)
+        port_id = self.create_port(node_id=node_id,
+                                   address=address2)['port']['uuid']
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': address1}]
+        self.assertRaises(exc.Conflict,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_node_id_with_nonexistent(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+
+        patch = [{'path': '/node_uuid',
+                  'op': 'replace',
+                  'value': data_utils.rand_uuid()}]
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_mac_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': 'malformed:mac'}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_extra_item_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address,
+                                   extra=extra)['port']['uuid']
+        patch = [{'path': '/extra/key',
+                  'op': 'replace',
+                  'value': 0.123}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_whole_extra_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address,
+                                   extra=extra)['port']['uuid']
+        patch = [{'path': '/extra',
+                  'op': 'replace',
+                  'value': [1, 2, 3, 4, 'a']}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_nonexistent_property(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+
+        patch = [{'path': '/nonexistent', 'op': 'replace', 'value': 'value'}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_remove_mandatory_field_mac(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/address', 'op': 'remove'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_remove_mandatory_field_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/uuid', 'op': 'remove'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_remove_nonexistent_property(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/nonexistent', 'op': 'remove'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_delete_port_by_mac_not_allowed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        self.create_port(node_id=node_id, address=address)
+        self.assertRaises(exc.BadRequest, self.client.delete_port, address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_mixed_ops_integrity(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
+
+        new_address = data_utils.rand_mac_address()
+        new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': new_address},
+                 {'path': '/extra/key1',
+                  'op': 'replace',
+                  'value': new_extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'remove'},
+                 {'path': '/extra/key3',
+                  'op': 'add',
+                  'value': new_extra['key3']},
+                 {'path': '/nonexistent',
+                  'op': 'replace',
+                  'value': 'value'}]
+
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          patch)
+
+        # patch should not be applied
+        resp, body = self.client.show_port(port_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(address, body['address'])
+        self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
new file mode 100644
index 0000000..4808601
--- /dev/null
+++ b/tempest/api/compute/admin/test_agents.py
@@ -0,0 +1,123 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest.openstack.common import log
+from tempest import test
+
+LOG = log.getLogger(__name__)
+
+
+class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
+    """
+    Tests Agents API
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(AgentsAdminTestJSON, cls).setUpClass()
+        cls.client = cls.os_adm.agents_client
+
+    def setUp(self):
+        super(AgentsAdminTestJSON, self).setUp()
+        params = self._param_helper(
+            hypervisor='common', os='linux', architecture='x86_64',
+            version='7.0', url='xxx://xxxx/xxx/xxx',
+            md5hash='add6bb58e139be103324d04d82d8f545')
+        resp, body = self.client.create_agent(**params)
+        self.assertEqual(200, resp.status)
+        self.agent_id = body['agent_id']
+
+    def tearDown(self):
+        try:
+            self.client.delete_agent(self.agent_id)
+        except exceptions.NotFound:
+            pass
+        except Exception:
+            LOG.exception('Exception raised deleting agent %s', self.agent_id)
+        super(AgentsAdminTestJSON, self).tearDown()
+
+    def _param_helper(self, **kwargs):
+        rand_key = 'architecture'
+        if rand_key in kwargs:
+            # NOTE: The rand_name is for avoiding agent conflicts.
+            # If you try to create an agent with the same hypervisor,
+            # os and architecture as an existing agent, Nova will return
+            # an HTTPConflict or HTTPServerError.
+            kwargs[rand_key] = data_utils.rand_name(kwargs[rand_key])
+        return kwargs
+
+    @test.attr(type='gate')
+    def test_create_agent(self):
+        # Create an agent.
+        params = self._param_helper(
+            hypervisor='kvm', os='win', architecture='x86',
+            version='7.0', url='xxx://xxxx/xxx/xxx',
+            md5hash='add6bb58e139be103324d04d82d8f545')
+        resp, body = self.client.create_agent(**params)
+        self.assertEqual(200, resp.status)
+        self.addCleanup(self.client.delete_agent, body['agent_id'])
+        for expected_item, value in params.items():
+            self.assertEqual(value, body[expected_item])
+
+    @test.attr(type='gate')
+    def test_update_agent(self):
+        # Update an agent.
+        params = self._param_helper(
+            version='8.0', url='xxx://xxxx/xxx/xxx2',
+            md5hash='add6bb58e139be103324d04d82d8f547')
+        resp, body = self.client.update_agent(self.agent_id, **params)
+        self.assertEqual(200, resp.status)
+        for expected_item, value in params.items():
+            self.assertEqual(value, body[expected_item])
+
+    @test.attr(type='gate')
+    def test_delete_agent(self):
+        # Delete an agent.
+        resp, _ = self.client.delete_agent(self.agent_id)
+        self.assertEqual(200, resp.status)
+
+        # Verify the list doesn't contain the deleted agent.
+        resp, agents = self.client.list_agents()
+        self.assertEqual(200, resp.status)
+        self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
+
+    @test.attr(type='gate')
+    def test_list_agents(self):
+        # List all agents.
+        resp, agents = self.client.list_agents()
+        self.assertEqual(200, resp.status)
+        self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
+        self.assertIn(self.agent_id, map(lambda x: x['agent_id'], agents))
+
+    @test.attr(type='gate')
+    def test_list_agents_with_filter(self):
+        # List the agent builds by the filter.
+        params = self._param_helper(
+            hypervisor='xen', os='linux', architecture='x86',
+            version='7.0', url='xxx://xxxx/xxx/xxx1',
+            md5hash='add6bb58e139be103324d04d82d8f546')
+        resp, agent_xen = self.client.create_agent(**params)
+        self.assertEqual(200, resp.status)
+        self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
+
+        agent_id_xen = agent_xen['agent_id']
+        params_filter = {'hypervisor': agent_xen['hypervisor']}
+        resp, agents = self.client.list_agents(params_filter)
+        self.assertEqual(200, resp.status)
+        self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
+        self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
+        self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index fb249e5..c2376c9 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -16,6 +16,7 @@
 from tempest.api.compute import base
 from tempest.common import tempest_fixtures as fixtures
 from tempest.common.utils import data_utils
+from tempest import exceptions
 from tempest import test
 
 
@@ -39,11 +40,20 @@
                     filter(lambda y: y['service'] == 'compute', hosts_all))
         cls.host = hosts[0]
 
+    def _try_delete_aggregate(self, aggregate_id):
+        # delete aggregate, if it exists
+        try:
+            self.client.delete_aggregate(aggregate_id)
+        # if the aggregate is not found, it was already deleted by the test
+        except exceptions.NotFound:
+            pass
+
     @test.attr(type='gate')
     def test_aggregate_create_delete(self):
         # Create and delete an aggregate.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         resp, aggregate = self.client.create_aggregate(name=aggregate_name)
+        self.addCleanup(self._try_delete_aggregate, aggregate['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertIsNone(aggregate['availability_zone'])
@@ -59,6 +69,7 @@
         az_name = data_utils.rand_name(self.az_name_prefix)
         resp, aggregate = self.client.create_aggregate(
             name=aggregate_name, availability_zone=az_name)
+        self.addCleanup(self._try_delete_aggregate, aggregate['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertEqual(az_name, aggregate['availability_zone'])
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
new file mode 100644
index 0000000..514f1fa
--- /dev/null
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -0,0 +1,55 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(MigrationsAdminTest, cls).setUpClass()
+        cls.client = cls.os_adm.migrations_client
+
+    @test.attr(type='gate')
+    def test_list_migrations(self):
+        # Admin can get the migrations list
+        resp, _ = self.client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='gate')
+    def test_list_migrations_in_flavor_resize_situation(self):
+        # Admin can get the migrations list which contains the resized server
+        resp, server = self.create_test_server(wait_until="ACTIVE")
+        server_id = server['id']
+
+        resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize(server_id)
+        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+        resp, body = self.client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+        instance_uuids = [x['instance_uuid'] for x in body]
+        self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 09c7274..32e0478 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -76,21 +76,38 @@
     # TODO(afazekas): merge these test cases
     @test.attr(type='gate')
     def test_get_updated_quotas(self):
-        # Verify that GET shows the updated quota set
+        # Verify that GET shows the updated quota set of the tenant
         tenant_name = data_utils.rand_name('cpu_quota_tenant_')
         tenant_desc = tenant_name + '-desc'
         identity_client = self.os_adm.identity_client
         _, tenant = identity_client.create_tenant(name=tenant_name,
                                                   description=tenant_desc)
         tenant_id = tenant['id']
-        self.addCleanup(identity_client.delete_tenant,
-                        tenant_id)
+        self.addCleanup(identity_client.delete_tenant, tenant_id)
 
-        self.adm_client.update_quota_set(tenant_id,
-                                         ram='5120')
+        self.adm_client.update_quota_set(tenant_id, ram='5120')
         resp, quota_set = self.adm_client.get_quota_set(tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(quota_set['ram'], 5120)
+        self.assertEqual(5120, quota_set['ram'])
+
+        # Verify that GET shows the updated quota set of the user
+        user_name = data_utils.rand_name('cpu_quota_user_')
+        password = data_utils.rand_name('password-')
+        email = user_name + '@testmail.tm'
+        _, user = identity_client.create_user(name=user_name,
+                                              password=password,
+                                              tenant_id=tenant_id,
+                                              email=email)
+        user_id = user['id']
+        self.addCleanup(identity_client.delete_user, user_id)
+
+        self.adm_client.update_quota_set(tenant_id,
+                                         user_id=user_id,
+                                         ram='2048')
+        resp, quota_set = self.adm_client.get_quota_set(tenant_id,
+                                                        user_id=user_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(2048, quota_set['ram'])
 
     @test.attr(type='gate')
     def test_delete_quota(self):
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 1f2ddf4..49af645 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -14,7 +14,6 @@
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
-from tempest import exceptions
 from tempest import test
 
 
@@ -44,16 +43,6 @@
                                               wait_until='ACTIVE')
         cls.s2_id = server['id']
 
-    def _get_unused_flavor_id(self):
-        flavor_id = data_utils.rand_int_id(start=1000)
-        while True:
-            try:
-                resp, body = self.flavors_client.get_flavor_details(flavor_id)
-            except exceptions.NotFound:
-                break
-            flavor_id = data_utils.rand_int_id(start=1000)
-        return flavor_id
-
     @test.attr(type='gate')
     def test_list_servers_by_admin(self):
         # Listing servers by admin user returns empty list by default
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index abd36a6..a65d7b7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -203,6 +203,13 @@
             LOG.warn("Unable to delete volume '%s' since it was not found. "
                      "Maybe it was already deleted?" % volume_id)
 
+    @classmethod
+    def prepare_instance_network(cls):
+        if (CONF.compute.ssh_auth_method != 'disabled' and
+                CONF.compute.ssh_connect_method == 'floating'):
+            cls.set_network_resources(network=True, subnet=True, router=True,
+                                      dhcp=True)
+
 
 class BaseV2ComputeTest(BaseComputeTest):
 
@@ -227,12 +234,14 @@
         cls.interfaces_client = cls.os.interfaces_client
         cls.fixed_ips_client = cls.os.fixed_ips_client
         cls.availability_zone_client = cls.os.availability_zone_client
+        cls.agents_client = cls.os.agents_client
         cls.aggregates_client = cls.os.aggregates_client
         cls.services_client = cls.os.services_client
         cls.instance_usages_audit_log_client = \
             cls.os.instance_usages_audit_log_client
         cls.hypervisor_client = cls.os.hypervisor_client
         cls.certificates_client = cls.os.certificates_client
+        cls.migrations_client = cls.os.migrations_client
 
     @classmethod
     def create_image_from_server(cls, server_id, **kwargs):
@@ -310,11 +319,6 @@
     @classmethod
     def setUpClass(cls):
         # By default compute tests do not create network resources
-        if cls._interface == "xml":
-            skip_msg = ("XML interface is being removed from Nova v3. "
-                        "%s will be removed shortly" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
         if not CONF.compute_feature_enabled.api_v3:
             skip_msg = ("%s skipped as nova v3 api is not available" %
                         cls.__name__)
@@ -339,6 +343,7 @@
         cls.hosts_client = cls.os.hosts_v3_client
         cls.quotas_client = cls.os.quotas_v3_client
         cls.version_client = cls.os.version_v3_client
+        cls.migrations_client = cls.os.migrations_v3_client
 
     @classmethod
     def create_image_from_server(cls, server_id, **kwargs):
@@ -412,3 +417,4 @@
         cls.hosts_admin_client = cls.os_adm.hosts_v3_client
         cls.quotas_admin_client = cls.os_adm.quotas_v3_client
         cls.agents_admin_client = cls.os_adm.agents_v3_client
+        cls.migrations_admin_client = cls.os_adm.migrations_v3_client
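prepare_instance_network() must be called before the parent setUpClass() so the network resource flags are in place before the base class allocates credentials and network resources; the server test classes changed later in this diff follow exactly that pattern. A minimal sketch of the calling convention (MySshTestJSON is a hypothetical class name):

    class MySshTestJSON(base.BaseV2ComputeTest):

        @classmethod
        def setUpClass(cls):
            # Runs before super() so network/subnet/router/dhcp are only
            # requested when ssh over a floating IP is actually configured.
            cls.prepare_instance_network()
            super(MySshTestJSON, cls).setUpClass()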
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index abd8a4c..b3789f8 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -45,6 +45,14 @@
             resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
         super(FloatingIPsTestJSON, cls).tearDownClass()
 
+    def _try_delete_floating_ip(self, floating_ip_id):
+        # delete floating ip, if it exists
+        try:
+            self.client.delete_floating_ip(floating_ip_id)
+        # if not found, it was already deleted by the test
+        except exceptions.NotFound:
+            pass
+
     @test.attr(type='gate')
     def test_allocate_floating_ip(self):
         # Positive test:Allocation of a new floating IP to a project
@@ -66,6 +74,7 @@
         # should be successful
         # Creating the floating IP that is to be deleted in this method
         resp, floating_ip_body = self.client.create_floating_ip()
+        self.addCleanup(self._try_delete_floating_ip, floating_ip_body['id'])
         # Storing the details of floating IP before deleting it
         cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
         resp, floating_ip_details = cli_resp
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index f0a8c8d..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersTestJSON, cls).setUpClass()
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
@@ -53,13 +54,6 @@
         resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @test.attr(type='smoke')
-    def test_create_server_response(self):
-        # Check that the required fields are returned with values
-        self.assertEqual(202, self.resp.status)
-        self.assertTrue(self.server_initial['id'] is not None)
-        self.assertTrue(self.server_initial['adminPass'] is not None)
-
-    @test.attr(type='smoke')
     def test_verify_server_details(self):
         # Verify the specified server attributes are set correctly
         self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -114,26 +108,10 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
-        cls.meta = {'hello': 'world'}
-        cls.accessIPv4 = '1.1.1.1'
-        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
-        cls.name = data_utils.rand_name('server')
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
-        cls.client = cls.servers_client
         cls.flavor_client = cls.os_adm.flavors_client
-        cli_resp = cls.create_test_server(name=cls.name,
-                                          meta=cls.meta,
-                                          accessIPv4=cls.accessIPv4,
-                                          accessIPv6=cls.accessIPv6,
-                                          personality=personality,
-                                          disk_config=cls.disk_config)
-        cls.resp, cls.server_initial = cli_resp
-        cls.password = cls.server_initial['adminPass']
-        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
-        resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+        cls.client = cls.servers_client
 
     @testtools.skipUnless(CONF.compute.run_ssh,
                           'Instance validation tests are disabled.')
@@ -141,7 +119,7 @@
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
 
-        def create_flavor_with_extra_specs(self):
+        def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
             ram = 64
@@ -154,12 +132,12 @@
                                           ram, vcpus, disk,
                                           flavor_with_eph_disk_id,
                                           ephemeral=1))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
+            self.addCleanup(flavor_clean_up, flavor['id'])
             self.assertEqual(200, resp.status)
 
             return flavor['id']
 
-        def create_flavor_without_extra_specs(self):
+        def create_flavor_without_extra_specs():
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
@@ -172,18 +150,18 @@
                             create_flavor(flavor_no_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_no_eph_disk_id))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
+            self.addCleanup(flavor_clean_up, flavor['id'])
             self.assertEqual(200, resp.status)
 
             return flavor['id']
 
-        def flavor_clean_up(self, flavor_id):
+        def flavor_clean_up(flavor_id):
             resp, body = self.flavor_client.delete_flavor(flavor_id)
             self.assertEqual(resp.status, 202)
             self.flavor_client.wait_for_resource_deletion(flavor_id)
 
-        flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
-        flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
 
         admin_pass = self.image_ssh_password
 
@@ -196,13 +174,18 @@
                                       adminPass=admin_pass,
                                       flavor=flavor_with_eph_disk_id))
         # Get partition number of server without extra specs.
+        _, server_no_eph_disk = self.client.get_server(
+            server_no_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_no_eph_disk,
-                                                  self.ssh_user, self.password)
-        partition_num = len(linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num = len(linux_client.get_partitions().split('\n'))
 
+        _, server_with_eph_disk = self.client.get_server(
+            server_with_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_with_eph_disk,
-                                                  self.ssh_user, self.password)
-        self.assertEqual(partition_num + 1, linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num_eph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_eph)
 
 
 class ServersTestManualDisk(ServersTestJSON):
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 451d08f..9e34922 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -102,6 +102,26 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @test.attr(type='gate')
+    def test_delete_server_while_in_attached_volume(self):
+        # Delete a server while a volume is attached to it
+        volumes_client = self.volumes_extensions_client
+        device = '/dev/%s' % CONF.compute.volume_device_name
+        resp, server = self.create_test_server(wait_until='ACTIVE')
+
+        resp, volume = volumes_client.create_volume(1)
+        self.addCleanup(volumes_client.delete_volume, volume['id'])
+        volumes_client.wait_for_volume_status(volume['id'], 'available')
+        resp, body = self.client.attach_volume(server['id'],
+                                               volume['id'],
+                                               device=device)
+        volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+        resp, _ = self.client.delete_server(server['id'])
+        self.assertEqual('204', resp['status'])
+        self.client.wait_for_server_termination(server['id'])
+        volumes_client.wait_for_volume_status(volume['id'], 'available')
+
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
     # NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index f0913f1..f66020c 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -28,6 +28,7 @@
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
+        cls.set_network_resources(network=True, subnet=True, dhcp=True)
         super(ListServerFiltersTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
 
@@ -69,8 +70,12 @@
         resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
-
-        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if (CONF.service_available.neutron and
+                CONF.compute.allow_tenant_isolation):
+            network = cls.isolated_creds.get_primary_network()
+            cls.fixed_network_name = network['name']
+        else:
+            cls.fixed_network_name = CONF.compute.fixed_network_name
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
@@ -226,7 +231,6 @@
         self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
         self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
-    @test.skip_because(bug="1170718")
     @test.attr(type='gate')
     def test_list_servers_filtered_by_ip(self):
         # Filter servers by ip
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index cc2d1ee..1f2bca9 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -44,6 +44,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServerActionsTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
         cls.server_id = cls.rebuild_server(None)
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 65797e9..dae4709 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -12,12 +12,16 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import testtools
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
 
@@ -67,6 +71,8 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_rescue_paused_instance(self):
         # Rescue a paused server
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 7f909d7..c87f24e 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -60,7 +60,7 @@
         resp, cls.server = cls.client.get_server(server['id'])
 
         name = data_utils.rand_name('image')
-        resp, body = cls.client.create_image(server['id'], name)
+        resp, body = cls.images_client.create_image(server['id'], name)
         image_id = data_utils.parse_image_id(resp['location'])
         cls.images_client.wait_for_image_status(image_id, 'ACTIVE')
         resp, cls.image = cls.images_client.get_image(image_id)
diff --git a/tempest/api/compute/v3/admin/test_migrations.py b/tempest/api/compute/v3/admin/test_migrations.py
new file mode 100644
index 0000000..e8bd473
--- /dev/null
+++ b/tempest/api/compute/v3/admin/test_migrations.py
@@ -0,0 +1,50 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminV3Test(base.BaseV3ComputeAdminTest):
+
+    @test.attr(type='gate')
+    def test_list_migrations(self):
+        # Admin can get the migrations list
+        resp, _ = self.migrations_admin_client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='gate')
+    def test_list_migrations_in_flavor_resize_situation(self):
+        # Admin can get the migrations list which contains the resized server
+        resp, server = self.create_test_server(wait_until="ACTIVE")
+        server_id = server['id']
+
+        resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize(server_id)
+        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+        resp, body = self.migrations_admin_client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+        instance_uuids = [x['instance_uuid'] for x in body]
+        self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/v3/admin/test_quotas.py b/tempest/api/compute/v3/admin/test_quotas.py
index 917c115..b70e254 100644
--- a/tempest/api/compute/v3/admin/test_quotas.py
+++ b/tempest/api/compute/v3/admin/test_quotas.py
@@ -94,21 +94,38 @@
     # TODO(afazekas): merge these test cases
     @test.attr(type='gate')
     def test_get_updated_quotas(self):
-        # Verify that GET shows the updated quota set
+        # Verify that GET shows the updated quota set of the tenant
         tenant_name = data_utils.rand_name('cpu_quota_tenant_')
         tenant_desc = tenant_name + '-desc'
         identity_client = self.os_adm.identity_client
         _, tenant = identity_client.create_tenant(name=tenant_name,
                                                   description=tenant_desc)
         tenant_id = tenant['id']
-        self.addCleanup(identity_client.delete_tenant,
-                        tenant_id)
+        self.addCleanup(identity_client.delete_tenant, tenant_id)
 
-        self.adm_client.update_quota_set(tenant_id,
-                                         ram='5120')
+        self.adm_client.update_quota_set(tenant_id, ram='5120')
         resp, quota_set = self.adm_client.get_quota_set(tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(quota_set['ram'], 5120)
+        self.assertEqual(5120, quota_set['ram'])
+
+        # Verify that GET shows the updated quota set of the user
+        user_name = data_utils.rand_name('cpu_quota_user_')
+        password = data_utils.rand_name('password-')
+        email = user_name + '@testmail.tm'
+        _, user = identity_client.create_user(name=user_name,
+                                              password=password,
+                                              tenant_id=tenant_id,
+                                              email=email)
+        user_id = user['id']
+        self.addCleanup(identity_client.delete_user, user_id)
+
+        self.adm_client.update_quota_set(tenant_id,
+                                         user_id=user_id,
+                                         ram='2048')
+        resp, quota_set = self.adm_client.get_quota_set(tenant_id,
+                                                        user_id=user_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(2048, quota_set['ram'])
 
     @test.attr(type='gate')
     def test_delete_quota(self):
diff --git a/tempest/api/compute/v3/admin/test_servers.py b/tempest/api/compute/v3/admin/test_servers.py
index 579a535..366cfc6 100644
--- a/tempest/api/compute/v3/admin/test_servers.py
+++ b/tempest/api/compute/v3/admin/test_servers.py
@@ -14,7 +14,6 @@
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
-from tempest import exceptions
 from tempest import test
 
 
@@ -44,16 +43,6 @@
                                               wait_until='ACTIVE')
         cls.s2_id = server['id']
 
-    def _get_unused_flavor_id(self):
-        flavor_id = data_utils.rand_int_id(start=1000)
-        while True:
-            try:
-                resp, body = self.flavors_client.get_flavor_details(flavor_id)
-            except exceptions.NotFound:
-                break
-            flavor_id = data_utils.rand_int_id(start=1000)
-        return flavor_id
-
     @test.attr(type='gate')
     def test_list_servers_by_admin(self):
         # Listing servers by admin user returns empty list by default
diff --git a/tempest/api/compute/v3/servers/test_attach_volume.py b/tempest/api/compute/v3/servers/test_attach_volume.py
index 28d8517..e994c7f 100644
--- a/tempest/api/compute/v3/servers/test_attach_volume.py
+++ b/tempest/api/compute/v3/servers/test_attach_volume.py
@@ -33,6 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(AttachVolumeV3Test, cls).setUpClass()
         cls.device = CONF.compute.volume_device_name
         if not CONF.service_available.cinder:
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index 80c40a2..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersV3Test, cls).setUpClass()
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
@@ -53,13 +54,6 @@
         resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @test.attr(type='smoke')
-    def test_create_server_response(self):
-        # Check that the required fields are returned with values
-        self.assertEqual(202, self.resp.status)
-        self.assertTrue(self.server_initial['id'] is not None)
-        self.assertTrue(self.server_initial['admin_password'] is not None)
-
-    @test.attr(type='smoke')
     def test_verify_server_details(self):
         # Verify the specified server attributes are set correctly
         self.assertEqual(self.accessIPv4,
@@ -115,26 +109,10 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
-        cls.meta = {'hello': 'world'}
-        cls.accessIPv4 = '1.1.1.1'
-        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
-        cls.name = data_utils.rand_name('server')
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
         cls.client = cls.servers_client
         cls.flavor_client = cls.flavors_admin_client
-        cli_resp = cls.create_test_server(name=cls.name,
-                                          meta=cls.meta,
-                                          access_ip_v4=cls.accessIPv4,
-                                          access_ip_v6=cls.accessIPv6,
-                                          personality=personality,
-                                          disk_config=cls.disk_config)
-        cls.resp, cls.server_initial = cli_resp
-        cls.password = cls.server_initial['admin_password']
-        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
-        resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @testtools.skipUnless(CONF.compute.run_ssh,
                           'Instance validation tests are disabled.')
@@ -142,7 +120,7 @@
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
 
-        def create_flavor_with_extra_specs(self):
+        def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
             ram = 512
@@ -154,13 +132,13 @@
                             create_flavor(flavor_with_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_with_eph_disk_id,
-                                          ephemeral=1, swap=1024, rxtx=1))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
-            self.assertEqual(200, resp.status)
+                                          ephemeral=1, rxtx=1))
+            self.addCleanup(flavor_clean_up, flavor['id'])
+            self.assertEqual(201, resp.status)
 
             return flavor['id']
 
-        def create_flavor_without_extra_specs(self):
+        def create_flavor_without_extra_specs():
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
@@ -173,18 +151,18 @@
                             create_flavor(flavor_no_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_no_eph_disk_id))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
-            self.assertEqual(200, resp.status)
+            self.addCleanup(flavor_clean_up, flavor['id'])
+            self.assertEqual(201, resp.status)
 
             return flavor['id']
 
-        def flavor_clean_up(self, flavor_id):
+        def flavor_clean_up(flavor_id):
             resp, body = self.flavor_client.delete_flavor(flavor_id)
-            self.assertEqual(resp.status, 202)
+            self.assertEqual(resp.status, 204)
             self.flavor_client.wait_for_resource_deletion(flavor_id)
 
-        flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
-        flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
 
         admin_pass = self.image_ssh_password
 
@@ -197,13 +175,17 @@
                                       adminPass=admin_pass,
                                       flavor=flavor_with_eph_disk_id))
         # Get partition number of server without extra specs.
+        _, server_no_eph_disk = self.client.get_server(
+            server_no_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_no_eph_disk,
-                                                  self.ssh_user, self.password)
-        partition_num = len(linux_client.get_partitions())
-
+                                                  self.ssh_user, admin_pass)
+        partition_num = len(linux_client.get_partitions().split('\n'))
+        _, server_with_eph_disk = self.client.get_server(
+            server_with_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_with_eph_disk,
-                                                  self.ssh_user, self.password)
-        self.assertEqual(partition_num + 1, linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num_eph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_eph)
 
 
 class ServersV3TestManualDisk(ServersV3Test):
diff --git a/tempest/api/compute/v3/servers/test_delete_server.py b/tempest/api/compute/v3/servers/test_delete_server.py
index d694a33..add69ab 100644
--- a/tempest/api/compute/v3/servers/test_delete_server.py
+++ b/tempest/api/compute/v3/servers/test_delete_server.py
@@ -56,6 +56,8 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_delete_server_while_in_pause_state(self):
         # Delete a server while its VM state is Pause
@@ -99,6 +101,25 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @test.attr(type='gate')
+    def test_delete_server_while_in_attached_volume(self):
+        # Delete a server while a volume is attached to it
+        device = '/dev/%s' % CONF.compute.volume_device_name
+        resp, server = self.create_test_server(wait_until='ACTIVE')
+
+        resp, volume = self.volumes_client.create_volume(1)
+        self.addCleanup(self.volumes_client.delete_volume, volume['id'])
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        resp, body = self.client.attach_volume(server['id'],
+                                               volume['id'],
+                                               device=device)
+        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+        resp, _ = self.client.delete_server(server['id'])
+        self.assertEqual('204', resp['status'])
+        self.client.wait_for_server_termination(server['id'])
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+
 
 class DeleteServersAdminV3Test(base.BaseV3ComputeAdminTest):
     # NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/v3/servers/test_instance_actions.py b/tempest/api/compute/v3/servers/test_instance_actions.py
index 7d25100..399541b 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions.py
@@ -27,25 +27,27 @@
         cls.resp = resp
         cls.server_id = server['id']
 
+    @test.skip_because(bug="1206032")
     @test.attr(type='gate')
-    def test_list_instance_actions(self):
+    def test_list_server_actions(self):
         # List actions of the provided server
         resp, body = self.client.reboot(self.server_id, 'HARD')
         self.client.wait_for_server_status(self.server_id, 'ACTIVE')
 
-        resp, body = self.client.list_instance_actions(self.server_id)
+        resp, body = self.client.list_server_actions(self.server_id)
         self.assertEqual(200, resp.status)
         self.assertTrue(len(body) == 2, str(body))
         self.assertTrue(any([i for i in body if i['action'] == 'create']))
         self.assertTrue(any([i for i in body if i['action'] == 'reboot']))
 
+    @test.skip_because(bug="1206032")
     @test.attr(type='gate')
     @test.skip_because(bug="1281915")
-    def test_get_instance_action(self):
+    def test_get_server_action(self):
         # Get the action details of the provided server
         request_id = self.resp['x-compute-request-id']
-        resp, body = self.client.get_instance_action(self.server_id,
-                                                     request_id)
+        resp, body = self.client.get_server_action(self.server_id,
+                                                   request_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(self.server_id, body['instance_uuid'])
+        self.assertEqual(self.server_id, body['server_uuid'])
         self.assertEqual('create', body['action'])
diff --git a/tempest/api/compute/v3/servers/test_instance_actions_negative.py b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
index b0a7050..0b2c6f9 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
@@ -29,15 +29,15 @@
         cls.server_id = server['id']
 
     @test.attr(type=['negative', 'gate'])
-    def test_list_instance_actions_invalid_server(self):
+    def test_list_server_actions_invalid_server(self):
         # List actions of the invalid server id
         invalid_server_id = data_utils.rand_uuid()
         self.assertRaises(exceptions.NotFound,
-                          self.client.list_instance_actions, invalid_server_id)
+                          self.client.list_server_actions, invalid_server_id)
 
     @test.attr(type=['negative', 'gate'])
-    def test_get_instance_action_invalid_request(self):
+    def test_get_server_action_invalid_request(self):
         # Get the action details of the provided server with invalid request
         invalid_request_id = 'req-' + data_utils.rand_uuid()
-        self.assertRaises(exceptions.NotFound, self.client.get_instance_action,
+        self.assertRaises(exceptions.NotFound, self.client.get_server_action,
                           self.server_id, invalid_request_id)
diff --git a/tempest/api/compute/v3/servers/test_list_server_filters.py b/tempest/api/compute/v3/servers/test_list_server_filters.py
index 2cb176c..778b033 100644
--- a/tempest/api/compute/v3/servers/test_list_server_filters.py
+++ b/tempest/api/compute/v3/servers/test_list_server_filters.py
@@ -28,6 +28,7 @@
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
+        cls.set_network_resources(network=True, subnet=True, dhcp=True)
         super(ListServerFiltersV3Test, cls).setUpClass()
         cls.client = cls.servers_client
 
@@ -70,7 +71,12 @@
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
 
-        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if (CONF.service_available.neutron and
+                CONF.compute.allow_tenant_isolation):
+            network = cls.isolated_creds.get_primary_network()
+            cls.fixed_network_name = network['name']
+        else:
+            cls.fixed_network_name = CONF.compute.fixed_network_name
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
@@ -226,7 +232,6 @@
         self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
         self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
-    @test.skip_because(bug="1170718")
     @test.attr(type='gate')
     def test_list_servers_filtered_by_ip(self):
         # Filter servers by ip
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index c377c30..1495cb7 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -41,6 +41,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServerActionsV3Test, cls).setUpClass()
         cls.client = cls.servers_client
         cls.server_id = cls.rebuild_server(None)
@@ -329,6 +330,8 @@
 
         self.wait_for(self._get_output)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_pause_unpause_server(self):
         resp, server = self.client.pause_server(self.server_id)
diff --git a/tempest/api/compute/v3/servers/test_server_rescue_negative.py b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
index 08fb127..eb6bcdd 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
@@ -12,12 +12,16 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import testtools
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServerRescueNegativeV3Test(base.BaseV3ComputeTest):
 
@@ -66,6 +70,8 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_rescue_paused_instance(self):
         # Rescue a paused server
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index 586a52a..c1d1935 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -115,6 +115,8 @@
         self.assertRaises(exceptions.NotFound, self.client.reboot,
                           nonexistent_server, 'SOFT')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_paused_server(self):
         # Pause a paused server.
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ab9d144..4585912 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -33,6 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(AttachVolumeTestJSON, cls).setUpClass()
         cls.device = CONF.compute.volume_device_name
         if not CONF.service_available.cinder:
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 84d5be6..fc313f2 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from tempest import config
+from tempest import exceptions
 import tempest.test
 
 
@@ -27,46 +28,35 @@
     def setUpClass(cls):
         super(BaseDataProcessingTest, cls).setUpClass()
         if not CONF.service_available.sahara:
-            raise cls.skipException("Sahara support is required")
+            raise cls.skipException('Sahara support is required')
 
         os = cls.get_client_manager()
         cls.client = os.data_processing_client
 
-        # set some constants
         cls.flavor_ref = CONF.compute.flavor_ref
-        cls.simple_node_group_template = {
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'node_processes': [
-                "datanode",
-                "tasktracker"
-            ],
-            'flavor_id': cls.flavor_ref,
-            'node_configs': {
-                'HDFS': {
-                    'Data Node Heap Size': 1024
-                },
-                'MapReduce': {
-                    'Task Tracker Heap Size': 1024
-                }
-            }
-        }
 
         # add lists for watched resources
         cls._node_group_templates = []
+        cls._cluster_templates = []
 
     @classmethod
     def tearDownClass(cls):
-        # cleanup node group templates
-        for ngt_id in getattr(cls, '_node_group_templates', []):
-            try:
-                cls.client.delete_node_group_template(ngt_id)
-            except Exception:
-                # ignore errors while auto removing created resource
-                pass
+        cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
+                              cls.client.delete_cluster_template)
+        cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
+                              cls.client.delete_node_group_template)
         cls.clear_isolated_creds()
         super(BaseDataProcessingTest, cls).tearDownClass()
 
+    @staticmethod
+    def cleanup_resources(resource_id_list, method):
+        for resource_id in resource_id_list:
+            try:
+                method(resource_id)
+            except exceptions.NotFound:
+                # ignore errors while auto removing created resource
+                pass
+
     @classmethod
     def create_node_group_template(cls, name, plugin_name, hadoop_version,
                                    node_processes, flavor_id,
@@ -77,16 +67,32 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-
         resp, body = cls.client.create_node_group_template(name, plugin_name,
                                                            hadoop_version,
                                                            node_processes,
                                                            flavor_id,
                                                            node_configs,
                                                            **kwargs)
-
         # store id of created node group template
-        template_id = body['id']
-        cls._node_group_templates.append(template_id)
+        cls._node_group_templates.append(body['id'])
 
-        return resp, body, template_id
+        return resp, body
+
+    @classmethod
+    def create_cluster_template(cls, name, plugin_name, hadoop_version,
+                                node_groups, cluster_configs=None, **kwargs):
+        """Creates watched cluster template with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object. All resources created in this method will be automatically
+        removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_cluster_template(name, plugin_name,
+                                                        hadoop_version,
+                                                        node_groups,
+                                                        cluster_configs,
+                                                        **kwargs)
+        # store id of created cluster template
+        cls._cluster_templates.append(body['id'])
+
+        return resp, body
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index a64c345..ed4cf1f 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -19,65 +19,87 @@
 
 
 class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
-    def _create_simple_node_group_template(self, template_name=None):
-        """Creates simple Node Group Template with optional name specified.
+    @classmethod
+    def setUpClass(cls):
+        super(NodeGroupTemplateTest, cls).setUpClass()
+        cls.node_group_template = {
+            'description': 'Test node group template',
+            'plugin_name': 'vanilla',
+            'hadoop_version': '1.2.1',
+            'node_processes': [
+                'datanode',
+                'tasktracker'
+            ],
+            'flavor_id': cls.flavor_ref,
+            'node_configs': {
+                'HDFS': {
+                    'Data Node Heap Size': 1024
+                },
+                'MapReduce': {
+                    'Task Tracker Heap Size': 1024
+                }
+            }
+        }
+
+    def _create_node_group_template(self, template_name=None):
+        """Creates Node Group Template with optional name specified.
 
         It creates template and ensures response status and template name.
         Returns id and name of created template.
         """
-
         if template_name is None:
             # generate random name if it's not specified
-            template_name = data_utils.rand_name('sahara')
+            template_name = data_utils.rand_name('sahara-ng-template')
 
-        # create simple node group template
-        resp, body, template_id = self.create_node_group_template(
-            template_name, **self.simple_node_group_template)
+        # create node group template
+        resp, body = self.create_node_group_template(
+            template_name, **self.node_group_template)
 
         # ensure that template created successfully
         self.assertEqual(202, resp.status)
         self.assertEqual(template_name, body['name'])
 
-        return template_id, template_name
+        return body['id'], template_name
 
     @attr(type='smoke')
     def test_node_group_template_create(self):
-        # just create and ensure template
-        self._create_simple_node_group_template()
+        template_name = data_utils.rand_name('sahara-ng-template')
+        resp, body = self.create_node_group_template(
+            template_name, **self.node_group_template)
+
+        # check that template created successfully
+        self.assertEqual(resp.status, 202)
+        self.assertEqual(template_name, body['name'])
+        self.assertDictContainsSubset(self.node_group_template, body)
 
     @attr(type='smoke')
     def test_node_group_template_list(self):
-        template_info = self._create_simple_node_group_template()
+        template_info = self._create_node_group_template()
 
         # check for node group template in list
         resp, templates = self.client.list_node_group_templates()
 
         self.assertEqual(200, resp.status)
-        templates_info = list([(template['id'], template['name'])
-                               for template in templates])
+        templates_info = [(template['id'], template['name'])
+                          for template in templates]
         self.assertIn(template_info, templates_info)
 
     @attr(type='smoke')
     def test_node_group_template_get(self):
-        template_id, template_name = self._create_simple_node_group_template()
+        template_id, template_name = self._create_node_group_template()
 
         # check node group template fetch by id
         resp, template = self.client.get_node_group_template(template_id)
 
         self.assertEqual(200, resp.status)
         self.assertEqual(template_name, template['name'])
-        self.assertEqual(self.simple_node_group_template['plugin_name'],
-                         template['plugin_name'])
-        self.assertEqual(self.simple_node_group_template['node_processes'],
-                         template['node_processes'])
-        self.assertEqual(self.simple_node_group_template['flavor_id'],
-                         template['flavor_id'])
+        self.assertDictContainsSubset(self.node_group_template, template)
 
     @attr(type='smoke')
     def test_node_group_template_delete(self):
-        template_id, template_name = self._create_simple_node_group_template()
+        template_id = self._create_node_group_template()[0]
 
         # delete the node group template by id
-        resp = self.client.delete_node_group_template(template_id)
+        resp = self.client.delete_node_group_template(template_id)[0]
 
-        self.assertEqual('204', resp[0]['status'])
+        self.assertEqual(204, resp.status)
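
Side note on the assertion style used above (a standalone sketch, not part of the patch): assertDictContainsSubset passes when every key/value pair of the expected dict is present in the actual response, ignoring extra keys such as the generated id, which is why the per-field assertEqual calls could be collapsed:

    import unittest


    class SubsetCheckSketch(unittest.TestCase):
        def test_subset(self):
            expected = {'plugin_name': 'vanilla', 'hadoop_version': '1.2.1'}
            # Extra keys in the actual dict (e.g. a generated id) do not matter.
            actual = dict(expected, id='42', name='sahara-ng-template')
            self.assertDictContainsSubset(expected, actual)


    if __name__ == '__main__':
        unittest.main()
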
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 24c7b83..90dccca 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -73,7 +73,7 @@
         self.assertIn(role_id, fetched_role_ids)
 
     @test.attr(type='smoke')
-    def test_role_create_update_get(self):
+    def test_role_create_update_get_list(self):
         r_name = data_utils.rand_name('Role-')
         resp, role = self.client.create_role(r_name)
         self.addCleanup(self.client.delete_role, role['id'])
@@ -94,6 +94,10 @@
         self.assertEqual(new_name, new_role['name'])
         self.assertEqual(updated_role['id'], new_role['id'])
 
+        resp, roles = self.client.list_roles()
+        self.assertEqual(resp['status'], '200')
+        self.assertIn(role['id'], [r['id'] for r in roles])
+
     @test.attr(type='smoke')
     def test_grant_list_revoke_role_to_user_on_project(self):
         resp, _ = self.client.assign_user_role_on_project(
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 342bc6a..b848994 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -37,8 +37,10 @@
         agents = body['agents']
         # Heartbeats must be excluded from comparison
         self.agent.pop('heartbeat_timestamp', None)
+        self.agent.pop('configurations', None)
         for agent in agents:
             agent.pop('heartbeat_timestamp', None)
+            agent.pop('configurations', None)
         self.assertIn(self.agent, agents)
 
     @test.attr(type=['smoke'])
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 13ae1c0..25e1cc0 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -20,6 +20,7 @@
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(DHCPAgentSchedulersTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
diff --git a/tempest/api/network/admin/test_lbaas_agent_scheduler.py b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
index a5ba90f..675c62d 100644
--- a/tempest/api/network/admin/test_lbaas_agent_scheduler.py
+++ b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
@@ -35,6 +35,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(LBaaSAgentSchedulerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
index 34a8e32..bc7f1d6 100644
--- a/tempest/api/network/admin/test_load_balancer_admin_actions.py
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -29,6 +29,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(LoadBalancerAdminTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas', 'network'):
@@ -42,6 +43,8 @@
             tenant_name)['id']
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
+        cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
+                                   "ROUND_ROBIN", "HTTP", cls.subnet)
 
     @test.attr(type='smoke')
     def test_create_vip_as_admin_for_another_tenant(self):
@@ -89,6 +92,29 @@
         show_health_monitor = body['health_monitor']
         self.assertEqual(health_monitor['id'], show_health_monitor['id'])
 
+    @test.attr(type='smoke')
+    def test_create_pool_from_admin_user_other_tenant(self):
+        resp, body = self.admin_client.create_pool(
+            name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
+            protocol="HTTP", subnet_id=self.subnet['id'],
+            tenant_id=self.tenant_id)
+        self.assertEqual('201', resp['status'])
+        pool = body['pool']
+        self.addCleanup(self.admin_client.delete_pool, pool['id'])
+        self.assertIsNotNone(pool['id'])
+        self.assertEqual(self.tenant_id, pool['tenant_id'])
+
+    @test.attr(type='smoke')
+    def test_create_member_from_admin_user_other_tenant(self):
+        resp, body = self.admin_client.create_member(
+            address="10.0.9.47", protocol_port=80, pool_id=self.pool['id'],
+            tenant_id=self.tenant_id)
+        self.assertEqual('201', resp['status'])
+        member = body['member']
+        self.addCleanup(self.admin_client.delete_member, member['id'])
+        self.assertIsNotNone(member['id'])
+        self.assertEqual(self.tenant_id, member['tenant_id'])
+
 
 class LoadBalancerAdminTestXML(LoadBalancerAdminTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index f92ad68..696a1c3 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -79,9 +79,17 @@
         cls.floating_ips = []
         cls.metering_labels = []
         cls.metering_label_rules = []
+        cls.fw_rules = []
+        cls.fw_policies = []
 
     @classmethod
     def tearDownClass(cls):
+        # Clean up firewall policies
+        for fw_policy in cls.fw_policies:
+            cls.client.delete_firewall_policy(fw_policy['id'])
+        # Clean up firewall rules
+        for fw_rule in cls.fw_rules:
+            cls.client.delete_firewall_rule(fw_rule['id'])
         # Clean up ike policies
         for ikepolicy in cls.ikepolicies:
             cls.client.delete_ikepolicy(ikepolicy['id'])
@@ -93,12 +101,8 @@
             cls.client.delete_floatingip(floating_ip['id'])
         # Clean up routers
         for router in cls.routers:
-            resp, body = cls.client.list_router_interfaces(router['id'])
-            interfaces = body['ports']
-            for i in interfaces:
-                cls.client.remove_router_interface_with_subnet_id(
-                    router['id'], i['fixed_ips'][0]['subnet_id'])
-            cls.client.delete_router(router['id'])
+            cls.delete_router(router)
+
         # Clean up health monitors
         for health_monitor in cls.health_monitors:
             cls.client.delete_health_monitor(health_monitor['id'])
@@ -296,6 +300,35 @@
         cls.ikepolicies.append(ikepolicy)
         return ikepolicy
 
+    @classmethod
+    def create_firewall_rule(cls, action, protocol):
+        """Wrapper utility that returns a test firewall rule."""
+        resp, body = cls.client.create_firewall_rule(
+            name=data_utils.rand_name("fw-rule"),
+            action=action,
+            protocol=protocol)
+        fw_rule = body['firewall_rule']
+        cls.fw_rules.append(fw_rule)
+        return fw_rule
+
+    @classmethod
+    def create_firewall_policy(cls):
+        """Wrapper utility that returns a test firewall policy."""
+        resp, body = cls.client.create_firewall_policy(
+            name=data_utils.rand_name("fw-policy"))
+        fw_policy = body['firewall_policy']
+        cls.fw_policies.append(fw_policy)
+        return fw_policy
+
+    @classmethod
+    def delete_router(cls, router):
+        resp, body = cls.client.list_router_interfaces(router['id'])
+        interfaces = body['ports']
+        for i in interfaces:
+            cls.client.remove_router_interface_with_subnet_id(
+                router['id'], i['fixed_ips'][0]['subnet_id'])
+        cls.client.delete_router(router['id'])
+
 
 class BaseAdminNetworkTest(BaseNetworkTest):
 
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index ed86d75..371c651 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -36,6 +36,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ExtraDHCPOptionsTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 06871ad..7191940 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -44,6 +44,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(FloatingIPTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('router', 'network'):
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
new file mode 100644
index 0000000..555cbda
--- /dev/null
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -0,0 +1,235 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+CONF = config.CONF
+
+
+class FWaaSExtensionTestJSON(base.BaseNetworkTest):
+    _interface = 'json'
+
+    """
+    Tests the following operations in the Neutron API using the REST client for
+    Neutron:
+
+        List firewall rules
+        Create firewall rule
+        Update firewall rule
+        Delete firewall rule
+        Show firewall rule
+        List firewall policies
+        Create firewall policy
+        Update firewall policy
+        Delete firewall policy
+        Show firewall policy
+        List firewall
+        Create firewall
+        Update firewall
+        Delete firewall
+        Show firewall
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(FWaaSExtensionTestJSON, cls).setUpClass()
+        if not test.is_extension_enabled('fwaas', 'network'):
+            msg = "FWaaS Extension not enabled."
+            raise cls.skipException(msg)
+        cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
+        cls.fw_policy = cls.create_firewall_policy()
+
+    def _try_delete_policy(self, policy_id):
+        # delete policy, if it exists
+        try:
+            self.client.delete_firewall_policy(policy_id)
+        # if policy is not found, this means it was deleted in the test
+        except exceptions.NotFound:
+            pass
+
+    def _try_delete_firewall(self, fw_id):
+        # delete firewall, if it exists
+        try:
+            self.client.delete_firewall(fw_id)
+        # if firewall is not found, this means it was deleted in the test
+        except exceptions.NotFound:
+            pass
+
+        self.client.wait_for_resource_deletion('firewall', fw_id)
+
+    def _wait_for_active(self, fw_id):
+        def _wait():
+            resp, firewall = self.client.show_firewall(fw_id)
+            self.assertEqual('200', resp['status'])
+            firewall = firewall['firewall']
+            return firewall['status'] == 'ACTIVE'
+
+        if not test.call_until_true(_wait, CONF.network.build_timeout,
+                                    CONF.network.build_interval):
+            m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+            raise exceptions.TimeoutException(m)
+
+    @test.attr(type='smoke')
+    def test_list_firewall_rules(self):
+        # List firewall rules
+        resp, fw_rules = self.client.list_firewall_rules()
+        self.assertEqual('200', resp['status'])
+        fw_rules = fw_rules['firewall_rules']
+        self.assertIn((self.fw_rule['id'],
+                       self.fw_rule['name'],
+                       self.fw_rule['action'],
+                       self.fw_rule['protocol'],
+                       self.fw_rule['ip_version'],
+                       self.fw_rule['enabled']),
+                      [(m['id'],
+                        m['name'],
+                        m['action'],
+                        m['protocol'],
+                        m['ip_version'],
+                        m['enabled']) for m in fw_rules])
+
+    @test.attr(type='smoke')
+    def test_create_update_delete_firewall_rule(self):
+        # Create firewall rule
+        resp, body = self.client.create_firewall_rule(
+            name=data_utils.rand_name("fw-rule"),
+            action="allow",
+            protocol="tcp")
+        self.assertEqual('201', resp['status'])
+        fw_rule_id = body['firewall_rule']['id']
+
+        # Update firewall rule
+        resp, body = self.client.update_firewall_rule(fw_rule_id,
+                                                      shared=True)
+        self.assertEqual('200', resp['status'])
+        self.assertTrue(body["firewall_rule"]['shared'])
+
+        # Delete firewall rule
+        resp, _ = self.client.delete_firewall_rule(fw_rule_id)
+        self.assertEqual('204', resp['status'])
+        # Confirm deletion
+        resp, fw_rules = self.client.list_firewall_rules()
+        self.assertNotIn(fw_rule_id,
+                         [m['id'] for m in fw_rules['firewall_rules']])
+
+    @test.attr(type='smoke')
+    def test_show_firewall_rule(self):
+        # show a created firewall rule
+        resp, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
+        self.assertEqual('200', resp['status'])
+        for key, value in fw_rule['firewall_rule'].iteritems():
+            self.assertEqual(self.fw_rule[key], value)
+
+    @test.attr(type='smoke')
+    def test_list_firewall_policies(self):
+        resp, fw_policies = self.client.list_firewall_policies()
+        self.assertEqual('200', resp['status'])
+        fw_policies = fw_policies['firewall_policies']
+        self.assertIn((self.fw_policy['id'],
+                       self.fw_policy['name'],
+                       self.fw_policy['firewall_rules']),
+                      [(m['id'],
+                        m['name'],
+                        m['firewall_rules']) for m in fw_policies])
+
+    @test.attr(type='smoke')
+    def test_create_update_delete_firewall_policy(self):
+        # Create firewall policy
+        resp, body = self.client.create_firewall_policy(
+            name=data_utils.rand_name("fw-policy"))
+        self.assertEqual('201', resp['status'])
+        fw_policy_id = body['firewall_policy']['id']
+        self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+        # Update firewall policy
+        resp, body = self.client.update_firewall_policy(fw_policy_id,
+                                                        shared=True,
+                                                        name="updated_policy")
+        self.assertEqual('200', resp['status'])
+        updated_fw_policy = body["firewall_policy"]
+        self.assertTrue(updated_fw_policy['shared'])
+        self.assertEqual("updated_policy", updated_fw_policy['name'])
+
+        # Delete firewall policy
+        resp, _ = self.client.delete_firewall_policy(fw_policy_id)
+        self.assertEqual('204', resp['status'])
+        # Confirm deletion
+        resp, fw_policies = self.client.list_firewall_policies()
+        fw_policies = fw_policies['firewall_policies']
+        self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
+
+    @test.attr(type='smoke')
+    def test_show_firewall_policy(self):
+        # show a created firewall policy
+        resp, fw_policy = self.client.show_firewall_policy(
+            self.fw_policy['id'])
+        self.assertEqual('200', resp['status'])
+        fw_policy = fw_policy['firewall_policy']
+        for key, value in fw_policy.iteritems():
+            self.assertEqual(self.fw_policy[key], value)
+
+    @test.attr(type='smoke')
+    def test_create_show_delete_firewall(self):
+        # Create tenant network resources required for an ACTIVE firewall
+        network = self.create_network()
+        subnet = self.create_subnet(network)
+        router = self.create_router(
+            data_utils.rand_name('router-'),
+            admin_state_up=True)
+        self.client.add_router_interface_with_subnet_id(
+            router['id'], subnet['id'])
+
+        # Create firewall
+        resp, body = self.client.create_firewall(
+            name=data_utils.rand_name("firewall"),
+            firewall_policy_id=self.fw_policy['id'])
+        self.assertEqual('201', resp['status'])
+        created_firewall = body['firewall']
+        firewall_id = created_firewall['id']
+        self.addCleanup(self._try_delete_firewall, firewall_id)
+
+        self._wait_for_active(firewall_id)
+
+        # show a created firewall
+        resp, firewall = self.client.show_firewall(firewall_id)
+        self.assertEqual('200', resp['status'])
+        firewall = firewall['firewall']
+
+        for key, value in firewall.iteritems():
+            if key == 'status':
+                continue
+            self.assertEqual(created_firewall[key], value)
+
+        # list firewall
+        resp, firewalls = self.client.list_firewalls()
+        self.assertEqual('200', resp['status'])
+        firewalls = firewalls['firewalls']
+        self.assertIn((created_firewall['id'],
+                       created_firewall['name'],
+                       created_firewall['firewall_policy_id']),
+                      [(m['id'],
+                        m['name'],
+                        m['firewall_policy_id']) for m in firewalls])
+
+        # Delete firewall
+        resp, _ = self.client.delete_firewall(firewall_id)
+        self.assertEqual('204', resp['status'])
+
+
+class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 792d61d..673fc47 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -38,6 +38,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(LoadBalancerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas', 'network'):
@@ -156,10 +157,14 @@
         # Verification of pool update
         new_name = "New_pool"
         resp, body = self.client.update_pool(pool['id'],
-                                             name=new_name)
+                                             name=new_name,
+                                             description="new_description",
+                                             lb_method='LEAST_CONNECTIONS')
         self.assertEqual('200', resp['status'])
         updated_pool = body['pool']
-        self.assertEqual(updated_pool['name'], new_name)
+        self.assertEqual(new_name, updated_pool['name'])
+        self.assertEqual('new_description', updated_pool['description'])
+        self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
         # Verification of pool delete
         resp, body = self.client.delete_pool(pool['id'])
         self.assertEqual('204', resp['status'])
@@ -377,6 +382,92 @@
         self.assertIn("active_connections", stats)
         self.assertIn("bytes_out", stats)
 
+    @test.attr(type='smoke')
+    def test_update_list_of_health_monitors_associated_with_pool(self):
+        resp, _ = (self.client.associate_health_monitor_with_pool
+                   (self.health_monitor['id'], self.pool['id']))
+        self.assertEqual('201', resp['status'])
+        resp, _ = self.client.update_health_monitor(
+            self.health_monitor['id'], admin_state_up=False)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        health_monitors = body['pool']['health_monitors']
+        for health_monitor_id in health_monitors:
+            resp, body = self.client.show_health_monitor(health_monitor_id)
+            self.assertEqual('200', resp['status'])
+            self.assertFalse(body['health_monitor']['admin_state_up'])
+        resp, _ = (self.client.disassociate_health_monitor_with_pool
+                   (self.health_monitor['id'], self.pool['id']))
+        self.assertEqual('204', resp['status'])
+
+    @test.attr(type='smoke')
+    def test_update_admin_state_up_of_pool(self):
+        resp, _ = self.client.update_pool(self.pool['id'],
+                                          admin_state_up=False)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        pool = body['pool']
+        self.assertFalse(pool['admin_state_up'])
+
+    @test.attr(type='smoke')
+    def test_show_vip_associated_with_pool(self):
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        pool = body['pool']
+        resp, body = self.client.show_vip(pool['vip_id'])
+        self.assertEqual('200', resp['status'])
+        vip = body['vip']
+        self.assertEqual(self.vip['name'], vip['name'])
+        self.assertEqual(self.vip['id'], vip['id'])
+
+    @test.attr(type='smoke')
+    def test_show_members_associated_with_pool(self):
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        members = body['pool']['members']
+        for member_id in members:
+            resp, body = self.client.show_member(member_id)
+            self.assertEqual('200', resp['status'])
+            self.assertIsNotNone(body['member']['status'])
+            self.assertEqual(member_id, body['member']['id'])
+            self.assertIsNotNone(body['member']['admin_state_up'])
+
+    @test.attr(type='smoke')
+    def test_update_pool_related_to_member(self):
+        # Create new pool
+        resp, body = self.client.create_pool(
+            name=data_utils.rand_name("pool-"),
+            lb_method='ROUND_ROBIN',
+            protocol='HTTP',
+            subnet_id=self.subnet['id'])
+        self.assertEqual('201', resp['status'])
+        new_pool = body['pool']
+        self.addCleanup(self.client.delete_pool, new_pool['id'])
+        # Update member with new pool's id
+        resp, body = self.client.update_member(self.member['id'],
+                                               pool_id=new_pool['id'])
+        self.assertEqual('200', resp['status'])
+        # Confirm with show that the pool_id has changed
+        resp, body = self.client.show_member(self.member['id'])
+        member = body['member']
+        self.assertEqual(member['pool_id'], new_pool['id'])
+        # Update member with the old pool id; this is needed for cleanup
+        resp, body = self.client.update_member(self.member['id'],
+                                               pool_id=self.pool['id'])
+        self.assertEqual('200', resp['status'])
+
+    @test.attr(type='smoke')
+    def test_update_member_weight(self):
+        resp, _ = self.client.update_member(self.member['id'],
+                                            weight=2)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_member(self.member['id'])
+        self.assertEqual('200', resp['status'])
+        member = body['member']
+        self.assertEqual(2, member['weight'])
+
 
 class LoadBalancerTestXML(LoadBalancerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index b9041ee..de44f4d 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -19,7 +19,7 @@
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 CONF = config.CONF
 
@@ -37,15 +37,9 @@
         create a subnet for a tenant
         list tenant's subnets
         show a tenant subnet details
-        port create
-        port delete
-        port list
-        port show
-        port update
         network update
         subnet update
         delete a network also deletes its subnets
-        create a port with no IP address associated with it
 
         All subnet tests are run once with ipv4 and once with ipv6.
 
@@ -64,6 +58,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(NetworksTestJSON, cls).setUpClass()
         cls.network = cls.create_network()
@@ -71,7 +66,7 @@
         cls.subnet = cls.create_subnet(cls.network)
         cls.cidr = cls.subnet['cidr']
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_create_update_delete_network_subnet(self):
         # Create a network
         name = data_utils.rand_name('network-')
@@ -102,7 +97,7 @@
         resp, body = self.client.delete_network(net_id)
         self.assertEqual('204', resp['status'])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_network(self):
         # Verify the details of a network
         resp, body = self.client.show_network(self.network['id'])
@@ -111,19 +106,19 @@
         for key in ['id', 'name']:
             self.assertEqual(network[key], self.network[key])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_network_fields(self):
         # Verify specific fields of a network
-        field_list = [('fields', 'id'), ('fields', 'name'), ]
+        fields = ['id', 'name']
         resp, body = self.client.show_network(self.network['id'],
-                                              field_list=field_list)
+                                              fields=fields)
         self.assertEqual('200', resp['status'])
         network = body['network']
-        self.assertEqual(len(network), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(network.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(network[field_name], self.network[field_name])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_networks(self):
         # Verify the network exists in the list of all networks
         resp, body = self.client.list_networks()
@@ -132,18 +127,18 @@
                     if network['id'] == self.network['id']]
         self.assertNotEmpty(networks, "Created network not found in the list")
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_networks_fields(self):
         # Verify specific fields of the networks
-        resp, body = self.client.list_networks(fields='id')
+        fields = ['id', 'name']
+        resp, body = self.client.list_networks(fields=fields)
         self.assertEqual('200', resp['status'])
         networks = body['networks']
         self.assertNotEmpty(networks, "Network list returned is empty")
         for network in networks:
-            self.assertEqual(len(network), 1)
-            self.assertIn('id', network)
+            self.assertEqual(sorted(network.keys()), sorted(fields))
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_subnet(self):
         # Verify the details of a subnet
         resp, body = self.client.show_subnet(self.subnet['id'])
@@ -154,19 +149,19 @@
             self.assertIn(key, subnet)
             self.assertEqual(subnet[key], self.subnet[key])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_subnet_fields(self):
         # Verify specific fields of a subnet
-        field_list = [('fields', 'id'), ('fields', 'cidr'), ]
+        fields = ['id', 'network_id']
         resp, body = self.client.show_subnet(self.subnet['id'],
-                                             field_list=field_list)
+                                             fields=fields)
         self.assertEqual('200', resp['status'])
         subnet = body['subnet']
-        self.assertEqual(len(subnet), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(subnet.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(subnet[field_name], self.subnet[field_name])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_subnets(self):
         # Verify the subnet exists in the list of all subnets
         resp, body = self.client.list_subnets()
@@ -175,16 +170,16 @@
                    if subnet['id'] == self.subnet['id']]
         self.assertNotEmpty(subnets, "Created subnet not found in the list")
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_subnets_fields(self):
         # Verify specific fields of subnets
-        resp, body = self.client.list_subnets(fields='id')
+        fields = ['id', 'network_id']
+        resp, body = self.client.list_subnets(fields=fields)
         self.assertEqual('200', resp['status'])
         subnets = body['subnets']
         self.assertNotEmpty(subnets, "Subnet list returned is empty")
         for subnet in subnets:
-            self.assertEqual(len(subnet), 1)
-            self.assertIn('id', subnet)
+            self.assertEqual(sorted(subnet.keys()), sorted(fields))
 
     def _try_delete_network(self, net_id):
         # delete network, if it exists
@@ -194,7 +189,7 @@
         except exceptions.NotFound:
             pass
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_delete_network_with_subnet(self):
         # Creates a network
         name = data_utils.rand_name('network-')
@@ -221,32 +216,6 @@
         # it from the list.
         self.subnets.pop()
 
-    @attr(type='smoke')
-    def test_create_port_with_no_ip(self):
-        # For this test create a new network - do not use any previously
-        # created networks.
-        name = data_utils.rand_name('network-nosubnet-')
-        resp, body = self.client.create_network(name=name)
-        self.assertEqual('201', resp['status'])
-        network = body['network']
-        net_id = network['id']
-        self.networks.append(network)
-
-        # Now create a port for this network - without creating any
-        # subnets for this network - this ensures no IP for the port
-        resp, body = self.client.create_port(network_id=net_id)
-        self.assertEqual('201', resp['status'])
-        port = body['port']
-        port_id = port['id']
-        self.addCleanup(self.client.delete_port, port_id)
-
-        # Verify that the port does not have any IP address
-        resp, body = self.client.show_port(port_id)
-        self.assertEqual('200', resp['status'])
-        port_resp = body['port']
-        self.assertEqual(port_id, port_resp['id'])
-        self.assertEqual(port_resp['fixed_ips'], [])
-
 
 class NetworksTestXML(NetworksTestJSON):
     _interface = 'xml'
@@ -275,6 +244,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(BulkNetworkOpsTestJSON, cls).setUpClass()
         cls.network1 = cls.create_network()
@@ -310,7 +280,7 @@
         for n in created_ports:
             self.assertNotIn(n['id'], ports_list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_bulk_create_delete_network(self):
         # Creates 2 networks in one request
         network_names = [data_utils.rand_name('network-'),
@@ -326,7 +296,7 @@
             self.assertIsNotNone(n['id'])
             self.assertIn(n['id'], networks_list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_bulk_create_delete_subnet(self):
         # Creates 2 subnets in one request
         cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
@@ -358,7 +328,7 @@
             self.assertIsNotNone(n['id'])
             self.assertIn(n['id'], subnets_list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_bulk_create_delete_port(self):
         # Creates 2 ports in one request
         networks = [self.network1['id'], self.network2['id']]
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index fbb25a8..68f617b 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -26,7 +26,18 @@
 class PortsTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
+    """
+    Test the following operations for ports:
+
+        port create
+        port delete
+        port list
+        port show
+        port update
+    """
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(PortsTestJSON, cls).setUpClass()
         cls.network = cls.create_network()
@@ -79,17 +90,18 @@
         self.assertEqual(self.port['network_id'], port['network_id'])
         self.assertEqual(self.port['security_groups'],
                          port['security_groups'])
+        self.assertEqual(port['fixed_ips'], [])
 
     @test.attr(type='smoke')
     def test_show_port_fields(self):
         # Verify specific fields of a port
-        field_list = [('fields', 'id'), ]
+        fields = ['id', 'mac_address']
         resp, body = self.client.show_port(self.port['id'],
-                                           field_list=field_list)
+                                           fields=fields)
         self.assertEqual('200', resp['status'])
         port = body['port']
-        self.assertEqual(len(port), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(port.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(port[field_name], self.port[field_name])
 
     @test.attr(type='smoke')
@@ -125,14 +137,14 @@
     @test.attr(type='smoke')
     def test_list_ports_fields(self):
         # Verify specific fields of ports
-        resp, body = self.client.list_ports(fields='id')
+        fields = ['id', 'mac_address']
+        resp, body = self.client.list_ports(fields=fields)
         self.assertEqual('200', resp['status'])
         ports = body['ports']
         self.assertNotEmpty(ports, "Port list returned is empty")
         # Asserting the fields returned are correct
         for port in ports:
-            self.assertEqual(len(port), 1)
-            self.assertIn('id', port)
+            self.assertEqual(sorted(fields), sorted(port.keys()))
 
 
 class PortsTestXML(PortsTestJSON):
@@ -143,6 +155,7 @@
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(PortsAdminExtendedAttrsTestJSON, cls).setUpClass()
         cls.identity_client = cls._get_identity_admin_client()
@@ -180,15 +193,31 @@
 
     @test.attr(type='smoke')
     def test_list_ports_binding_ext_attr(self):
-        resp, body = self.admin_client.list_ports(
-            **{'tenant_id': self.tenant['id']})
+        # Create a new port
+        post_body = {"network_id": self.network['id']}
+        resp, body = self.admin_client.create_port(**post_body)
+        self.assertEqual('201', resp['status'])
+        port = body['port']
+        self.addCleanup(self.admin_client.delete_port, port['id'])
+
+        # Update the port's binding attributes so that it is now 'bound'
+        # to a host
+        update_body = {"binding:host_id": self.host_id}
+        resp, _ = self.admin_client.update_port(port['id'], **update_body)
+        self.assertEqual('200', resp['status'])
+
+        # List all ports, ensure new port is part of list and its binding
+        # attributes are set and accurate
+        resp, body = self.admin_client.list_ports()
         self.assertEqual('200', resp['status'])
         ports_list = body['ports']
-        for port in ports_list:
-            vif_type = port['binding:vif_type']
-            self.assertIsNotNone(vif_type)
-            vif_details = port['binding:vif_details']['port_filter']
-            self.assertIsNotNone(vif_details)
+        pids_list = [p['id'] for p in ports_list]
+        self.assertIn(port['id'], pids_list)
+        listed_port = [p for p in ports_list if p['id'] == port['id']]
+        self.assertEqual(1, len(listed_port),
+                         'Multiple ports listed with id %s in ports listing: '
+                         '%s' % (port['id'], ports_list))
+        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
 
     @test.attr(type='smoke')
     def test_show_port_binding_ext_attr(self):
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 2657031..4cc0338 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -36,6 +36,18 @@
         admin_manager = clients.AdminManager()
         cls.identity_admin_client = admin_manager.identity_client
 
+    def _cleanup_router(self, router):
+        self.delete_router(router)
+        self.routers.remove(router)
+
+    def _create_router(self, name, admin_state_up=False,
+                       external_network_id=None, enable_snat=None):
+        # associate a cleanup with created routers to avoid quota limits
+        router = self.create_router(name, admin_state_up,
+                                    external_network_id, enable_snat)
+        self.addCleanup(self._cleanup_router, router)
+        return router
+
     @test.attr(type='smoke')
     def test_create_show_list_update_delete_router(self):
         # Create a router
@@ -102,7 +114,7 @@
     def test_add_remove_router_interface_with_subnet_id(self):
         network = self.create_network()
         subnet = self.create_subnet(network)
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         # Add router interface with subnet id
         resp, interface = self.client.add_router_interface_with_subnet_id(
             router['id'], subnet['id'])
@@ -121,7 +133,7 @@
     def test_add_remove_router_interface_with_port_id(self):
         network = self.create_network()
         self.create_subnet(network)
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         resp, port_body = self.client.create_port(
             network_id=network['id'])
         # add router interface to port created above
@@ -164,7 +176,7 @@
 
     @test.attr(type='smoke')
     def test_update_router_set_gateway(self):
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         self.client.update_router(
             router['id'],
             external_gateway_info={
@@ -180,7 +192,7 @@
     @test.requires_ext(extension='ext-gw-mode', service='network')
     @test.attr(type='smoke')
     def test_update_router_set_gateway_with_snat_explicit(self):
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         self.admin_client.update_router_with_snat_gw_info(
             router['id'],
             external_gateway_info={
@@ -195,7 +207,7 @@
     @test.requires_ext(extension='ext-gw-mode', service='network')
     @test.attr(type='smoke')
     def test_update_router_set_gateway_without_snat(self):
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         self.admin_client.update_router_with_snat_gw_info(
             router['id'],
             external_gateway_info={
@@ -209,7 +221,7 @@
 
     @test.attr(type='smoke')
     def test_update_router_unset_gateway(self):
-        router = self.create_router(
+        router = self._create_router(
             data_utils.rand_name('router-'),
             external_network_id=CONF.network.public_network_id)
         self.client.update_router(router['id'], external_gateway_info={})
@@ -223,7 +235,7 @@
     @test.requires_ext(extension='ext-gw-mode', service='network')
     @test.attr(type='smoke')
     def test_update_router_reset_gateway_without_snat(self):
-        router = self.create_router(
+        router = self._create_router(
             data_utils.rand_name('router-'),
             external_network_id=CONF.network.public_network_id)
         self.admin_client.update_router_with_snat_gw_info(
@@ -244,7 +256,8 @@
         self.name = self.network['name']
         self.subnet = self.create_subnet(self.network)
         # Add router interface with subnet id
-        self.router = self.create_router(data_utils.rand_name('router-'), True)
+        self.router = self._create_router(
+            data_utils.rand_name('router-'), True)
         self.create_router_interface(self.router['id'], self.subnet['id'])
         self.addCleanup(
             self._delete_extra_routes,
@@ -259,7 +272,7 @@
 
     @test.attr(type='smoke')
     def test_update_router_admin_state(self):
-        self.router = self.create_router(data_utils.rand_name('router-'))
+        self.router = self._create_router(data_utils.rand_name('router-'))
         self.assertFalse(self.router['admin_state_up'])
         # Update router admin state
         resp, update_body = self.client.update_router(self.router['id'],
@@ -275,7 +288,7 @@
         network = self.create_network()
         subnet01 = self.create_subnet(network)
         subnet02 = self.create_subnet(network)
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         interface01 = self._add_router_interface_with_subnet_id(router['id'],
                                                                 subnet01['id'])
         self._verify_router_interface(router['id'], subnet01['id'],
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index e6ad4de..91ab9d6 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -23,6 +23,7 @@
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(RoutersNegativeTest, cls).setUpClass()
         if not test.is_extension_enabled('router', 'network'):
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 78bc80a..7edaaf8 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -21,7 +21,7 @@
 CONF = config.CONF
 
 
-class VPNaaSJSON(base.BaseNetworkTest):
+class VPNaaSTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
     """
@@ -37,11 +37,12 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         if not test.is_extension_enabled('vpnaas', 'network'):
             msg = "vpnaas extension not enabled."
             raise cls.skipException(msg)
-        super(VPNaaSJSON, cls).setUpClass()
+        super(VPNaaSTestJSON, cls).setUpClass()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
         cls.router = cls.create_router(
@@ -175,3 +176,7 @@
                          ikepolicy['phase1_negotiation_mode'])
         self.assertEqual(self.ikepolicy['ike_version'],
                          ikepolicy['ike_version'])
+
+
+class VPNaaSTestXML(VPNaaSTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index a3098a5..c1f468b 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -27,6 +27,7 @@
 class AccountQuotasTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountQuotasTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -82,7 +83,8 @@
         # Set a quota of 20 bytes on the user's account before each test
         headers = {"X-Account-Meta-Quota-Bytes": "20"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
 
     def tearDown(self):
         # Set the reselleradmin auth in headers for next custom_account_client
@@ -94,12 +96,14 @@
         # remove the quota from the container
         headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
         super(AccountQuotasTest, self).tearDown()
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(AccountQuotasTest, cls).tearDownClass()
 
@@ -135,8 +139,9 @@
             )
             headers = {"X-Account-Meta-Quota-Bytes": quota}
 
-            resp, _ = self.os.custom_account_client.request("POST", "",
-                                                            headers, "")
+            resp, _ = self.os.custom_account_client.request("POST", url="",
+                                                            headers=headers,
+                                                            body="")
 
             self.assertEqual(resp["status"], "204")
             self.assertHeaders(resp, 'Account', 'POST')
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 7648ea1..4677f97 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,6 +27,7 @@
 class AccountQuotasNegativeTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountQuotasNegativeTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -81,7 +82,8 @@
         # Set a quota of 20 bytes on the user's account before each test
         headers = {"X-Account-Meta-Quota-Bytes": "20"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
 
     def tearDown(self):
         # Set the reselleradmin auth in headers for next custom_account_client
@@ -93,12 +95,14 @@
         # remove the quota from the container
         headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
         super(AccountQuotasNegativeTest, self).tearDown()
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(AccountQuotasNegativeTest, cls).tearDownClass()
 
@@ -120,6 +124,7 @@
                           {"Quota-Bytes": "100"})
 
     @test.attr(type=["negative", "smoke"])
+    @test.skip_because(bug="1310597")
     @test.requires_ext(extension='account_quotas', service='object')
     def test_upload_large_object(self):
         object_name = data_utils.rand_name(name="TestObject")
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 4b895d8..7fb0604 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -29,10 +29,13 @@
 
 
 class AccountTest(base.BaseObjectTest):
+
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountTest, cls).setUpClass()
-        cls.containers = []
         for i in moves.xrange(ord('a'), ord('f') + 1):
             name = data_utils.rand_name(name='%s-' % chr(i))
             cls.container_client.create_container(name)
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 6c71340..581c6d9 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,6 +23,7 @@
 class StaticWebTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(StaticWebTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -45,7 +46,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(StaticWebTest, cls).tearDownClass()
 
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 9bd986f..6bda83b 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -31,8 +31,10 @@
 
 
 class ContainerSyncTest(base.BaseObjectTest):
+    clients = {}
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ContainerSyncTest, cls).setUpClass()
         cls.containers = []
@@ -50,7 +52,6 @@
             int(container_sync_timeout / cls.container_sync_interval)
 
         # define container and object clients
-        cls.clients = {}
         cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
             (cls.container_client, cls.object_client)
         cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index e0d15ac..dc5585e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -26,7 +26,11 @@
 
 class ObjectFormPostTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectFormPostTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name='TestContainer')
@@ -39,6 +43,18 @@
         cls.metadata = {'Temp-URL-Key': cls.key}
         cls.account_client.create_account_metadata(metadata=cls.metadata)
 
+    def setUp(self):
+        super(ObjectFormPostTest, self).setUp()
+
+        # make sure the metadata has been set
+        account_client_metadata, _ = \
+            self.account_client.list_account_metadata()
+        self.assertIn('x-account-meta-temp-url-key',
+                      account_client_metadata)
+        self.assertEqual(
+            account_client_metadata['x-account-meta-temp-url-key'],
+            self.key)
+
     @classmethod
     def tearDownClass(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
@@ -100,13 +116,9 @@
         headers = {'Content-Type': content_type,
                    'Content-Length': str(len(body))}
 
-        url = "%s/%s/%s" % (self.container_client.base_url,
-                            self.container_name,
-                            self.object_name)
+        url = "%s/%s" % (self.container_name, self.object_name)
 
-        # Use a raw request, otherwise authentication headers are used
-        resp, body = self.object_client.http_obj.request(url, "POST",
-                                                         body, headers=headers)
+        resp, body = self.object_client.post(url, body, headers=headers)
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, "Object", "POST")
 
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index a52c248..878bf6d 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -20,12 +20,17 @@
 
 from tempest.api.object_storage import base
 from tempest.common.utils import data_utils
+from tempest import exceptions
 from tempest import test
 
 
 class ObjectFormPostNegativeTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectFormPostNegativeTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name='TestContainer')
@@ -38,6 +43,18 @@
         cls.metadata = {'Temp-URL-Key': cls.key}
         cls.account_client.create_account_metadata(metadata=cls.metadata)
 
+    def setUp(self):
+        super(ObjectFormPostNegativeTest, self).setUp()
+
+        # make sure the metadata has been set
+        account_client_metadata, _ = \
+            self.account_client.list_account_metadata()
+        self.assertIn('x-account-meta-temp-url-key',
+                      account_client_metadata)
+        self.assertEqual(
+            account_client_metadata['x-account-meta-temp-url-key'],
+            self.key)
+
     @classmethod
     def tearDownClass(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
@@ -100,12 +117,25 @@
         headers = {'Content-Type': content_type,
                    'Content-Length': str(len(body))}
 
-        url = "%s/%s/%s" % (self.container_client.base_url,
-                            self.container_name,
-                            self.object_name)
+        url = "%s/%s" % (self.container_name, self.object_name)
+        exc = self.assertRaises(
+            exceptions.Unauthorized,
+            self.object_client.post,
+            url, body, headers=headers)
+        self.assertIn('FormPost: Form Expired', str(exc))
 
-        # Use a raw request, otherwise authentication headers are used
-        resp, body = self.object_client.http_obj.request(url, "POST",
-                                                         body, headers=headers)
-        self.assertEqual(int(resp['status']), 401)
-        self.assertIn('FormPost: Form Expired', body)
+    @test.requires_ext(extension='formpost', service='object')
+    @test.attr(type='gate')
+    def test_post_object_using_form_invalid_signature(self):
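+        # Overwrite the temp-url key; the signature in the multipart form
+        # (which get_multipart_form presumably derives from self.key) will
+        # then no longer match the key stored on the account.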
+        self.key = "Wrong"
+        body, content_type = self.get_multipart_form()
+
+        headers = {'Content-Type': content_type,
+                   'Content-Length': str(len(body))}
+
+        url = "%s/%s" % (self.container_name, self.object_name)
+        exc = self.assertRaises(
+            exceptions.Unauthorized,
+            self.object_client.post,
+            url, body, headers=headers)
+        self.assertIn('FormPost: Invalid Signature', str(exc))
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 91df292..06e63a4 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -14,7 +14,10 @@
 #    under the License.
 
 import hashlib
+import random
+import re
 from six import moves
+import time
 
 from tempest.api.object_storage import base
 from tempest.common import custom_matchers
@@ -35,6 +38,29 @@
         cls.delete_containers(cls.containers)
         super(ObjectTest, cls).tearDownClass()
 
+    def _create_object(self, metadata=None):
+        # setup object
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        self.object_client.create_object(self.container_name,
+                                         object_name, data, metadata=metadata)
+
+        return object_name, data
+
+    def _upload_segments(self):
+        # create object
+        object_name = data_utils.rand_name(name='LObject')
+        data = data_utils.arbitrary_string()
+        segments = 10
+        data_segments = [data + str(i) for i in moves.xrange(segments)]
+        # uploading segments
+        for i in moves.xrange(segments):
+            resp, _ = self.object_client.create_object_segments(
+                self.container_name, object_name, i, data_segments[i])
+            self.assertEqual(resp['status'], '201')
+
+        return object_name, data_segments
+
     @test.attr(type='smoke')
     def test_create_object(self):
         # create object
@@ -64,42 +90,227 @@
         self.assertHeaders(resp, 'Object', 'DELETE')
 
     @test.attr(type='smoke')
-    def test_object_metadata(self):
-        # add metadata to storage object, test if metadata is retrievable
+    def test_update_object_metadata(self):
+        # update object metadata
+        object_name, data = self._create_object()
 
-        # create Object
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        resp, _ = self.object_client.create_object(self.container_name,
-                                                   object_name, data)
-        # set object metadata
-        meta_key = data_utils.rand_name(name='test-')
-        meta_value = data_utils.rand_name(name='MetaValue-')
-        orig_metadata = {meta_key: meta_value}
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
         resp, _ = self.object_client.update_object_metadata(
-            self.container_name, object_name, orig_metadata)
+            self.container_name,
+            object_name,
+            metadata,
+            metadata_prefix='')
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, 'Object', 'POST')
 
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+
+    def test_update_object_metadata_with_remove_metadata(self):
+        # update object metadata with remove metadata
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertNotIn('x-object-meta-test-meta1', resp)
+
+    @test.attr(type='smoke')
+    def test_update_object_metadata_with_create_and_remove_metadata(self):
+        # creation and deletion of metadata with one request
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
+                           'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertNotIn('x-object-meta-test-meta1', resp)
+        self.assertIn('x-object-meta-test-meta2', resp)
+        self.assertEqual(resp['x-object-meta-test-meta2'], 'Meta2')
+
+    @test.attr(type='smoke')
+    def test_update_object_metadata_with_x_object_manifest(self):
+        # update object metadata with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        data_empty = ''
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data_empty,
+                                         metadata=None)
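+        # Setting X-Object-Manifest turns this empty object into a dynamic
+        # large object: GETs on it return the concatenation of every object
+        # whose name starts with the <container>/<object> prefix.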
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        update_metadata = {'X-Object-Manifest': object_prefix}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn('x-object-manifest', resp)
+        self.assertNotEqual(len(resp['x-object-manifest']), 0)
+
+    def test_update_object_metadata_with_x_object_metakey(self):
+        # update object metadata with a blank value of metadata
+        object_name, data = self._create_object()
+
+        update_metadata = {'X-Object-Meta-test-meta': ''}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], '')
+
+    @test.attr(type='smoke')
+    def test_update_object_metadata_with_x_remove_object_metakey(self):
+        # update object metadata with a blank value of remove metadata
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertNotIn('x-object-meta-test-meta', resp)
+
+    @test.attr(type='smoke')
+    def test_list_object_metadata(self):
         # get object metadata
-        resp, resp_metadata = self.object_client.list_object_metadata(
-            self.container_name, object_name)
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=metadata)
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, 'Object', 'HEAD')
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
 
-        actual_meta_key = 'x-object-meta-' + meta_key
-        self.assertIn(actual_meta_key, resp)
-        self.assertEqual(resp[actual_meta_key], meta_value)
+    @test.attr(type='smoke')
+    def test_list_no_object_metadata(self):
+        # get empty list of object metadata
+        object_name, data = self._create_object()
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'HEAD')
+        self.assertNotIn('x-object-meta-', str(resp))
+
+    @test.attr(type='smoke')
+    def test_list_object_metadata_with_x_object_manifest(self):
+        # get object metadata with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        metadata = {'X-Object-Manifest': object_prefix}
+        data_empty = ''
+        resp, _ = self.object_client.create_object(
+            self.container_name,
+            object_name,
+            data_empty,
+            metadata=metadata)
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+        # Check only the existence of common headers with a custom matcher
+        self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+                        'Object', 'HEAD'))
+        self.assertIn('x-object-manifest', resp)
+
+        # The Etag value of a large object is enclosed in double quotes.
+        # This is a special case, so the formats of the response headers
+        # are checked here without a custom matcher.
+        self.assertTrue(resp['etag'].startswith('\"'))
+        self.assertTrue(resp['etag'].endswith('\"'))
+        self.assertTrue(resp['etag'].strip('\"').isalnum())
+        self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+        self.assertNotEqual(len(resp['content-type']), 0)
+        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+                                 resp['x-trans-id']))
+        self.assertNotEqual(len(resp['date']), 0)
+        self.assertEqual(resp['accept-ranges'], 'bytes')
+        self.assertEqual(resp['x-object-manifest'],
+                         '%s/%s' % (self.container_name, object_name))
 
     @test.attr(type='smoke')
     def test_get_object(self):
         # retrieve object's data (in response body)
 
         # create object
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        resp, _ = self.object_client.create_object(self.container_name,
-                                                   object_name, data)
+        object_name, data = self._create_object()
         # get object
         resp, body = self.object_client.get_object(self.container_name,
                                                    object_name)
@@ -109,6 +320,183 @@
         self.assertEqual(body, data)
 
     @test.attr(type='smoke')
+    def test_get_object_with_metadata(self):
+        # get object with metadata
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=metadata)
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=None)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_range(self):
+        # get object with range
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(100)
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=None)
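+        # HTTP Range bounds are inclusive, so this asks for the 3 bytes
+        # data[rand_num - 3:rand_num].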
+        rand_num = random.randint(3, len(data) - 1)
+        metadata = {'Range': 'bytes=%s-%s' % (rand_num - 3, rand_num - 1)}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data[rand_num - 3: rand_num])
+
+    @test.attr(type='smoke')
+    def test_get_object_with_x_object_manifest(self):
+        # get object with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        metadata = {'X-Object-Manifest': object_prefix}
+        data_empty = ''
+        resp, body = self.object_client.create_object(
+            self.container_name,
+            object_name,
+            data_empty,
+            metadata=metadata)
+
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=None)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+        # Check only the existence of common headers with a custom matcher
+        self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+                        'Object', 'GET'))
+        self.assertIn('x-object-manifest', resp)
+
+        # The Etag value of a large object is enclosed in double quotes.
+        # This is a special case, so the formats of the response headers
+        # are checked here without a custom matcher.
+        self.assertTrue(resp['etag'].startswith('\"'))
+        self.assertTrue(resp['etag'].endswith('\"'))
+        self.assertTrue(resp['etag'].strip('\"').isalnum())
+        self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+        self.assertNotEqual(len(resp['content-type']), 0)
+        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+                                 resp['x-trans-id']))
+        self.assertNotEqual(len(resp['date']), 0)
+        self.assertEqual(resp['accept-ranges'], 'bytes')
+        self.assertEqual(resp['x-object-manifest'],
+                         '%s/%s' % (self.container_name, object_name))
+
+        self.assertEqual(''.join(data_segments), body)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_match(self):
+        # get object with if_match
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(10)
+        create_md5 = hashlib.md5(data).hexdigest()
+        create_metadata = {'Etag': create_md5}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        list_metadata = {'If-Match': create_md5}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_modified_since(self):
+        # get object with if_modified_since
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        time_now = time.time()
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=None)
+
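+        # Use a date 24 hours (86400 seconds) in the past so the freshly
+        # created object always counts as modified since then and the GET
+        # returns its body.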
+        http_date = time.ctime(time_now - 86400)
+        list_metadata = {'If-Modified-Since': http_date}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    def test_get_object_with_if_none_match(self):
+        # get object with if_none_match
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(10)
+        create_md5 = hashlib.md5(data).hexdigest()
+        create_metadata = {'Etag': create_md5}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
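+        # Use an etag that cannot match the stored object, so the
+        # If-None-Match condition holds and the full body is returned.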
+        list_data = data_utils.arbitrary_string(15)
+        list_md5 = hashlib.md5(list_data).hexdigest()
+        list_metadata = {'If-None-Match': list_md5}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_unmodified_since(self):
+        # get object with if_unmodified_since
+        object_name, data = self._create_object()
+
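+        # Use a date 24 hours in the future so the object's last-modified
+        # time precedes it and the conditional GET succeeds.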
+        time_now = time.time()
+        http_date = time.ctime(time_now + 86400)
+        list_metadata = {'If-Unmodified-Since': http_date}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_x_newest(self):
+        # get object with x_newest
+        object_name, data = self._create_object()
+
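+        # X-Newest makes the proxy query all replicas and return the most
+        # recently written copy rather than the first one to respond.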
+        list_metadata = {'X-Newest': 'true'}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
     def test_copy_object_in_same_container(self):
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
@@ -286,10 +674,7 @@
         # Make a conditional request for an object using the If-None-Match
         # header, it should get downloaded only if the local file is different,
         # otherwise the response code should be 304 Not Modified
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        self.object_client.create_object(self.container_name,
-                                         object_name, data)
+        object_name, data = self._create_object()
         # local copy is identical, no download
         md5 = hashlib.md5(data).hexdigest()
         headers = {'If-None-Match': md5}
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index cf24f66..7d26433 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -27,7 +27,11 @@
 
 class ObjectTempUrlNegativeTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectTempUrlNegativeTest, cls).setUpClass()
 
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 1832259..4e40de9 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,7 +30,7 @@
     @classmethod
     def setUpClass(cls):
         super(BaseOrchestrationTest, cls).setUpClass()
-        cls.os = clients.OrchestrationManager()
+        cls.os = clients.Manager()
         if not CONF.service_available.heat:
             raise cls.skipException("Heat support is required")
         cls.build_timeout = CONF.orchestration.build_timeout
@@ -41,6 +41,7 @@
         cls.servers_client = cls.os.servers_client
         cls.keypairs_client = cls.os.keypairs_client
         cls.network_client = cls.os.network_client
+        cls.volumes_client = cls.os.volumes_client
         cls.stacks = []
         cls.keypairs = []
 
@@ -101,9 +102,8 @@
 
     @classmethod
     def load_template(cls, name, ext='yaml'):
-        loc = ["tempest", "api", "orchestration",
-               "stacks", "templates", "%s.%s" % (name, ext)]
-        fullpath = os.path.join(*loc)
+        loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+        fullpath = os.path.join(os.path.dirname(__file__), *loc)
 
         with open(fullpath, "r") as f:
             content = f.read()
@@ -120,3 +120,25 @@
         """Return a stack output value for a given key."""
         return next((o['output_value'] for o in stack['outputs']
                     if o['output_key'] == output_key), None)
+
+    def assert_fields_in_dict(self, obj, *fields):
+        for field in fields:
+            self.assertIn(field, obj)
+
+    def list_resources(self, stack_identifier):
+        """Get a dict mapping of resource names to types."""
+        resp, resources = self.client.list_resources(stack_identifier)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(resources, list)
+        for res in resources:
+            self.assert_fields_in_dict(res, 'logical_resource_id',
+                                       'resource_type', 'resource_status',
+                                       'updated_time')
+
+        return dict((r['resource_name'], r['resource_type'])
+                    for r in resources)
+
+    def get_stack_output(self, stack_identifier, output_key):
+        resp, body = self.client.get_stack(stack_identifier)
+        self.assertEqual('200', resp['status'])
+        return self.stack_output(body, output_key)
diff --git a/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml b/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
index 23ad06f..fa5345e 100644
--- a/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
+++ b/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
@@ -11,6 +11,8 @@
     Type: String
   network:
     Type: String
+  timeout:
+    Type: Number
 Resources:
   CfnUser:
     Type: AWS::IAM::User
@@ -68,7 +70,7 @@
     DependsOn: SmokeServer
     Properties:
       Handle: {Ref: WaitHandle}
-      Timeout: '600'
+      Timeout: {Ref: timeout}
 Outputs:
   WaitConditionStatus:
     Description: Contents of /tmp/smoke-status on SmokeServer
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..3e03a30
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2013-05-23
+
+resources:
+    volume:
+        type: OS::Cinder::Volume
+        properties:
+            size: 1
+            description: a descriptive description
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
new file mode 100644
index 0000000..08e3da4
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -0,0 +1,25 @@
+heat_template_version: 2013-05-23
+
+resources:
+    volume:
+        deletion_policy: 'Retain'
+        type: OS::Cinder::Volume
+        properties:
+            size: 1
+            description: a descriptive description
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
index 9d90e31..63b03f4 100644
--- a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -12,6 +12,8 @@
     type: string
   ExternalNetworkId:
     type: string
+  timeout:
+    type: number
 resources:
   Network:
     type: OS::Neutron::Net
@@ -34,22 +36,22 @@
       admin_state_up: false
       external_gateway_info:
         network: {get_param: ExternalNetworkId}
-        enable_snat: false
   RouterInterface:
     type: OS::Neutron::RouterInterface
     properties:
       router_id: {get_param: ExternalRouterId}
       subnet_id: {get_resource: Subnet}
   Server:
-    type: AWS::EC2::Instance
+    type: OS::Nova::Server
     metadata:
       Name: SmokeServerNeutron
     properties:
-      ImageId: {get_param: ImageId}
-      InstanceType: {get_param: InstanceType}
-      KeyName: {get_param: KeyName}
-      SubnetId: {get_resource: Subnet}
-      UserData:
+      image: {get_param: ImageId}
+      flavor: {get_param: InstanceType}
+      key_name: {get_param: KeyName}
+      networks:
+      - network: {get_resource: Network}
+      user_data:
         str_replace:
           template: |
             #!/bin/bash -v
@@ -65,4 +67,4 @@
     depends_on: Server
     properties:
       Handle: {get_resource: WaitHandleNeutron}
-      Timeout: '600'
+      Timeout: {get_param: timeout}
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 83470be..3086d78 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -53,7 +53,8 @@
                 'InstanceType': CONF.orchestration.instance_type,
                 'ImageId': CONF.orchestration.image_ref,
                 'ExternalRouterId': cls.external_router_id,
-                'ExternalNetworkId': cls.external_network_id
+                'ExternalNetworkId': cls.external_network_id,
+                'timeout': CONF.orchestration.build_timeout
             })
         cls.stack_id = cls.stack_identifier.split('/')[1]
         try:
@@ -90,7 +91,7 @@
         resources = [('Network', 'OS::Neutron::Net'),
                      ('Subnet', 'OS::Neutron::Subnet'),
                      ('RouterInterface', 'OS::Neutron::RouterInterface'),
-                     ('Server', 'AWS::EC2::Instance')]
+                     ('Server', 'OS::Nova::Server')]
         for resource_name, resource_type in resources:
             resource = self.test_resources.get(resource_name, None)
             self.assertIsInstance(resource, dict)
@@ -136,8 +137,6 @@
         self.assertEqual('NewRouter', router['name'])
         self.assertEqual(self.external_network_id,
                          router['external_gateway_info']['network_id'])
-        self.assertEqual(False,
-                         router['external_gateway_info']['enable_snat'])
         self.assertEqual(False, router['admin_state_up'])
 
     @test.attr(type='slow')
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 4b1b5ef..9ef95a1 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -40,16 +40,18 @@
         cls.resource_type = 'AWS::AutoScaling::LaunchConfiguration'
         cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
 
-    def assert_fields_in_dict(self, obj, *fields):
-        for field in fields:
-            self.assertIn(field, obj)
+    def _list_stacks(self, expected_num=None, **filter_kwargs):
+        resp, stacks = self.client.list_stacks(params=filter_kwargs)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(stacks, list)
+        if expected_num is not None:
+            self.assertEqual(expected_num, len(stacks))
+        return stacks
 
     @attr(type='gate')
     def test_stack_list(self):
         """Created stack should be in the list of existing stacks."""
-        resp, stacks = self.client.list_stacks()
-        self.assertEqual('200', resp['status'])
-        self.assertIsInstance(stacks, list)
+        stacks = self._list_stacks()
         stacks_names = map(lambda stack: stack['stack_name'], stacks)
         self.assertIn(self.stack_name, stacks_names)
 
@@ -89,20 +91,8 @@
     def test_list_resources(self):
         """Getting list of created resources for the stack should be possible.
         """
-        resp, resources = self.client.list_resources(self.stack_identifier)
-        self.assertEqual('200', resp['status'])
-        self.assertIsInstance(resources, list)
-        for res in resources:
-            self.assert_fields_in_dict(res, 'logical_resource_id',
-                                       'resource_type', 'resource_status',
-                                       'updated_time')
-
-        resources_names = map(lambda resource: resource['logical_resource_id'],
-                              resources)
-        self.assertIn(self.resource_name, resources_names)
-        resources_types = map(lambda resource: resource['resource_type'],
-                              resources)
-        self.assertIn(self.resource_type, resources_types)
+        resources = self.list_resources(self.stack_identifier)
+        self.assertEqual({self.resource_name: self.resource_type}, resources)
 
     @attr(type='gate')
     def test_show_resource(self):
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index 5f65193..4b845b1 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -50,7 +50,8 @@
                 'key_name': keypair_name,
                 'flavor': CONF.orchestration.instance_type,
                 'image': CONF.orchestration.image_ref,
-                'network': cls._get_default_network()['id']
+                'network': cls._get_default_network()['id'],
+                'timeout': CONF.orchestration.build_timeout
             })
 
     @test.attr(type='slow')
@@ -93,7 +94,8 @@
         try:
             self.client.wait_for_resource_status(
                 sid, 'WaitCondition', 'CREATE_COMPLETE')
-        except exceptions.TimeoutException as e:
+        except (exceptions.StackResourceBuildErrorException,
+                exceptions.TimeoutException) as e:
             # attempt to log the server console to help with debugging
             # the cause of the server not signalling the waitcondition
             # to heat.
@@ -108,14 +110,6 @@
         # wait for create to complete.
         self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
 
-        # fetch the stack
-        resp, body = self.client.get_stack(sid)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
-        # fetch the stack
-        resp, body = self.client.get_stack(sid)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
         # This is an assert of great significance, as it means the following
         # has happened:
         # - cfn-init read the provided metadata and wrote out a file
@@ -123,5 +117,5 @@
         # - a cfn-signal was built which was signed with provided credentials
         # - the wait condition was fulfilled and the stack has changed state
         wait_status = json.loads(
-            self.stack_output(body, 'WaitConditionStatus'))
+            self.get_stack_output(sid, 'WaitConditionStatus'))
         self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
new file mode 100644
index 0000000..a9a43b6
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_update.py
@@ -0,0 +1,84 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+LOG = logging.getLogger(__name__)
+
+
+class UpdateStackTestJSON(base.BaseOrchestrationTest):
+    _interface = 'json'
+
+    template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+'''
+    update_template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+  random2:
+    type: OS::Heat::RandomString
+'''
+
+    def update_stack(self, stack_identifier, template):
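+        # stack_identifier has the form '<stack_name>/<stack_id>'.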
+        stack_name = stack_identifier.split('/')[0]
+        resp = self.client.update_stack(
+            stack_identifier=stack_identifier,
+            name=stack_name,
+            template=template)
+        self.assertEqual('202', resp[0]['status'])
+        self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
+
+    @test.attr(type='gate')
+    def test_stack_update_nochange(self):
+        stack_name = data_utils.rand_name('heat')
+        stack_identifier = self.create_stack(stack_name, self.template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        expected_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+
+        # Update with no changes, resources should be unchanged
+        self.update_stack(stack_identifier, self.template)
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+
+    @test.attr(type='gate')
+    @test.skip_because(bug='1308682')
+    def test_stack_update_add_remove(self):
+        stack_name = data_utils.rand_name('heat')
+        stack_identifier = self.create_stack(stack_name, self.template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        initial_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Add one resource via a stack update
+        self.update_stack(stack_identifier, self.update_template)
+        updated_resources = {'random1': 'OS::Heat::RandomString',
+                             'random2': 'OS::Heat::RandomString'}
+        self.assertEqual(updated_resources,
+                         self.list_resources(stack_identifier))
+
+        # Then remove it by updating with the original template
+        self.update_stack(stack_identifier, self.template)
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..2544c41
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,101 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(CinderResourcesTest, cls).setUpClass()
+        if not CONF.service_available.cinder:
+            raise cls.skipException('Cinder support is required')
+
+    def _cinder_verify(self, volume_id):
+        self.assertIsNotNone(volume_id)
+        resp, volume = self.volumes_client.get_volume(volume_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual('available', volume.get('status'))
+        self.assertEqual(1, volume.get('size'))
+        self.assertEqual('a descriptive description',
+                         volume.get('display_description'))
+
+    def _outputs_verify(self, stack_identifier):
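+        # Stack outputs come back as strings, so 'size' is compared against
+        # '1' here rather than the integer used in _cinder_verify.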
+        self.assertEqual('available',
+                         self.get_stack_output(stack_identifier, 'status'))
+        self.assertEqual('1',
+                         self.get_stack_output(stack_identifier, 'size'))
+        self.assertEqual('a descriptive description',
+                         self.get_stack_output(stack_identifier,
+                                               'display_description'))
+
+    @test.attr(type='gate')
+    def test_cinder_volume_create_delete(self):
+        """Create and delete a volume via OS::Cinder::Volume."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('cinder_basic')
+        stack_identifier = self.create_stack(stack_name, template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+        self._cinder_verify(volume_id)
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack_identifier)
+
+        # Delete the stack and ensure the volume is gone
+        self.client.delete_stack(stack_identifier)
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+        self.assertRaises(exceptions.NotFound,
+                          self.volumes_client.get_volume,
+                          volume_id)
+
+    def _cleanup_volume(self, volume_id):
+        """Cleanup the volume direct with cinder."""
+        resp = self.volumes_client.delete_volume(volume_id)
+        self.assertEqual(202, resp[0].status)
+        self.volumes_client.wait_for_resource_deletion(volume_id)
+
+    @test.attr(type='gate')
+    def test_cinder_volume_create_delete_retain(self):
+        """Ensure the 'Retain' deletion policy is respected."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('cinder_basic_delete_retain')
+        stack_identifier = self.create_stack(stack_name, template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+        self.addCleanup(self._cleanup_volume, volume_id)
+        self._cinder_verify(volume_id)
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack_identifier)
+
+        # Delete the stack and ensure the volume is *not* gone
+        self.client.delete_stack(stack_identifier)
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+        self._cinder_verify(volume_id)
+
+        # Volume cleanup happens via addCleanup calling _cleanup_volume
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4496f18..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -85,24 +85,6 @@
             self.volume['id'])
         self.assertEqual('error', volume_get['status'])
 
-    @test.attr(type='gate')
-    def test_volume_begin_detaching(self):
-        # test volume begin detaching : available -> detaching -> available
-        resp, body = self.client.volume_begin_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp_get, volume_get = self.client.get_volume(self.volume['id'])
-        self.assertEqual('detaching', volume_get['status'])
-
-    @test.attr(type='gate')
-    def test_volume_roll_detaching(self):
-        # test volume roll detaching : detaching -> in-use -> available
-        resp, body = self.client.volume_begin_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp, body = self.client.volume_roll_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp_get, volume_get = self.client.get_volume(self.volume['id'])
-        self.assertEqual('in-use', volume_get['status'])
-
     def test_volume_force_delete_when_volume_is_creating(self):
         # test force delete when status of volume is creating
         self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index be5d76b..58da440 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -51,8 +51,7 @@
         v_name = data_utils.rand_name('Volume')
         metadata = {'Type': 'Test'}
         # Create a volume
-        resp, volume = self.client.create_volume(size=1,
-                                                 display_name=v_name,
+        resp, volume = self.client.create_volume(display_name=v_name,
                                                  metadata=metadata,
                                                  **kwargs)
         self.assertEqual(200, resp.status)
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
new file mode 100644
index 0000000..b04cf64
--- /dev/null
+++ b/tempest/api_schema/compute/agents.py
@@ -0,0 +1,40 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_agents = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'agents': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'agent_id': {'type': ['integer', 'string']},
+                        'hypervisor': {'type': 'string'},
+                        'os': {'type': 'string'},
+                        'architecture': {'type': 'string'},
+                        'version': {'type': 'string'},
+                        'url': {'type': 'string', 'format': 'uri'},
+                        'md5hash': {'type': 'string'}
+                    },
+                    'required': ['agent_id', 'hypervisor', 'os',
+                                 'architecture', 'version', 'url', 'md5hash']
+                }
+            }
+        },
+        'required': ['agents']
+    }
+}
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
index 49793fe..a3ab3c8 100644
--- a/tempest/api_schema/compute/aggregates.py
+++ b/tempest/api_schema/compute/aggregates.py
@@ -12,6 +12,26 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
+aggregate = {
+    'type': 'object',
+    'properties': {
+        'availability_zone': {'type': ['string', 'null']},
+        'created_at': {'type': 'string'},
+        'deleted': {'type': 'boolean'},
+        'deleted_at': {'type': ['string', 'null']},
+        'hosts': {'type': 'array'},
+        'id': {'type': 'integer'},
+        'metadata': {'type': 'object'},
+        'name': {'type': 'string'},
+        'updated_at': {'type': ['string', 'null']}
+    },
+    'required': ['availability_zone', 'created_at', 'deleted',
+                 'deleted_at', 'hosts', 'id', 'metadata',
+                 'name', 'updated_at']
+}
+
 list_aggregates = {
     'status_code': [200],
     'response_body': {
@@ -19,25 +39,28 @@
         'properties': {
             'aggregates': {
                 'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'availability_zone': {'type': ['string', 'null']},
-                        'created_at': {'type': 'string'},
-                        'deleted': {'type': 'boolean'},
-                        'deleted_at': {'type': ['string', 'null']},
-                        'hosts': {'type': 'array'},
-                        'id': {'type': 'integer'},
-                        'metadata': {'type': 'object'},
-                        'name': {'type': 'string'},
-                        'updated_at': {'type': ['string', 'null']}
-                    },
-                    'required': ['availability_zone', 'created_at', 'deleted',
-                                 'deleted_at', 'hosts', 'id', 'metadata',
-                                 'name', 'updated_at']
-                }
+                'items': aggregate
             }
         },
         'required': ['aggregates']
     }
 }
+
+get_aggregate = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'aggregate': aggregate
+        },
+        'required': ['aggregate']
+    }
+}
+
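+# Setting aggregate metadata returns the same single-aggregate body, so the
+# get schema is reused as-is.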
+aggregate_set_metadata = get_aggregate
+# The 'updated_at' attribute of 'update_aggregate' can't be null.
+update_aggregate = copy.deepcopy(get_aggregate)
+update_aggregate['response_body']['properties']['aggregate']['properties'][
+    'updated_at'] = {
+        'type': 'string'
+    }
diff --git a/tempest/api_schema/compute/availability_zone.py b/tempest/api_schema/compute/availability_zone.py
new file mode 100644
index 0000000..c1abc64
--- /dev/null
+++ b/tempest/api_schema/compute/availability_zone.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# NOTE: This is the detail information for the "get az detail" API.
+# The information is the same between v2 and v3 APIs.
+detail = {
+    'type': 'object',
+    'patternProperties': {
+        # NOTE: Here is for a hostname
+        '^[a-zA-Z0-9-_.]+$': {
+            'type': 'object',
+            'patternProperties': {
+                # NOTE: Here is for a service name
+                '^.*$': {
+                    'type': 'object',
+                    'properties': {
+                        'available': {'type': 'boolean'},
+                        'active': {'type': 'boolean'},
+                        'updated_at': {'type': 'string'}
+                    },
+                    'required': ['available', 'active', 'updated_at']
+                }
+            }
+        }
+    }
+}
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+_common_schema = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'certificate': {
+                'type': 'object',
+                'properties': {
+                    'data': {'type': 'string'},
+                    'private_key': {'type': 'string'},
+                },
+                'required': ['data', 'private_key'],
+            }
+        },
+        'required': ['certificate'],
+    }
+}
+
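+# In the show response 'private_key' comes back as null, so its type is
+# overridden below.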
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+    'properties']['private_key'].update({'type': 'null'})
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
index a6367d4..aa019e4 100644
--- a/tempest/api_schema/compute/flavors.py
+++ b/tempest/api_schema/compute/flavors.py
@@ -35,3 +35,43 @@
         'required': ['flavors']
     }
 }
+
+common_flavor_info = {
+    'type': 'object',
+    'properties': {
+        'name': {'type': 'string'},
+        'links': parameter_types.links,
+        'ram': {'type': 'integer'},
+        'vcpus': {'type': 'integer'},
+        'swap': {'type': 'integer'},
+        'disk': {'type': 'integer'},
+        'id': {'type': 'string'}
+    },
+    'required': ['name', 'links', 'ram', 'vcpus',
+                 'swap', 'disk', 'id']
+}
+
+common_flavor_list_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'flavors': {
+                'type': 'array',
+                'items': common_flavor_info
+            }
+        },
+        'required': ['flavors']
+    }
+}
+
+common_flavor_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'flavor': common_flavor_info
+        },
+        'required': ['flavor']
+    }
+}
diff --git a/tempest/api_schema/compute/flavors_access.py b/tempest/api_schema/compute/flavors_access.py
index 152e24c..cd31b0a 100644
--- a/tempest/api_schema/compute/flavors_access.py
+++ b/tempest/api_schema/compute/flavors_access.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-list_flavor_access = {
+add_remove_list_flavor_access = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
diff --git a/tempest/api_schema/compute/flavors_extra_specs.py b/tempest/api_schema/compute/flavors_extra_specs.py
new file mode 100644
index 0000000..4003d36
--- /dev/null
+++ b/tempest/api_schema/compute/flavors_extra_specs.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+flavor_extra_specs = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'extra_specs': {
+                'type': 'object',
+                'patternProperties': {
+                    '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['extra_specs']
+    }
+}
+
+flavor_extra_specs_key = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'patternProperties': {
+            '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+        }
+    }
+}
diff --git a/tempest/api_schema/compute/interfaces.py b/tempest/api_schema/compute/interfaces.py
new file mode 100644
index 0000000..1e15c18
--- /dev/null
+++ b/tempest/api_schema/compute/interfaces.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_interface = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
new file mode 100644
index 0000000..6723869
--- /dev/null
+++ b/tempest/api_schema/compute/migrations.py
@@ -0,0 +1,56 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_migrations = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'migrations': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        # NOTE: The type of 'id' is currently integer, but
+                        # 'string' is also allowed here because it may be
+                        # changed to 'uuid' in the future.
+                        'id': {'type': ['integer', 'string']},
+                        'status': {'type': 'string'},
+                        'instance_uuid': {'type': 'string'},
+                        'source_node': {'type': 'string'},
+                        'source_compute': {'type': 'string'},
+                        'dest_node': {'type': 'string'},
+                        'dest_compute': {'type': 'string'},
+                        'dest_host': {'type': 'string'},
+                        'old_instance_type_id': {
+                            'type': ['integer', 'string']
+                        },
+                        'new_instance_type_id': {
+                            'type': ['integer', 'string']
+                        },
+                        'created_at': {'type': 'string'},
+                        'updated_at': {'type': ['string', 'null']}
+                    },
+                    'required': [
+                        'id', 'status', 'instance_uuid', 'source_node',
+                        'source_compute', 'dest_node', 'dest_compute',
+                        'dest_host', 'old_instance_type_id',
+                        'new_instance_type_id', 'created_at', 'updated_at'
+                    ]
+                }
+            }
+        },
+        'required': ['migrations']
+    }
+}
diff --git a/tempest/api_schema/compute/parameter_types.py b/tempest/api_schema/compute/parameter_types.py
index 67c0c9b..95d5b92 100644
--- a/tempest/api_schema/compute/parameter_types.py
+++ b/tempest/api_schema/compute/parameter_types.py
@@ -26,3 +26,8 @@
         'required': ['href', 'rel']
     }
 }
+
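+# Lower-case, colon-separated MAC address, e.g. 'fa:16:3e:4c:2c:30'.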
+mac_address = {
+    'type': 'string',
+    'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+}
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
new file mode 100644
index 0000000..31cd56b
--- /dev/null
+++ b/tempest/api_schema/compute/servers.py
@@ -0,0 +1,64 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+get_password = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'password': {'type': 'string'}
+        },
+        'required': ['password']
+    }
+}
+
+get_vnc_console = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'console': {
+                'type': 'object',
+                'properties': {
+                    'type': {'type': 'string'},
+                    'url': {
+                        'type': 'string',
+                        'format': 'uri'
+                    }
+                },
+                'required': ['type', 'url']
+            }
+        },
+        'required': ['console']
+    }
+}
+
+delete_server = {
+    'status_code': [204],
+}
+
+set_server_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {'type': 'object'}
+        },
+        'required': ['metadata']
+    }
+}
+
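+# Listing metadata returns the same 200 response with a 'metadata' object,
+# so the set schema is reused.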
+list_server_metadata = copy.deepcopy(set_server_metadata)
diff --git a/tempest/api_schema/compute/services.py b/tempest/api_schema/compute/services.py
index 4793f5a..4c58013 100644
--- a/tempest/api_schema/compute/services.py
+++ b/tempest/api_schema/compute/services.py
@@ -42,3 +42,22 @@
         'required': ['services']
     }
 }
+
+enable_service = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'service': {
+                'type': 'object',
+                'properties': {
+                    'status': {'type': 'string'},
+                    'binary': {'type': 'string'},
+                    'host': {'type': 'string'}
+                },
+                'required': ['status', 'binary', 'host']
+            }
+        },
+        'required': ['service']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/agents.py b/tempest/api_schema/compute/v2/agents.py
new file mode 100644
index 0000000..837731f
--- /dev/null
+++ b/tempest/api_schema/compute/v2/agents.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_agent = {
+    'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..de3e12b
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_aggregate = {
+    'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/availability_zone.py b/tempest/api_schema/compute/v2/availability_zone.py
new file mode 100644
index 0000000..d3d2787
--- /dev/null
+++ b/tempest/api_schema/compute/v2/availability_zone.py
@@ -0,0 +1,54 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'availabilityZoneInfo': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'zoneName': {'type': 'string'},
+                        'zoneState': {
+                            'type': 'object',
+                            'properties': {
+                                'available': {'type': 'boolean'}
+                            },
+                            'required': ['available']
+                        },
+                        # NOTE: Here is the difference between detail and
+                        # non-detail.
+                        'hosts': {'type': 'null'}
+                    },
+                    'required': ['zoneName', 'zoneState', 'hosts']
+                }
+            }
+        },
+        'required': ['availabilityZoneInfo']
+    }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+
+get_availability_zone_list_detail = copy.deepcopy(base)
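+# The detail listing fills 'hosts' with the per-host service map from the
+# common schema instead of null.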
+get_availability_zone_list_detail['response_body']['properties'][
+    'availabilityZoneInfo']['items']['properties']['hosts'] = common.detail
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
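+# The create response includes both 'data' and 'private_key', so the common
+# schema is reused unchanged.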
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/fixed_ips.py b/tempest/api_schema/compute/v2/fixed_ips.py
index a6add04..446633f 100644
--- a/tempest/api_schema/compute/v2/fixed_ips.py
+++ b/tempest/api_schema/compute/v2/fixed_ips.py
@@ -34,3 +34,8 @@
         'required': ['fixed_ip']
     }
 }
+
+fixed_ip_action = {
+    'status_code': [202],
+    'response_body': {'type': 'string'}
+}
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
new file mode 100644
index 0000000..bee6ecb
--- /dev/null
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -0,0 +1,57 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# The 'swap' attribute comes as an integer value, but if it is empty it
+# comes as "". So its type is defined as both string and integer.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+                          'os-flavor-access:is_public': {'type': 'boolean'},
+                          'rxtx_factor': {'type': 'number'},
+                          'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+unset_flavor_extra_specs = {
+    'status_code': [200]
+}
+
+create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# The 'swap' attribute comes as an integer value, but if it is empty it
+# comes as "". So its type is defined as both string and integer.
+create_get_flavor_details['response_body']['properties']['flavor'][
+    'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+create_get_flavor_details['response_body']['properties']['flavor'][
+    'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+                          'os-flavor-access:is_public': {'type': 'boolean'},
+                          'rxtx_factor': {'type': 'number'},
+                          'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+delete_flavor = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
new file mode 100644
index 0000000..cd6bd7b
--- /dev/null
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -0,0 +1,43 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+body = {
+    'type': 'object',
+    'properties': {
+        'host': {'type': 'string'},
+        'power_action': {'enum': ['startup']}
+    },
+    'required': ['host', 'power_action']
+}
+
+startup_host = {
+    'status_code': [200],
+    'response_body': body
+}
+
+# The 'power_action' attribute of the 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+    'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of the 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+    'enum': ['reboot']
+}
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index fad6b56..e97752d 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -14,43 +14,46 @@
 
 from tempest.api_schema.compute import parameter_types
 
+common_image_schema = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': 'string'},
+        'status': {'type': 'string'},
+        'updated': {'type': 'string'},
+        'links': parameter_types.links,
+        'name': {'type': 'string'},
+        'created': {'type': 'string'},
+        'minDisk': {'type': 'integer'},
+        'minRam': {'type': 'integer'},
+        'progress': {'type': 'integer'},
+        'metadata': {'type': 'object'},
+        'server': {
+            'type': 'object',
+            'properties': {
+                # NOTE: The type of 'id' is currently integer, but 'string'
+                # is also allowed here because it may be changed to 'uuid'
+                # in the future.
+                'id': {'type': ['integer', 'string']},
+                'links': parameter_types.links
+            },
+            'required': ['id', 'links']
+        },
+        'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+    },
+    # The 'server' attribute only comes in the response body if the image
+    # is associated with a server. 'OS-EXT-IMG-SIZE:size' is an API
+    # extension, so neither is defined as 'required'.
+    'required': ['id', 'status', 'updated', 'links', 'name',
+                 'created', 'minDisk', 'minRam', 'progress',
+                 'metadata']
+}
+
 get_image = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
         'properties': {
-            'image': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'status': {'type': 'string'},
-                    'updated': {'type': 'string'},
-                    'links': parameter_types.links,
-                    'name': {'type': 'string'},
-                    'created': {'type': 'string'},
-                    'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
-                    'minDisk': {'type': 'integer'},
-                    'minRam': {'type': 'integer'},
-                    'progress': {'type': 'integer'},
-                    'metadata': {'type': 'object'},
-                    'server': {
-                        'type': 'object',
-                        'properties': {
-                            # NOTE: Now the type of 'id' is integer, but here
-                            # allows 'string' also because we will be able to
-                            # change it to 'uuid' in the future.
-                            'id': {'type': ['integer', 'string']},
-                            'links': parameter_types.links
-                        },
-                        'required': ['id', 'links']
-                    }
-                },
-                # 'server' attributes only comes in response body if image is
-                # associated with any server. So it is not 'required'
-                'required': ['id', 'status', 'updated', 'links', 'name',
-                             'created', 'OS-EXT-IMG-SIZE:size', 'minDisk',
-                             'minRam', 'progress', 'metadata']
-            }
+            'image': common_image_schema
         },
         'required': ['image']
     }
@@ -67,20 +70,7 @@
                     'type': 'object',
                     'properties': {
                         'id': {'type': 'string'},
-                        'links': {
-                            'type': 'array',
-                            'items': {
-                                'type': 'object',
-                                'properties': {
-                                    'href': {
-                                        'type': 'string',
-                                        'format': 'uri'
-                                    },
-                                    'rel': {'type': 'string'}
-                                },
-                                'required': ['href', 'rel']
-                            }
-                        },
+                        'links': parameter_types.links,
                         'name': {'type': 'string'}
                     },
                     'required': ['id', 'links', 'name']
@@ -120,3 +110,17 @@
         'required': ['meta']
     }
 }
+
+list_images_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'images': {
+                'type': 'array',
+                'items': common_image_schema
+            }
+        },
+        'required': ['images']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/instance_usage_audit_logs.py b/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
index c1509b4..658f574 100644
--- a/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
+++ b/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
@@ -46,3 +46,14 @@
         'required': ['instance_usage_audit_log']
     }
 }
+
+list_instance_usage_audit_log = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'instance_usage_audit_logs': common_instance_usage_audit_log
+        },
+        'required': ['instance_usage_audit_logs']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
index d69cbd7..17dc4dd 100644
--- a/tempest/api_schema/compute/v2/quotas.py
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -45,3 +45,7 @@
         'required': ['quota_set']
     }
 }
+
+delete_quota = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 68b65b4..8b4bead 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -12,6 +12,49 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+common_security_group_rule = {
+    'from_port': {'type': ['integer', 'null']},
+    'to_port': {'type': ['integer', 'null']},
+    'group': {
+        'type': 'object',
+        'properties': {
+            'tenant_id': {'type': 'string'},
+            'name': {'type': 'string'}
+        }
+    },
+    'ip_protocol': {'type': ['string', 'null']},
+    # 'parent_group_id' can be a UUID, so it is also defined as 'string'.
+    'parent_group_id': {'type': ['string', 'integer', 'null']},
+    'ip_range': {
+        'type': 'object',
+        'properties': {
+            'cidr': {'type': 'string'}
+        }
+        # When an optional argument such as 'group_id' is provided
+        # in the request body, the 'cidr' attribute does not come
+        # back in the response body, so it is not 'required'.
+    },
+    'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': ['integer', 'string']},
+        'name': {'type': 'string'},
+        'tenant_id': {'type': 'string'},
+        'rules': {
+            'type': 'array',
+            'items': {
+                'type': ['object', 'null'],
+                'properties': common_security_group_rule
+            }
+        },
+        'description': {'type': 'string'},
+    },
+    'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
 list_security_groups = {
     'status_code': [200],
     'response_body': {
@@ -19,20 +62,40 @@
         'properties': {
             'security_groups': {
                 'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': ['integer', 'string']},
-                        'name': {'type': 'string'},
-                        'tenant_id': {'type': 'string'},
-                        'rules': {'type': 'array'},
-                        'description': {'type': 'string'},
-                    },
-                    'required': ['id', 'name', 'tenant_id', 'rules',
-                                 'description'],
-                }
+                'items': common_security_group
             }
         },
         'required': ['security_groups']
     }
 }
+
+get_security_group = create_security_group = update_security_group = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group': common_security_group
+        },
+        'required': ['security_group']
+    }
+}
+
+create_security_group_rule = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group_rule': {
+                'type': 'object',
+                'properties': common_security_group_rule,
+                'required': ['from_port', 'to_port', 'group', 'ip_protocol',
+                             'parent_group_id', 'id', 'ip_range']
+            }
+        },
+        'required': ['security_group_rule']
+    }
+}
+
+delete_security_group_rule = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index 4e0cec0..eed4589 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -42,3 +42,51 @@
         'required': ['server']
     }
 }
+
+list_virtual_interfaces = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'virtual_interfaces': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': 'string'},
+                        'mac_address': parameter_types.mac_address,
+                        'OS-EXT-VIF-NET:net_id': {'type': 'string'}
+                    },
+                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it
+                    # is not defined as 'required'.
+                    'required': ['id', 'mac_address']
+                }
+            }
+        },
+        'required': ['virtual_interfaces']
+    }
+}
+
+attach_volume = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volumeAttachment': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string'},
+                    'device': {'type': 'string'},
+                    'volumeId': {'type': 'string'},
+                    'serverId': {'type': ['integer', 'string']}
+                },
+                'required': ['id', 'device', 'volumeId', 'serverId']
+            }
+        },
+        'required': ['volumeAttachment']
+    }
+}
+
+detach_volume = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 9cfd7e3..84a659c 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-get_volume = {
+create_get_volume = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
@@ -108,3 +108,7 @@
         'required': ['volumes']
     }
 }
+
+delete_volume = {
+    'status_code': [202]
+}
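
The delete_volume schema, like delete_quota, detach_volume, and the v3 delete_* schemas later in this change, holds only a 'status_code' key because the API returns an empty body. A minimal sketch of what checking such a schema amounts to (check_delete_response is a hypothetical helper, not part of Tempest):

def check_delete_response(schema, status):
    # Delete-style schemas only constrain the status code; a 202/204
    # response carries no body to validate.
    assert status in schema['status_code'], (
        'unexpected status %s, wanted one of %s'
        % (status, schema['status_code']))


check_delete_response({'status_code': [202]}, 202)
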
diff --git a/tempest/api_schema/compute/v3/agents.py b/tempest/api_schema/compute/v3/agents.py
new file mode 100644
index 0000000..63d1c46
--- /dev/null
+++ b/tempest/api_schema/compute/v3/agents.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_agent = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..358e455
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_aggregate = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/availability_zone.py b/tempest/api_schema/compute/v3/availability_zone.py
new file mode 100644
index 0000000..5f36c33
--- /dev/null
+++ b/tempest/api_schema/compute/v3/availability_zone.py
@@ -0,0 +1,53 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'availability_zone_info': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'zone_name': {'type': 'string'},
+                        'zone_state': {
+                            'type': 'object',
+                            'properties': {
+                                'available': {'type': 'boolean'}
+                            },
+                            'required': ['available']
+                        },
+                        # NOTE: 'hosts' is what differs between the detail
+                        # and non-detail responses
+                        'hosts': {'type': 'null'}
+                    },
+                    'required': ['zone_name', 'zone_state', 'hosts']
+                }
+            }
+        },
+        'required': ['availability_zone_info']
+    }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+    'availability_zone_info']['items']['properties']['hosts'] = common.detail
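
The copy.deepcopy calls matter here because the base schema is a nested dict: a shallow copy would share the inner dicts, so overriding 'hosts' in the detail variant would silently change the non-detail schema as well. A toy, self-contained illustration (the dict below is invented, not the real schema):

import copy

base = {'response_body': {'properties': {'hosts': {'type': 'null'}}}}

shallow = copy.copy(base)
shallow['response_body']['properties']['hosts'] = {'type': 'array'}
# The override leaked back into the shared nested dict:
assert base['response_body']['properties']['hosts'] == {'type': 'array'}

base['response_body']['properties']['hosts'] = {'type': 'null'}
deep = copy.deepcopy(base)
deep['response_body']['properties']['hosts'] = {'type': 'array'}
# With deepcopy the original stays untouched:
assert base['response_body']['properties']['hosts'] == {'type': 'null'}
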
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
new file mode 100644
index 0000000..52010f5
--- /dev/null
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -0,0 +1,68 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+from tempest.api_schema.compute import flavors_extra_specs
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# NOTE: In the v3 API, 'swap' comes back as 0, not as the empty string '""'
+# used by the v2 API, so 'swap' is left as integer type only.
+
+# Defining extra attributes for V3 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'properties'].update({'disabled': {'type': 'boolean'},
+                          'ephemeral': {'type': 'integer'},
+                          'flavor-access:is_public': {'type': 'boolean'},
+                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'required'].extend(['disabled', 'ephemeral'])
+
+set_flavor_extra_specs = copy.deepcopy(flavors_extra_specs.flavor_extra_specs)
+set_flavor_extra_specs['status_code'] = [201]
+
+unset_flavor_extra_specs = {
+    'status_code': [204]
+}
+
+get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# NOTE: In the v3 API, 'swap' comes back as 0, not as the empty string '""'
+# used by the v2 API, so 'swap' is left as integer type only.
+
+# Defining extra attributes for V3 flavor schema
+get_flavor_details['response_body']['properties']['flavor'][
+    'properties'].update({'disabled': {'type': 'boolean'},
+                          'ephemeral': {'type': 'integer'},
+                          'flavor-access:is_public': {'type': 'boolean'},
+                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+get_flavor_details['response_body']['properties']['flavor'][
+    'required'].extend(['disabled', 'ephemeral'])
+
+
+create_flavor_details = copy.deepcopy(get_flavor_details)
+
+# Overriding the status code for create flavor V3 API.
+create_flavor_details['status_code'] = [201]
+
+delete_flavor = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
new file mode 100644
index 0000000..2cf8f9b
--- /dev/null
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.api_schema.compute.v2 import hosts
+
+startup_host = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'host': hosts.body
+        },
+        'required': ['host']
+    }
+}
+
+# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+    'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+    'enum': ['reboot']
+}
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
index 1b9989d..aec1e80 100644
--- a/tempest/api_schema/compute/v3/quotas.py
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -40,3 +40,46 @@
         'required': ['quota_set']
     }
 }
+
+quota_common_info = {
+    'type': 'object',
+    'properties': {
+        'reserved': {'type': 'integer'},
+        'limit': {'type': 'integer'},
+        'in_use': {'type': 'integer'}
+    },
+    'required': ['reserved', 'limit', 'in_use']
+}
+
+quota_set_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'quota_set': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string'},
+                    'instances': quota_common_info,
+                    'cores': quota_common_info,
+                    'ram': quota_common_info,
+                    'floating_ips': quota_common_info,
+                    'fixed_ips': quota_common_info,
+                    'metadata_items': quota_common_info,
+                    'key_pairs': quota_common_info,
+                    'security_groups': quota_common_info,
+                    'security_group_rules': quota_common_info
+                },
+                'required': ['id', 'instances', 'cores', 'ram',
+                             'floating_ips', 'fixed_ips',
+                             'metadata_items', 'key_pairs',
+                             'security_groups', 'security_group_rules']
+            }
+        },
+        'required': ['quota_set']
+    }
+}
+
+delete_quota = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 390962e..f2a4b78 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -42,3 +42,7 @@
         'required': ['server']
     }
 }
+
+attach_detach_volume = {
+    'status_code': [202]
+}
diff --git a/tempest/auth.py b/tempest/auth.py
index 5fc923f..ac8cbd1 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
         :param client_type: 'tempest' or 'official'
         :param interface: 'json' or 'xml'. Applicable for tempest client only
         """
+        credentials = self._convert_credentials(credentials)
         if self.check_credentials(credentials):
             self.credentials = credentials
         else:
             raise TypeError("Invalid credentials")
-        self.credentials = credentials
         self.client_type = client_type
         self.interface = interface
         if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
         self.alt_auth_data = None
         self.alt_part = None
 
+    def _convert_credentials(self, credentials):
+        # Support dict credentials for backwards compatibility
+        if isinstance(credentials, dict):
+            return get_credentials(**credentials)
+        else:
+            return credentials
+
     def __str__(self):
         return "Creds :{creds}, client type: {client_type}, interface: " \
                "{interface}, cached auth data: {cache}".format(
@@ -76,9 +83,9 @@
     @classmethod
     def check_credentials(cls, credentials):
         """
-        Verify credentials are valid. Subclasses can do a better check.
+        Verify credentials are valid.
         """
-        return isinstance(credentials, dict)
+        return isinstance(credentials, Credentials) and credentials.is_valid()
 
     @property
     def auth_data(self):
@@ -218,16 +225,6 @@
 
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
 
-    @classmethod
-    def check_credentials(cls, credentials, scoped=True):
-        # tenant_name is optional if not scoped
-        valid = super(KeystoneV2AuthProvider, cls).check_credentials(
-            credentials) and 'username' in credentials and \
-            'password' in credentials
-        if scoped:
-            valid = valid and 'tenant_name' in credentials
-        return valid
-
     def _auth_client(self):
         if self.client_type == 'tempest':
             if self.interface == 'json':
@@ -240,9 +237,9 @@
     def _auth_params(self):
         if self.client_type == 'tempest':
             return dict(
-                user=self.credentials['username'],
-                password=self.credentials['password'],
-                tenant=self.credentials.get('tenant_name', None),
+                user=self.credentials.username,
+                password=self.credentials.password,
+                tenant=self.credentials.tenant_name,
                 auth_data=True)
         else:
             raise NotImplementedError
@@ -303,12 +300,15 @@
 
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
 
+    def _convert_credentials(self, credentials):
+        # For V3, do not convert, as V3 Credentials are not defined yet
+        return credentials
+
     @classmethod
     def check_credentials(cls, credentials, scoped=True):
         # tenant_name is optional if not scoped
-        valid = super(KeystoneV3AuthProvider, cls).check_credentials(
-            credentials) and 'username' in credentials and \
-            'password' in credentials and 'domain_name' in credentials
+        valid = 'username' in credentials and 'password' in credentials \
+            and 'domain_name' in credentials
         if scoped:
             valid = valid and 'tenant_name' in credentials
         return valid
@@ -327,7 +327,7 @@
             return dict(
                 user=self.credentials['username'],
                 password=self.credentials['password'],
-                tenant=self.credentials.get('tenant_name', None),
+                tenant=self.credentials['tenant_name'],
                 domain=self.credentials['domain_name'],
                 auth_data=True)
         else:
@@ -398,3 +398,144 @@
                                             self.EXPIRY_DATE_FORMAT)
         return expiry - self.token_expiry_threshold <= \
             datetime.datetime.utcnow()
+
+
+def get_credentials(credential_type=None, **kwargs):
+    """
+    Builds a credentials object based on the configured auth_version
+
+    :param credential_type (string): request credentials from the tempest
+           configuration file. Valid values are defined in
+           Credentials.TYPES.
+    :param kwargs (dict): taken into account only if credential_type is
+           not specified or None. Dict of credential key/value pairs.
+
+    Examples:
+
+        Returns credentials from the provided parameters:
+        >>> get_credentials(username='foo', password='bar')
+
+        Returns credentials from tempest configuration:
+        >>> get_credentials(credential_type='user')
+    """
+    if CONF.identity.auth_version == 'v2':
+        credential_class = KeystoneV2Credentials
+    else:
+        raise exceptions.InvalidConfiguration('Unsupported auth version')
+    if credential_type is not None:
+        creds = credential_class.get_default(credential_type)
+    else:
+        creds = credential_class(**kwargs)
+    return creds
+
+
+class Credentials(object):
+    """
+    Set of credentials for accessing OpenStack services
+
+    ATTRIBUTES: list of valid class attributes representing credentials.
+
+    TYPES: types of credentials available in the configuration file.
+           For each key there's a tuple (section, prefix) to match the
+           configuration options.
+    """
+
+    ATTRIBUTES = []
+    TYPES = {
+        'identity_admin': ('identity', 'admin'),
+        'compute_admin': ('compute_admin', None),
+        'user': ('identity', None),
+        'alt_user': ('identity', 'alt')
+    }
+
+    def __init__(self, **kwargs):
+        """
+        Enforce the available attributes at init time (only).
+        Additional attributes can still be set afterwards if tests need
+        to do so.
+        """
+        self._apply_credentials(kwargs)
+
+    def _apply_credentials(self, attr):
+        for key in attr.keys():
+            if key in self.ATTRIBUTES:
+                setattr(self, key, attr[key])
+            else:
+                raise exceptions.InvalidCredentials
+
+    def __str__(self):
+        """
+        Represent only attributes included in self.ATTRIBUTES
+        """
+        _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+        return str(_repr)
+
+    def __eq__(self, other):
+        """
+        Credentials are equal if attributes in self.ATTRIBUTES are equal
+        """
+        return str(self) == str(other)
+
+    def __getattr__(self, key):
+        # If an attribute is set, __getattr__ is not invoked
+        # If an attribute is not set, and it is a known one, return None
+        if key in self.ATTRIBUTES:
+            return None
+        else:
+            raise AttributeError
+
+    def __delitem__(self, key):
+        # For backwards compatibility, support dict behaviour
+        if key in self.ATTRIBUTES:
+            delattr(self, key)
+        else:
+            raise AttributeError
+
+    def get(self, item, default):
+        # In this patch, act as a dict for backwards compatibility
+        try:
+            return getattr(self, item)
+        except AttributeError:
+            return default
+
+    @classmethod
+    def get_default(cls, credentials_type):
+        if credentials_type not in cls.TYPES:
+            raise exceptions.InvalidCredentials()
+        creds = cls._get_default(credentials_type)
+        if not creds.is_valid():
+            raise exceptions.InvalidConfiguration()
+        return creds
+
+    @classmethod
+    def _get_default(cls, credentials_type):
+        raise NotImplementedError
+
+    def is_valid(self):
+        raise NotImplementedError
+
+
+class KeystoneV2Credentials(Credentials):
+
+    CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+    ATTRIBUTES = ['user_id', 'tenant_id']
+    ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+    @classmethod
+    def _get_default(cls, credentials_type='user'):
+        params = {}
+        section, prefix = cls.TYPES[credentials_type]
+        for attr in cls.CONF_ATTRIBUTES:
+            _section = getattr(CONF, section)
+            if prefix is None:
+                params[attr] = getattr(_section, attr)
+            else:
+                params[attr] = getattr(_section, prefix + "_" + attr)
+        return KeystoneV2Credentials(**params)
+
+    def is_valid(self):
+        """
+        The minimum set of valid credentials is username and password.
+        Tenant is optional.
+        """
+        return None not in (self.username, self.password)
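
A hedged usage sketch of the credentials objects introduced above, assuming a configured Tempest environment with Keystone v2 (the literal values are placeholders and the snippet is illustrative, not part of the change):

from tempest import auth

# Build credentials from explicit values; attributes listed in
# ATTRIBUTES but not passed default to None, and dict-style access
# still works for backwards compatibility.
creds = auth.KeystoneV2Credentials(username='foo', password='bar',
                                   tenant_name='demo')
assert creds.is_valid()
assert creds.user_id is None
assert creds.get('tenant_name', 'missing') == 'demo'

# Or build them from tempest.conf via the new factory.
conf_creds = auth.get_credentials(credential_type='user')
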
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index cd819a4..36cc324 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -48,23 +48,93 @@
 
     def test_sahara_plugins_list(self):
         plugins = self.parser.listing(self.sahara('plugin-list'))
-        self.assertTableStruct(plugins, ['name', 'versions', 'title'])
+        self.assertTableStruct(plugins, [
+            'name',
+            'versions',
+            'title'
+        ])
 
     def test_sahara_plugins_show(self):
-        plugin = self.parser.listing(self.sahara('plugin-show',
-                                                 params='--name vanilla'))
-        self.assertTableStruct(plugin, ['Property', 'Value'])
+        result = self.sahara('plugin-show', params='--name vanilla')
+        plugin = self.parser.listing(result)
+        self.assertTableStruct(plugin, [
+            'Property',
+            'Value'
+        ])
 
     def test_sahara_node_group_template_list(self):
-        plugins = self.parser.listing(self.sahara('node-group-template-list'))
-        self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
-                                         'node_processes', 'description'])
+        result = self.sahara('node-group-template-list')
+        node_group_templates = self.parser.listing(result)
+        self.assertTableStruct(node_group_templates, [
+            'name',
+            'id',
+            'plugin_name',
+            'node_processes',
+            'description'
+        ])
 
     def test_sahara_cluster_template_list(self):
-        plugins = self.parser.listing(self.sahara('cluster-template-list'))
-        self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
-                                         'node_groups', 'description'])
+        result = self.sahara('cluster-template-list')
+        cluster_templates = self.parser.listing(result)
+        self.assertTableStruct(cluster_templates, [
+            'name',
+            'id',
+            'plugin_name',
+            'node_groups',
+            'description'
+        ])
 
     def test_sahara_cluster_list(self):
-        plugins = self.parser.listing(self.sahara('cluster-list'))
-        self.assertTableStruct(plugins, ['name', 'id', 'status', 'node_count'])
+        result = self.sahara('cluster-list')
+        clusters = self.parser.listing(result)
+        self.assertTableStruct(clusters, [
+            'name',
+            'id',
+            'status',
+            'node_count'
+        ])
+
+    def test_sahara_data_source_list(self):
+        result = self.sahara('data-source-list')
+        data_sources = self.parser.listing(result)
+        self.assertTableStruct(data_sources, [
+            'name',
+            'id',
+            'type',
+            'description'
+        ])
+
+    def test_sahara_job_binary_data_list(self):
+        result = self.sahara('job-binary-data-list')
+        job_binary_data_list = self.parser.listing(result)
+        self.assertTableStruct(job_binary_data_list, [
+            'id',
+            'name'
+        ])
+
+    def test_sahara_job_binary_list(self):
+        result = self.sahara('job-binary-list')
+        job_binaries = self.parser.listing(result)
+        self.assertTableStruct(job_binaries, [
+            'id',
+            'name',
+            'description'
+        ])
+
+    def test_sahara_job_template_list(self):
+        result = self.sahara('job-template-list')
+        job_templates = self.parser.listing(result)
+        self.assertTableStruct(job_templates, [
+            'id',
+            'name',
+            'description'
+        ])
+
+    def test_sahara_job_list(self):
+        result = self.sahara('job-list')
+        jobs = self.parser.listing(result)
+        self.assertTableStruct(jobs, [
+            'id',
+            'cluster_id',
+            'status'
+        ])
diff --git a/tempest/clients.py b/tempest/clients.py
index 7ebd983..2fe4c95 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,15 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# Default client libs
-import cinderclient.client
-import glanceclient
-import heatclient.client
 import keystoneclient.exceptions
 import keystoneclient.v2_0.client
-import neutronclient.v2_0.client
-import novaclient.client
-import swiftclient
 
 from tempest.common.rest_client import NegativeRestClient
 from tempest import config
@@ -30,6 +23,8 @@
 from tempest.openstack.common import log as logging
 from tempest.services.baremetal.v1.client_json import BaremetalClientJSON
 from tempest.services import botoclients
+from tempest.services.compute.json.agents_client import \
+    AgentsClientJSON
 from tempest.services.compute.json.aggregates_client import \
     AggregatesClientJSON
 from tempest.services.compute.json.availability_zone_client import \
@@ -52,6 +47,8 @@
     InterfacesClientJSON
 from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
 from tempest.services.compute.json.limits_client import LimitsClientJSON
+from tempest.services.compute.json.migrations_client import \
+    MigrationsClientJSON
 from tempest.services.compute.json.quotas_client import QuotasClientJSON
 from tempest.services.compute.json.security_groups_client import \
     SecurityGroupsClientJSON
@@ -78,6 +75,8 @@
     InterfacesV3ClientJSON
 from tempest.services.compute.v3.json.keypairs_client import \
     KeyPairsV3ClientJSON
+from tempest.services.compute.v3.json.migration_client import \
+    MigrationsV3ClientJSON
 from tempest.services.compute.v3.json.quotas_client import \
     QuotasV3ClientJSON
 from tempest.services.compute.v3.json.servers_client import \
@@ -325,6 +324,8 @@
             self.tenant_usages_client = TenantUsagesClientJSON(
                 self.auth_provider)
             self.version_v3_client = VersionV3ClientJSON(self.auth_provider)
+            self.migrations_v3_client = MigrationsV3ClientJSON(
+                self.auth_provider)
             self.policy_client = PolicyClientJSON(self.auth_provider)
             self.hosts_client = HostsClientJSON(self.auth_provider)
             self.hypervisor_v3_client = HypervisorV3ClientJSON(
@@ -366,6 +367,7 @@
 
         # common clients
         self.account_client = AccountClient(self.auth_provider)
+        self.agents_client = AgentsClientJSON(self.auth_provider)
         if CONF.service_available.glance:
             self.image_client = ImageClientJSON(self.auth_provider)
             self.image_client_v2 = ImageClientV2JSON(self.auth_provider)
@@ -381,6 +383,7 @@
             AccountClientCustomizedHeader(self.auth_provider)
         self.data_processing_client = DataProcessingClient(
             self.auth_provider)
+        self.migrations_client = MigrationsClientJSON(self.auth_provider)
 
 
 class AltManager(Manager):
@@ -429,24 +432,6 @@
                       service=service)
 
 
-class OrchestrationManager(Manager):
-    """
-    Manager object that uses the admin credentials for its
-    so that heat templates can create users
-    """
-    def __init__(self, interface='json', service=None):
-        base = super(OrchestrationManager, self)
-        # heat currently needs an admin user so that stacks can create users
-        # however the tests need the demo tenant so that the neutron
-        # private network is the default. DO NOT change this auth combination
-        # until heat can run with the demo user.
-        base.__init__(CONF.identity.admin_username,
-                      CONF.identity.admin_password,
-                      CONF.identity.tenant_name,
-                      interface=interface,
-                      service=service)
-
-
 class OfficialClientManager(manager.Manager):
     """
     Manager that provides access to the official python clients for
@@ -456,6 +441,8 @@
     NOVACLIENT_VERSION = '2'
     CINDERCLIENT_VERSION = '1'
     HEATCLIENT_VERSION = '1'
+    IRONICCLIENT_VERSION = '1'
+    SAHARACLIENT_VERSION = '1.1'
 
     def __init__(self, username, password, tenant_name):
         # FIXME(andreaf) Auth provider for client_type 'official' is
@@ -465,6 +452,7 @@
         # super cares for credentials validation
         super(OfficialClientManager, self).__init__(
             username=username, password=password, tenant_name=tenant_name)
+        self.baremetal_client = self._get_baremetal_client()
         self.compute_client = self._get_compute_client(username,
                                                        password,
                                                        tenant_name)
@@ -484,11 +472,34 @@
             username,
             password,
             tenant_name)
+        self.data_processing_client = self._get_data_processing_client(
+            username,
+            password,
+            tenant_name)
+
+    def _get_roles(self):
+        keystone_admin = self._get_identity_client(
+            CONF.identity.admin_username,
+            CONF.identity.admin_password,
+            CONF.identity.admin_tenant_name)
+
+        username = self.credentials['username']
+        tenant_name = self.credentials['tenant_name']
+        user_id = keystone_admin.users.find(name=username).id
+        tenant_id = keystone_admin.tenants.find(name=tenant_name).id
+
+        roles = keystone_admin.roles.roles_for_user(
+            user=user_id, tenant=tenant_id)
+
+        return [r.name for r in roles]
 
     def _get_compute_client(self, username, password, tenant_name):
         # Novaclient will not execute operations for anyone but the
         # identified user, so a new client needs to be created for
         # each user that operations need to be performed for.
+        if not CONF.service_available.nova:
+            return None
+        import novaclient.client
         self._validate_credentials(username, password, tenant_name)
 
         auth_url = CONF.identity.uri
@@ -510,6 +521,9 @@
                                         http_log_debug=True)
 
     def _get_image_client(self):
+        if not CONF.service_available.glance:
+            return None
+        import glanceclient
         token = self.identity_client.auth_token
         region = CONF.identity.region
         endpoint_type = CONF.image.endpoint_type
@@ -521,9 +535,13 @@
                                    insecure=dscv)
 
     def _get_volume_client(self, username, password, tenant_name):
+        if not CONF.service_available.cinder:
+            return None
+        import cinderclient.client
         auth_url = CONF.identity.uri
         region = CONF.identity.region
         endpoint_type = CONF.volume.endpoint_type
+        dscv = CONF.identity.disable_ssl_certificate_validation
         return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
                                           username,
                                           password,
@@ -531,9 +549,13 @@
                                           auth_url,
                                           region_name=region,
                                           endpoint_type=endpoint_type,
+                                          insecure=dscv,
                                           http_log_debug=True)
 
     def _get_object_storage_client(self, username, password, tenant_name):
+        if not CONF.service_available.swift:
+            return None
+        import swiftclient
         auth_url = CONF.identity.uri
         # add current tenant to swift operator role group.
         keystone_admin = self._get_identity_client(
@@ -563,6 +585,9 @@
 
     def _get_orchestration_client(self, username=None, password=None,
                                   tenant_name=None):
+        if not CONF.service_available.heat:
+            return None
+        import heatclient.client
         if not username:
             username = CONF.identity.admin_username
         if not password:
@@ -606,6 +631,37 @@
                                                  auth_url=auth_url,
                                                  insecure=dscv)
 
+    def _get_baremetal_client(self):
+        # The ironic client is currently intended to be used by admin users
+        if not CONF.service_available.ironic:
+            return None
+        import ironicclient.client
+        roles = self._get_roles()
+        if CONF.identity.admin_role not in roles:
+            return None
+
+        auth_url = CONF.identity.uri
+        api_version = self.IRONICCLIENT_VERSION
+        insecure = CONF.identity.disable_ssl_certificate_validation
+        service_type = CONF.baremetal.catalog_type
+        endpoint_type = CONF.baremetal.endpoint_type
+        creds = {
+            'os_username': self.credentials['username'],
+            'os_password': self.credentials['password'],
+            'os_tenant_name': self.credentials['tenant_name']
+        }
+
+        try:
+            return ironicclient.client.get_client(
+                api_version=api_version,
+                os_auth_url=auth_url,
+                insecure=insecure,
+                os_service_type=service_type,
+                os_endpoint_type=endpoint_type,
+                **creds)
+        except keystoneclient.exceptions.EndpointNotFound:
+            return None
+
     def _get_network_client(self):
         # The intended configuration is for the network client to have
         # admin privileges and indicate for whom resources are being
@@ -613,6 +669,9 @@
         # preferable to authenticating as a specific user because
         # working with certain resources (public routers and networks)
         # often requires admin privileges anyway.
+        if not CONF.service_available.neutron:
+            return None
+        import neutronclient.v2_0.client
         username = CONF.identity.admin_username
         password = CONF.identity.admin_password
         tenant_name = CONF.identity.admin_tenant_name
@@ -629,3 +688,25 @@
                                                 endpoint_type=endpoint_type,
                                                 auth_url=auth_url,
                                                 insecure=dscv)
+
+    def _get_data_processing_client(self, username, password, tenant_name):
+        if not CONF.service_available.sahara:
+            # Sahara isn't available
+            return None
+
+        import saharaclient.client
+
+        self._validate_credentials(username, password, tenant_name)
+
+        endpoint_type = CONF.data_processing.endpoint_type
+        catalog_type = CONF.data_processing.catalog_type
+        auth_url = CONF.identity.uri
+
+        client = saharaclient.client.Client(self.SAHARACLIENT_VERSION,
+                                            username, password,
+                                            project_name=tenant_name,
+                                            endpoint_type=endpoint_type,
+                                            service_type=catalog_type,
+                                            auth_url=auth_url)
+
+        return client
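
The clients.py changes defer importing each official client library until its service is known to be enabled, so a missing optional package no longer breaks unrelated runs. A self-contained sketch of the same guard-then-import pattern, using a stand-in config object (the names below are invented for illustration):

class _ServiceAvailable(object):
    sahara = False


class _FakeConf(object):
    service_available = _ServiceAvailable()


CONF = _FakeConf()


def get_data_processing_client(username, password, tenant_name):
    if not CONF.service_available.sahara:
        # Service disabled: skip both the import and the client build.
        return None
    import saharaclient.client  # deferred, optional dependency
    return saharaclient.client.Client('1.1', username, password,
                                      project_name=tenant_name)


assert get_data_processing_client('demo', 'secret', 'demo') is None
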
diff --git a/tempest/cmd/__init__.py b/tempest/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cmd/__init__.py
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
new file mode 100755
index 0000000..4d74f53
--- /dev/null
+++ b/tempest/cmd/verify_tempest_config.py
@@ -0,0 +1,346 @@
+#!/usr/bin/env python
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import argparse
+import json
+import os
+import sys
+import urlparse
+
+import httplib2
+from six.moves import configparser
+
+from tempest import clients
+from tempest import config
+
+
+CONF = config.CONF
+RAW_HTTP = httplib2.Http()
+CONF_FILE = None
+OUTFILE = sys.stdout
+
+
+def _get_config_file():
+    default_config_dir = os.path.join(os.path.abspath(
+        os.path.dirname(os.path.dirname(__file__))), "etc")
+    default_config_file = "tempest.conf"
+
+    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
+    conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
+    path = os.path.join(conf_dir, conf_file)
+    fd = open(path, 'rw')
+    return fd
+
+
+def change_option(option, group, value):
+    config_parse = configparser.SafeConfigParser()
+    config_parse.optionxform = str
+    config_parse.readfp(CONF_FILE)
+    if not config_parse.has_section(group):
+        config_parse.add_section(group)
+    config_parse.set(group, option, str(value))
+    global OUTFILE
+    config_parse.write(OUTFILE)
+
+
+def print_and_or_update(option, group, value, update):
+    print('Config option %s in group %s should be changed to: %s'
+          % (option, group, value))
+    if update:
+        change_option(option, group, value)
+
+
+def verify_glance_api_versions(os, update):
+    # Check glance api versions
+    __, versions = os.image_client.get_versions()
+    if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
+                                             versions):
+        print_and_or_update('api_v1', 'image_feature_enabled',
+                            not CONF.image_feature_enabled.api_v1, update)
+    if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'image_feature_enabled',
+                            not CONF.image_feature_enabled.api_v2, update)
+
+
+def _get_api_versions(os, service):
+    client_dict = {
+        'nova': os.servers_client,
+        'keystone': os.identity_client,
+        'cinder': os.volumes_client,
+    }
+    client_dict[service].skip_path()
+    endpoint_parts = urlparse.urlparse(client_dict[service].base_url)
+    endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
+    __, body = RAW_HTTP.request(endpoint, 'GET')
+    client_dict[service].reset_path()
+    body = json.loads(body)
+    if service == 'keystone':
+        versions = map(lambda x: x['id'], body['versions']['values'])
+    else:
+        versions = map(lambda x: x['id'], body['versions'])
+    return versions
+
+
+def verify_keystone_api_versions(os, update):
+    # Check keystone api versions
+    versions = _get_api_versions(os, 'keystone')
+    if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'identity_feature_enabled',
+                            not CONF.identity_feature_enabled.api_v2, update)
+    if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+        print_and_or_update('api_v3', 'identity_feature_enabled',
+                            not CONF.identity_feature_enabled.api_v3, update)
+
+
+def verify_nova_api_versions(os, update):
+    versions = _get_api_versions(os, 'nova')
+    if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
+        print_and_or_update('api_v3', 'compute_feature_enabled',
+                            not CONF.compute_feature_enabled.api_v3, update)
+
+
+def verify_cinder_api_versions(os, update):
+    # Check cinder api versions
+    versions = _get_api_versions(os, 'cinder')
+    if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+        print_and_or_update('api_v1', 'volume_feature_enabled',
+                            not CONF.volume_feature_enabled.api_v1, update)
+    if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'volume_feature_enabled',
+                            not CONF.volume_feature_enabled.api_v2, update)
+
+
+def get_extension_client(os, service):
+    extensions_client = {
+        'nova': os.extensions_client,
+        'nova_v3': os.extensions_v3_client,
+        'cinder': os.volumes_extension_client,
+        'neutron': os.network_client,
+        'swift': os.account_client,
+    }
+    if service not in extensions_client:
+        print('No tempest extensions client for %s' % service)
+        exit(1)
+    return extensions_client[service]
+
+
+def get_enabled_extensions(service):
+    extensions_options = {
+        'nova': CONF.compute_feature_enabled.api_extensions,
+        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
+        'cinder': CONF.volume_feature_enabled.api_extensions,
+        'neutron': CONF.network_feature_enabled.api_extensions,
+        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
+    }
+    if service not in extensions_options:
+        print('No supported extensions list option for %s' % service)
+        exit(1)
+    return extensions_options[service]
+
+
+def verify_extensions(os, service, results):
+    extensions_client = get_extension_client(os, service)
+    __, resp = extensions_client.list_extensions()
+    if isinstance(resp, dict):
+        # Neutron's extension 'name' field is not a single word (it has
+        # spaces in the string). Since that can't be used as a list option,
+        # the api_extension option in the network-feature-enabled group uses
+        # the alias instead of the name.
+        if service == 'neutron':
+            extensions = map(lambda x: x['alias'], resp['extensions'])
+        elif service == 'swift':
+            # Remove Swift general information from extensions list
+            resp.pop('swift')
+            extensions = resp.keys()
+        else:
+            extensions = map(lambda x: x['name'], resp['extensions'])
+
+    else:
+        extensions = map(lambda x: x['name'], resp)
+    if not results.get(service):
+        results[service] = {}
+    extensions_opt = get_enabled_extensions(service)
+    if extensions_opt[0] == 'all':
+        results[service]['extensions'] = extensions
+        return results
+    # Verify that all configured extensions are actually enabled
+    for extension in extensions_opt:
+        results[service][extension] = extension in extensions
+    # Verify that there aren't additional extensions enabled that aren't
+    # specified in the config list
+    for extension in extensions:
+        if extension not in extensions_opt:
+            results[service][extension] = False
+    return results
+
+
+def display_results(results, update, replace):
+    update_dict = {
+        'swift': 'object-storage-feature-enabled',
+        'nova': 'compute-feature-enabled',
+        'nova_v3': 'compute-feature-enabled',
+        'cinder': 'volume-feature-enabled',
+        'neutron': 'network-feature-enabled',
+    }
+    for service in results:
+        # If all extensions are specified as being enabled, there is no way
+        # to verify this, so we just assume it to be true.
+        if results[service].get('extensions'):
+            if replace:
+                output_list = results[service].get('extensions')
+            else:
+                output_list = ['all']
+        else:
+            extension_list = get_enabled_extensions(service)
+            output_list = []
+            for extension in results[service]:
+                if not results[service][extension]:
+                    if extension in extension_list:
+                        print("%s extension: %s should not be included in the "
+                              "list of enabled extensions" % (service,
+                                                              extension))
+                    else:
+                        print("%s extension: %s should be included in the list"
+                              " of enabled extensions" % (service, extension))
+                        output_list.append(extension)
+                else:
+                    output_list.append(extension)
+        if update:
+            # Sort List
+            output_list.sort()
+            # Convert list to a string
+            output_string = ', '.join(output_list)
+            if service == 'swift':
+                change_option('discoverable_apis', update_dict[service],
+                              output_string)
+            elif service == 'nova_v3':
+                change_option('api_v3_extensions', update_dict[service],
+                              output_string)
+            else:
+                change_option('api_extensions', update_dict[service],
+                              output_string)
+
+
+def check_service_availability(os, update):
+    services = []
+    avail_services = []
+    codename_match = {
+        'volume': 'cinder',
+        'network': 'neutron',
+        'image': 'glance',
+        'object_storage': 'swift',
+        'compute': 'nova',
+        'orchestration': 'heat',
+        'metering': 'ceilometer',
+        'telemetry': 'ceilometer',
+        'data_processing': 'sahara',
+        'baremetal': 'ironic',
+        'identity': 'keystone',
+        'queuing': 'marconi',
+        'database': 'trove'
+    }
+    # Get catalog list for endpoints to use for validation
+    __, endpoints = os.endpoints_client.list_endpoints()
+    for endpoint in endpoints:
+        __, service = os.service_client.get_service(endpoint['service_id'])
+        services.append(service['type'])
+    # Pull all catalog types from config file and compare against endpoint list
+    for cfgname in dir(CONF._config):
+        cfg = getattr(CONF, cfgname)
+        catalog_type = getattr(cfg, 'catalog_type', None)
+        if not catalog_type:
+            continue
+        else:
+            if cfgname == 'identity':
+                # Keystone is a required service for tempest
+                continue
+            if catalog_type not in services:
+                if getattr(CONF.service_available, codename_match[cfgname]):
+                    print('Endpoint type %s not found. Either disable service'
+                          ' %s or fix the catalog_type in the config file.' % (
+                          catalog_type, codename_match[cfgname]))
+                    if update:
+                        change_option(codename_match[cfgname],
+                                      'service_available', False)
+            else:
+                if not getattr(CONF.service_available,
+                               codename_match[cfgname]):
+                    print('Endpoint type %s is available, service %s should be'
+                          ' set as available in the config file.' % (
+                          catalog_type, codename_match[cfgname]))
+                    if update:
+                        change_option(codename_match[cfgname],
+                                      'service_available', True)
+                else:
+                    avail_services.append(codename_match[cfgname])
+    return avail_services
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-u', '--update', action='store_true',
+                        help='Update the config file with results from api '
+                             'queries. This assumes whatever is set in the '
+                             'config file is incorrect. For endpoint checks, '
+                             'where either the catalog type or the '
+                             'service_available option could be wrong, the '
+                             'service_available option is assumed to be '
+                             'incorrect and is changed.')
+    parser.add_argument('-o', '--output',
+                        help="Output file to write an updated config file to. "
+                             "This has to be a separate file from the "
+                             "original config file. If one isn't specified "
+                             "with -u the new config file will be printed to "
+                             "STDOUT")
+    parser.add_argument('-r', '--replace-ext', action='store_true',
+                        help="If specified the all option will be replaced "
+                             "with a full list of extensions")
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    print('Running config verification...')
+    opts = parse_args()
+    update = opts.update
+    replace = opts.replace_ext
+    global CONF_FILE
+    global OUTFILE
+    if update:
+        CONF_FILE = _get_config_file()
+        if opts.output:
+            OUTFILE = open(opts.output, 'w+')
+    os = clients.ComputeAdminManager(interface='json')
+    services = check_service_availability(os, update)
+    results = {}
+    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
+        if service == 'nova_v3' and 'nova' not in services:
+            continue
+        elif service not in services:
+            continue
+        results = verify_extensions(os, service, results)
+    verify_keystone_api_versions(os, update)
+    verify_glance_api_versions(os, update)
+    verify_nova_api_versions(os, update)
+    verify_cinder_api_versions(os, update)
+    display_results(results, update, replace)
+    if CONF_FILE:
+        CONF_FILE.close()
+    OUTFILE.close()
+
+
+if __name__ == "__main__":
+    main()
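
change_option() above rewrites a single option by re-reading the whole config file and writing the result to OUTFILE. A self-contained sketch of that round-trip, using the modern stdlib spellings (ConfigParser and read_file rather than the SafeConfigParser and readfp calls in the script; the section and option names are invented):

import io
import configparser

original = io.StringIO(u"[compute-feature-enabled]\napi_v3 = False\n")

parser = configparser.ConfigParser()
parser.optionxform = str            # preserve option-name case, as above
parser.read_file(original)
parser.set('compute-feature-enabled', 'api_v3', str(True))

updated = io.StringIO()
parser.write(updated)
print(updated.getvalue())
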
diff --git a/tempest/common/debug.py b/tempest/common/debug.py
index 6a496c2..228be7a 100644
--- a/tempest/common/debug.py
+++ b/tempest/common/debug.py
@@ -20,7 +20,7 @@
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
 
-tables = ['filter', 'nat', 'mangle']
+TABLES = ['filter', 'nat', 'mangle']
 
 
 def log_ip_ns():
@@ -28,14 +28,14 @@
         return
     LOG.info("Host Addr:\n" + commands.ip_addr_raw())
     LOG.info("Host Route:\n" + commands.ip_route_raw())
-    for table in ['filter', 'nat', 'mangle']:
+    for table in TABLES:
         LOG.info('Host %s table:\n%s', table, commands.iptables_raw(table))
     ns_list = commands.ip_ns_list()
     LOG.info("Host ns list" + str(ns_list))
     for ns in ns_list:
         LOG.info("ns(%s) Addr:\n%s", ns, commands.ip_ns_addr(ns))
         LOG.info("ns(%s) Route:\n%s", ns, commands.ip_ns_route(ns))
-        for table in ['filter', 'nat', 'mangle']:
+        for table in TABLES:
             LOG.info('ns(%s) table(%s):\n%s', ns, table,
                      commands.iptables_ns(ns, table))
 
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 95d50e2..57b98f7 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -87,12 +87,6 @@
         "additionalProperties": False,
     }
 
-    def __new__(cls, *args, **kwargs):
-        if not cls._instance:
-            cls._instance = super(BasicGeneratorSet, cls).__new__(cls, *args,
-                                                                  **kwargs)
-        return cls._instance
-
     def __init__(self):
         self.types_dict = {}
         for m in dir(self):
@@ -129,7 +123,7 @@
                 raise Exception("non-integer list types not supported")
         result = []
         if schema_type not in self.types_dict:
-            raise Exception("generator (%s) doesn't support type: %s"
+            raise TypeError("generator (%s) doesn't support type: %s"
                             % (self.__class__.__name__, schema_type))
         for generator in self.types_dict[schema_type]:
             ret = generator(schema)
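
A self-contained sketch of the behaviour change above (the registry and generator here are toy stand-ins, not the real generator API): requesting an unsupported schema type now raises TypeError, which callers can catch precisely instead of a bare Exception.

    # Toy stand-in for types_dict: one generator registered for 'integer'.
    types_dict = {'integer': [lambda schema: ('gen_int_min', -1, None)]}

    def generate(schema):
        schema_type = schema['type']
        if schema_type not in types_dict:
            # mirrors the change above: TypeError instead of Exception
            raise TypeError("generator doesn't support type: %s" % schema_type)
        return [gen(schema) for gen in types_dict[schema_type]]

    generate({'type': 'integer'})       # -> [('gen_int_min', -1, None)]
    try:
        generate({'type': 'string'})    # not registered in this toy example
    except TypeError:
        pass                            # unsupported types can now be skipped
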
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index b4ba933..9358851 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -19,6 +19,7 @@
 import hashlib
 import httplib
 import json
+import OpenSSL
 import posixpath
 import re
 from six import moves
@@ -27,14 +28,6 @@
 import struct
 import urlparse
 
-
-# Python 2.5 compat fix
-if not hasattr(urlparse, 'parse_qsl'):
-    import cgi
-    urlparse.parse_qsl = cgi.parse_qsl
-
-import OpenSSL
-
 from tempest import exceptions as exc
 from tempest.openstack.common import log as logging
 
@@ -50,7 +43,7 @@
         self.auth_provider = auth_provider
         self.filters = filters
         self.endpoint = auth_provider.base_url(filters)
-        endpoint_parts = self.parse_endpoint(self.endpoint)
+        endpoint_parts = urlparse.urlparse(self.endpoint)
         self.endpoint_scheme = endpoint_parts.scheme
         self.endpoint_hostname = endpoint_parts.hostname
         self.endpoint_port = endpoint_parts.port
@@ -61,10 +54,6 @@
             self.endpoint_scheme, **kwargs)
 
     @staticmethod
-    def parse_endpoint(endpoint):
-        return urlparse.urlparse(endpoint)
-
-    @staticmethod
     def get_connection_class(scheme):
         if scheme == 'https':
             return VerifiedHTTPSConnection
@@ -107,7 +96,7 @@
         conn = self.get_connection()
 
         try:
-            url_parts = self.parse_endpoint(url)
+            url_parts = urlparse.urlparse(url)
             conn_url = posixpath.normpath(url_parts.path)
             LOG.debug('Actual Path: {path}'.format(path=conn_url))
             if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
@@ -134,7 +123,6 @@
             raise exc.TimeoutException(message)
 
         body_iter = ResponseBodyIterator(resp)
-
         # Read body into string if it isn't obviously image data
         if resp.getheader('content-type', None) != 'application/octet-stream':
             body_str = ''.join([body_chunk for body_chunk in body_iter])
@@ -178,7 +166,7 @@
 
         resp, body_iter = self._http_request(url, method, **kwargs)
 
-        if 'application/json' in resp.getheader('content-type', None):
+        if 'application/json' in resp.getheader('content-type', ''):
             body = ''.join([chunk for chunk in body_iter])
             try:
                 body = json.loads(body)
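
The switch of the getheader() default from None to an empty string matters because a membership test against None raises TypeError when a response carries no Content-Type header, whereas against '' it simply evaluates to False and the body is left untouched. A minimal illustration:

    header = None
    try:
        'application/json' in header    # old default: crashes on missing header
    except TypeError:
        pass

    header = ''                         # new default
    assert ('application/json' in header) is False
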
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 43907d9..9745cb4 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -197,26 +197,26 @@
                 details = pattern.format(read_code, expected_code)
                 raise exceptions.InvalidHttpSuccessCode(details)
 
-    def post(self, url, body, headers=None):
-        return self.request('POST', url, headers, body)
+    def post(self, url, body, headers=None, extra_headers=False):
+        return self.request('POST', url, extra_headers, headers, body)
 
-    def get(self, url, headers=None):
-        return self.request('GET', url, headers)
+    def get(self, url, headers=None, extra_headers=False):
+        return self.request('GET', url, extra_headers, headers)
 
-    def delete(self, url, headers=None, body=None):
-        return self.request('DELETE', url, headers, body)
+    def delete(self, url, headers=None, body=None, extra_headers=False):
+        return self.request('DELETE', url, extra_headers, headers, body)
 
-    def patch(self, url, body, headers=None):
-        return self.request('PATCH', url, headers, body)
+    def patch(self, url, body, headers=None, extra_headers=False):
+        return self.request('PATCH', url, extra_headers, headers, body)
 
-    def put(self, url, body, headers=None):
-        return self.request('PUT', url, headers, body)
+    def put(self, url, body, headers=None, extra_headers=False):
+        return self.request('PUT', url, extra_headers, headers, body)
 
-    def head(self, url, headers=None):
-        return self.request('HEAD', url, headers)
+    def head(self, url, headers=None, extra_headers=False):
+        return self.request('HEAD', url, extra_headers, headers)
 
-    def copy(self, url, headers=None):
-        return self.request('COPY', url, headers)
+    def copy(self, url, headers=None, extra_headers=False):
+        return self.request('COPY', url, extra_headers, headers)
 
     def get_versions(self):
         resp, body = self.get('')
@@ -420,13 +420,22 @@
 
         return resp, resp_body
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
+        # If extra_headers is True, the default headers returned by
+        # get_headers() are merged into the headers supplied by the caller.
         retry = 0
 
         if headers is None:
             # NOTE(vponomaryov): if some client do not need headers,
             # it should explicitly pass empty dict
             headers = self.get_headers()
+        elif extra_headers:
+            try:
+                headers = headers.copy()
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
 
         resp, resp_body = self._request(method, url,
                                         headers=headers, body=body)
@@ -483,7 +492,7 @@
             raise exceptions.InvalidContentType(str(resp.status))
 
         if resp.status == 401 or resp.status == 403:
-            raise exceptions.Unauthorized()
+            raise exceptions.Unauthorized(resp_body)
 
         if resp.status == 404:
             raise exceptions.NotFound(resp_body)
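
A sketch of the new extra_headers semantics, using a stand-in for get_headers() (the header values are illustrative): the caller's headers are merged with the client defaults, and on duplicate keys the defaults win because they are applied last.

    def get_headers():
        # stand-in for RestClient.get_headers(); real values come from the client
        return {'X-Auth-Token': '<token>', 'Accept': 'application/json'}

    def merge(headers, extra_headers=False):
        # mirrors the branch added to RestClient.request() above
        if headers is None:
            return get_headers()
        elif extra_headers:
            try:
                headers = headers.copy()
                headers.update(get_headers())
            except (ValueError, TypeError):
                headers = get_headers()
        return headers

    merge(None)                                       # defaults only
    merge({'Accept': 'text/plain'})                   # caller headers used verbatim
    merge({'Accept': 'text/plain'}, extra_headers=True)
    # -> Accept comes back as 'application/json': defaults overwrite duplicates
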
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 00e5e0d..95b6833 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -112,3 +112,8 @@
     def turn_nic_on(self, nic):
         cmd = "sudo /bin/ip link set {nic} up".format(nic=nic)
         return self.exec_command(cmd)
+
+    def get_pids(self, pr_name):
+        # Get pid(s) of a process/program
+        cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
+        return self.exec_command(cmd).split('\n')
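
Note that the awk program prints the first column of ps -ef output, so the helper assumes a ps variant (such as the one on CirrOS guests) whose first column is the PID. The newline split also means an empty result comes back as [''] rather than [], which is what callers are expected to compare against:

    # Illustrative output handling, independent of any real SSH session:
    sample_output = "1234\n1240\n"              # two matching processes
    assert sample_output.split('\n') == ['1234', '1240', '']
    assert "".split('\n') == ['']               # no matching process at all
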
diff --git a/tempest/config.py b/tempest/config.py
index 0212d8a..7084768 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -126,7 +126,7 @@
                      "OpenStack Identity API admin credentials are known."),
     cfg.StrOpt('image_ref',
                default="{$IMAGE_ID}",
-               help="Valid secondary image reference to be used in tests."),
+               help="Valid primary image reference to be used in tests."),
     cfg.StrOpt('image_ref_alt',
                default="{$IMAGE_ID_ALT}",
                help="Valid secondary image reference to be used in tests."),
@@ -159,6 +159,19 @@
     cfg.BoolOpt('run_ssh',
                 default=False,
                 help="Should the tests ssh to instances?"),
+    cfg.StrOpt('ssh_auth_method',
+               default='keypair',
+               help="Auth method used to authenticate to the instance. "
+                    "Valid choices are: keypair, configured, adminpass, "
+                    "disabled. keypair: start the servers with an ssh "
+                    "keypair. configured: use the configured user and "
+                    "password. adminpass: use the injected adminPass. "
+                    "disabled: avoid using ssh when it is an option."),
+    cfg.StrOpt('ssh_connect_method',
+               default='fixed',
+               help="How to connect to the instance? "
+                    "fixed: use the first ip that belongs to the fixed "
+                    "network. floating: create and use a floating ip."),
     cfg.StrOpt('ssh_user',
                default='root',
                help="User name used to authenticate to an instance."),
@@ -441,6 +454,9 @@
     cfg.StrOpt('disk_format',
                default='raw',
                help='Disk format to use when copying a volume to image'),
+    cfg.IntOpt('volume_size',
+               default=1,
+               help='Default size in GB for volumes created by volumes tests'),
 ]
 
 volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -556,7 +572,7 @@
                default=1,
                help="Time in seconds between build status checks."),
     cfg.IntOpt('build_timeout',
-               default=600,
+               default=1200,
                help="Timeout in seconds to wait for a stack to build."),
     cfg.StrOpt('instance_type',
                default='m1.micro',
@@ -844,13 +860,29 @@
 BaremetalGroup = [
     cfg.StrOpt('catalog_type',
                default='baremetal',
-               help="Catalog type of the baremetal provisioning service."),
+               help="Catalog type of the baremetal provisioning service"),
+    cfg.BoolOpt('driver_enabled',
+                default=False,
+                help="Whether the Ironic nova-compute driver is enabled"),
     cfg.StrOpt('endpoint_type',
                default='publicURL',
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the baremetal provisioning "
-                    "service."),
+                    "service"),
+    cfg.IntOpt('active_timeout',
+               default=300,
+               help="Timeout for Ironic node to completely provision"),
+    cfg.IntOpt('association_timeout',
+               default=10,
+               help="Timeout for association of Nova instance and Ironic "
+                    "node"),
+    cfg.IntOpt('power_timeout',
+               default=20,
+               help="Timeout for Ironic power transitions."),
+    cfg.IntOpt('unprovision_timeout',
+               default=20,
+               help="Timeout for unprovisioning an Ironic node.")
 ]
 
 cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options")
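
For context, a self-contained sketch of how options declared this way are registered and read with oslo.config; the group and option names follow the additions above, but the registration shown here is a standalone example rather than Tempest's actual wiring:

    from oslo.config import cfg

    CONF = cfg.CONF
    baremetal_opts = [
        cfg.IntOpt('active_timeout', default=300,
                   help='Timeout for Ironic node to completely provision'),
        cfg.IntOpt('power_timeout', default=20,
                   help='Timeout for Ironic power transitions.'),
    ]
    CONF.register_opts(baremetal_opts, group='baremetal')

    # Values come from tempest.conf when set there, otherwise the defaults:
    print(CONF.baremetal.active_timeout)    # -> 300 unless overridden
    print(CONF.baremetal.power_timeout)     # -> 20 unless overridden
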
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
new file mode 100644
index 0000000..857e1e8
--- /dev/null
+++ b/tempest/exceptions.py
@@ -0,0 +1,209 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+
+class TempestException(Exception):
+    """
+    Base Tempest Exception
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+    """
+    message = "An unknown exception occurred"
+
+    def __init__(self, *args, **kwargs):
+        super(TempestException, self).__init__()
+        try:
+            self._error_string = self.message % kwargs
+        except Exception:
+            # at least get the core message out if something happened
+            self._error_string = self.message
+        if len(args) > 0:
+            # If there is a non-kwarg parameter, assume it's the error
+            # message or reason description and tack it on to the end
+            # of the exception message
+            # Convert all arguments into their string representations...
+            args = ["%s" % arg for arg in args]
+            self._error_string = (self._error_string +
+                                  "\nDetails: %s" % '\n'.join(args))
+
+    def __str__(self):
+        return self._error_string
+
+
+class RestClientException(TempestException,
+                          testtools.TestCase.failureException):
+    pass
+
+
+class RFCViolation(RestClientException):
+    message = "RFC Violation"
+
+
+class InvalidConfiguration(TempestException):
+    message = "Invalid Configuration"
+
+
+class InvalidCredentials(TempestException):
+    message = "Invalid Credentials"
+
+
+class InvalidHttpSuccessCode(RestClientException):
+    message = "The success code is different than the expected one"
+
+
+class NotFound(RestClientException):
+    message = "Object not found"
+
+
+class Unauthorized(RestClientException):
+    message = 'Unauthorized'
+
+
+class InvalidServiceTag(RestClientException):
+    message = "Invalid service tag"
+
+
+class TimeoutException(TempestException):
+    message = "Request timed out"
+
+
+class BuildErrorException(TempestException):
+    message = "Server %(server_id)s failed to build and is in ERROR status"
+
+
+class ImageKilledException(TempestException):
+    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
+
+
+class AddImageException(TempestException):
+    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
+
+
+class EC2RegisterImageException(TempestException):
+    message = ("Image %(image_id)s failed to become 'available' "
+               "in the allotted time")
+
+
+class VolumeBuildErrorException(TempestException):
+    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+
+
+class SnapshotBuildErrorException(TempestException):
+    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
+
+
+class VolumeBackupException(TempestException):
+    message = "Volume backup %(backup_id)s failed and is in ERROR status"
+
+
+class StackBuildErrorException(TempestException):
+    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
+               "due to '%(stack_status_reason)s'")
+
+
+class StackResourceBuildErrorException(TempestException):
+    message = ("Resource %(resource_name) in stack %(stack_identifier)s is "
+               "in %(resource_status)s status due to "
+               "'%(resource_status_reason)s'")
+
+
+class BadRequest(RestClientException):
+    message = "Bad request"
+
+
+class UnprocessableEntity(RestClientException):
+    message = "Unprocessable entity"
+
+
+class AuthenticationFailure(RestClientException):
+    message = ("Authentication with user %(user)s and password "
+               "%(password)s failed auth using tenant %(tenant)s.")
+
+
+class EndpointNotFound(TempestException):
+    message = "Endpoint not found"
+
+
+class RateLimitExceeded(TempestException):
+    message = "Rate limit exceeded"
+
+
+class OverLimit(TempestException):
+    message = "Quota exceeded"
+
+
+class ServerFault(TempestException):
+    message = "Got server fault"
+
+
+class ImageFault(TempestException):
+    message = "Got image fault"
+
+
+class IdentityError(TempestException):
+    message = "Got identity error"
+
+
+class Conflict(RestClientException):
+    message = "An object with that identifier already exists"
+
+
+class SSHTimeout(TempestException):
+    message = ("Connection to the %(host)s via SSH timed out.\n"
+               "User: %(user)s, Password: %(password)s")
+
+
+class SSHExecCommandFailed(TempestException):
+    """Raised when remotely executed command returns nonzero status."""
+    message = ("Command '%(command)s', exit status: %(exit_status)d, "
+               "Error:\n%(strerror)s")
+
+
+class ServerUnreachable(TempestException):
+    message = "The server is not reachable via the configured network"
+
+
+class TearDownException(TempestException):
+    message = "%(num)d cleanUp operation failed"
+
+
+class ResponseWithNonEmptyBody(RFCViolation):
+    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
+               "MUST NOT have a body")
+
+
+class ResponseWithEntity(RFCViolation):
+    message = ("RFC Violation! Response with 205 HTTP Status Code "
+               "MUST NOT have an entity")
+
+
+class InvalidHTTPResponseBody(RestClientException):
+    message = "HTTP response body is invalid json or xml"
+
+
+class InvalidContentType(RestClientException):
+    message = "Invalid content type provided"
+
+
+class UnexpectedResponseCode(RestClientException):
+    message = "Unexpected response code received"
+
+
+class InvalidStructure(TempestException):
+    message = "Invalid structure of table with details"
diff --git a/tempest/exceptions/README.rst b/tempest/exceptions/README.rst
deleted file mode 100644
index dbe42b2..0000000
--- a/tempest/exceptions/README.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Tempest Field Guide to Exceptions
-=================================
-
-
-What are these exceptions?
---------------------------
-
-These exceptions are used by Tempest for covering OpenStack specific exceptional
-cases.
-
-How to add new exceptions?
---------------------------
-
-Each exception-template for inheritance purposes should be added into 'base'
-submodule.
-All other exceptions can be added in two ways:
-- in main module
-- in submodule
-But only in one of the ways. Need to make sure, that new exception is not
-present already.
-
-How to use exceptions?
-----------------------
-
-Any exceptions from this module or its submodules should be used in appropriate
-places to handle exceptional cases.
-Classes from 'base' module should be used only for inheritance.
diff --git a/tempest/exceptions/__init__.py b/tempest/exceptions/__init__.py
deleted file mode 100644
index d313def..0000000
--- a/tempest/exceptions/__init__.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.exceptions import base
-
-
-class InvalidConfiguration(base.TempestException):
-    message = "Invalid Configuration"
-
-
-class InvalidCredentials(base.TempestException):
-    message = "Invalid Credentials"
-
-
-class InvalidHttpSuccessCode(base.RestClientException):
-    message = "The success code is different than the expected one"
-
-
-class NotFound(base.RestClientException):
-    message = "Object not found"
-
-
-class Unauthorized(base.RestClientException):
-    message = 'Unauthorized'
-
-
-class InvalidServiceTag(base.RestClientException):
-    message = "Invalid service tag"
-
-
-class TimeoutException(base.TempestException):
-    message = "Request timed out"
-
-
-class BuildErrorException(base.TempestException):
-    message = "Server %(server_id)s failed to build and is in ERROR status"
-
-
-class ImageKilledException(base.TempestException):
-    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
-
-
-class AddImageException(base.TempestException):
-    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
-
-
-class EC2RegisterImageException(base.TempestException):
-    message = ("Image %(image_id)s failed to become 'available' "
-               "in the allotted time")
-
-
-class VolumeBuildErrorException(base.TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
-
-
-class SnapshotBuildErrorException(base.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
-class VolumeBackupException(base.TempestException):
-    message = "Volume backup %(backup_id)s failed and is in ERROR status"
-
-
-class StackBuildErrorException(base.TempestException):
-    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
-               "due to '%(stack_status_reason)s'")
-
-
-class BadRequest(base.RestClientException):
-    message = "Bad request"
-
-
-class UnprocessableEntity(base.RestClientException):
-    message = "Unprocessable entity"
-
-
-class AuthenticationFailure(base.RestClientException):
-    message = ("Authentication with user %(user)s and password "
-               "%(password)s failed auth using tenant %(tenant)s.")
-
-
-class EndpointNotFound(base.TempestException):
-    message = "Endpoint not found"
-
-
-class RateLimitExceeded(base.TempestException):
-    message = "Rate limit exceeded"
-
-
-class OverLimit(base.TempestException):
-    message = "Quota exceeded"
-
-
-class ServerFault(base.TempestException):
-    message = "Got server fault"
-
-
-class ImageFault(base.TempestException):
-    message = "Got image fault"
-
-
-class IdentityError(base.TempestException):
-    message = "Got identity error"
-
-
-class Conflict(base.RestClientException):
-    message = "An object with that identifier already exists"
-
-
-class SSHTimeout(base.TempestException):
-    message = ("Connection to the %(host)s via SSH timed out.\n"
-               "User: %(user)s, Password: %(password)s")
-
-
-class SSHExecCommandFailed(base.TempestException):
-    """Raised when remotely executed command returns nonzero status."""
-    message = ("Command '%(command)s', exit status: %(exit_status)d, "
-               "Error:\n%(strerror)s")
-
-
-class ServerUnreachable(base.TempestException):
-    message = "The server is not reachable via the configured network"
-
-
-class TearDownException(base.TempestException):
-    message = "%(num)d cleanUp operation failed"
-
-
-class ResponseWithNonEmptyBody(base.RFCViolation):
-    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
-               "MUST NOT have a body")
-
-
-class ResponseWithEntity(base.RFCViolation):
-    message = ("RFC Violation! Response with 205 HTTP Status Code "
-               "MUST NOT have an entity")
-
-
-class InvalidHTTPResponseBody(base.RestClientException):
-    message = "HTTP response body is invalid json or xml"
-
-
-class InvalidContentType(base.RestClientException):
-    message = "Invalid content type provided"
-
-
-class UnexpectedResponseCode(base.RestClientException):
-    message = "Unexpected response code received"
-
-
-class InvalidStructure(base.TempestException):
-    message = "Invalid structure of table with details"
diff --git a/tempest/exceptions/base.py b/tempest/exceptions/base.py
deleted file mode 100644
index b8e470e..0000000
--- a/tempest/exceptions/base.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-
-
-class TempestException(Exception):
-    """
-    Base Tempest Exception
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-    """
-    message = "An unknown exception occurred"
-
-    def __init__(self, *args, **kwargs):
-        super(TempestException, self).__init__()
-        try:
-            self._error_string = self.message % kwargs
-        except Exception:
-            # at least get the core message out if something happened
-            self._error_string = self.message
-        if len(args) > 0:
-            # If there is a non-kwarg parameter, assume it's the error
-            # message or reason description and tack it on to the end
-            # of the exception message
-            # Convert all arguments into their string representations...
-            args = ["%s" % arg for arg in args]
-            self._error_string = (self._error_string +
-                                  "\nDetails: %s" % '\n'.join(args))
-
-    def __str__(self):
-        return self._error_string
-
-
-class RestClientException(TempestException,
-                          testtools.TestCase.failureException):
-    pass
-
-
-class RFCViolation(RestClientException):
-    message = "RFC Violation"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 234faad..f297f22 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -12,6 +12,7 @@
 #   License for the specific language governing permissions and limitations
 #   under the License.
 
+import os
 import re
 
 
@@ -22,7 +23,7 @@
 PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
 TEST_DEFINITION = re.compile(r'^\s*def test.*')
 SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
-SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
 VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
 
 
@@ -47,7 +48,7 @@
     T104: Scenario tests require a services decorator
     """
 
-    if 'tempest/scenario' in filename:
+    if 'tempest/scenario/test_' in filename:
         if TEST_DEFINITION.match(physical_line):
             if not SCENARIO_DECORATOR.match(previous_logical):
                 return (physical_line.find('def'),
@@ -75,8 +76,32 @@
             return 0, "T106: Don't put vi configuration in source files"
 
 
+def service_tags_not_in_module_path(physical_line, filename):
+    """Check that a service tag isn't in the module path
+
+    A service tag should only be added if the service name isn't already in
+    the module path.
+
+    T107
+    """
+    # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
+    # created for services like heat, which would cause false positives for
+    # those tests, so just exclude the scenario tests.
+    if 'tempest/scenario' not in filename:
+        matches = SCENARIO_DECORATOR.match(physical_line)
+        if matches:
+            services = matches.group(1).split(',')
+            for service in services:
+                service_name = service.strip().strip("'")
+                modulepath = os.path.split(filename)[0]
+                if service_name in modulepath:
+                    return (physical_line.find(service_name),
+                            "T107: service tag should not be in path")
+
+
 def factory(register):
     register(import_no_clients_in_api)
     register(scenario_tests_need_service_tags)
     register(no_setupclass_for_unit_tests)
     register(no_vi_headers)
+    register(service_tags_not_in_module_path)
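
To make the new T107 check concrete, here is roughly how it behaves when called directly; the decorator line and file paths below are illustrative:

    line = "    @test.services('compute', 'image')"

    # A compute API test tagging 'compute' duplicates its own module path:
    service_tags_not_in_module_path(line, 'tempest/api/compute/test_foo.py')
    # -> (offset, "T107: service tag should not be in path")

    # Scenario tests are excluded, so the same line passes there:
    service_tags_not_in_module_path(line, 'tempest/scenario/test_foo.py')
    # -> None
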
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f06a850..1e7ddb1 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -71,11 +71,13 @@
             username, password, tenant_name)
         cls.compute_client = cls.manager.compute_client
         cls.image_client = cls.manager.image_client
+        cls.baremetal_client = cls.manager.baremetal_client
         cls.identity_client = cls.manager.identity_client
         cls.network_client = cls.manager.network_client
         cls.volume_client = cls.manager.volume_client
         cls.object_storage_client = cls.manager.object_storage_client
         cls.orchestration_client = cls.manager.orchestration_client
+        cls.data_processing_client = cls.manager.data_processing_client
         cls.resource_keys = {}
         cls.os_resources = []
 
@@ -283,7 +285,7 @@
         return rules
 
     def create_server(self, client=None, name=None, image=None, flavor=None,
-                      create_kwargs={}):
+                      wait=True, create_kwargs={}):
         if client is None:
             client = self.compute_client
         if name is None:
@@ -318,7 +320,8 @@
         server = client.servers.create(name, image, flavor, **create_kwargs)
         self.assertEqual(server.name, name)
         self.set_resource(name, server)
-        self.status_timeout(client.servers, server.id, 'ACTIVE')
+        if wait:
+            self.status_timeout(client.servers, server.id, 'ACTIVE')
         # The instance retrieved on creation is missing network
         # details, necessitating retrieval after it becomes active to
         # ensure correct details.
@@ -439,6 +442,82 @@
         LOG.debug("image:%s" % self.image)
 
 
+class BaremetalScenarioTest(OfficialClientTest):
+    @classmethod
+    def setUpClass(cls):
+        super(BaremetalScenarioTest, cls).setUpClass()
+
+        if (not CONF.service_available.ironic or
+           not CONF.baremetal.driver_enabled):
+            msg = 'Ironic not available or Ironic compute driver not enabled'
+            raise cls.skipException(msg)
+
+        # use an admin client manager for baremetal client
+        username, password, tenant = cls.admin_credentials()
+        manager = clients.OfficialClientManager(username, password, tenant)
+        cls.baremetal_client = manager.baremetal_client
+
+        # allow any issues obtaining the node list to raise early
+        cls.baremetal_client.node.list()
+
+    def _node_state_timeout(self, node_id, state_attr,
+                            target_states, timeout=10, interval=1):
+        if not isinstance(target_states, list):
+            target_states = [target_states]
+
+        def check_state():
+            node = self.get_node(node_id=node_id)
+            if getattr(node, state_attr) in target_states:
+                return True
+            return False
+
+        if not tempest.test.call_until_true(
+            check_state, timeout, interval):
+            msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
+                   (node_id, state_attr, target_states))
+            raise exceptions.TimeoutException(msg)
+
+    def wait_provisioning_state(self, node_id, state, timeout):
+        self._node_state_timeout(
+            node_id=node_id, state_attr='provision_state',
+            target_states=state, timeout=timeout)
+
+    def wait_power_state(self, node_id, state):
+        self._node_state_timeout(
+            node_id=node_id, state_attr='power_state',
+            target_states=state, timeout=CONF.baremetal.power_timeout)
+
+    def wait_node(self, instance_id):
+        """Waits for a node to be associated with instance_id."""
+        from ironicclient import exc as ironic_exceptions
+
+        def _get_node():
+            node = None
+            try:
+                node = self.get_node(instance_id=instance_id)
+            except ironic_exceptions.HTTPNotFound:
+                pass
+            return node is not None
+
+        if not tempest.test.call_until_true(
+            _get_node, CONF.baremetal.association_timeout, 1):
+            msg = ('Timed out waiting to get Ironic node by instance id %s'
+                   % instance_id)
+            raise exceptions.TimeoutException(msg)
+
+    def get_node(self, node_id=None, instance_id=None):
+        if node_id:
+            return self.baremetal_client.node.get(node_id)
+        elif instance_id:
+            return self.baremetal_client.node.get_by_instance_uuid(instance_id)
+
+    def get_ports(self, node_id):
+        ports = []
+        for port in self.baremetal_client.node.list_ports(node_id):
+            ports.append(self.baremetal_client.port.get(port.uuid))
+        return ports
+
+
 class NetworkScenarioTest(OfficialClientTest):
     """
     Base class for network scenario tests
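
The baremetal wait helpers above all lean on the same poll-until-true pattern; a simplified, self-contained stand-in for tempest.test.call_until_true (not its actual implementation) looks roughly like this:

    import time

    def call_until_true(func, duration, sleep_for):
        # Poll func() every sleep_for seconds for at most duration seconds;
        # return True as soon as it succeeds, False on timeout.
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False

    # e.g. waiting for a hypothetical node record to reach 'power on':
    node = {'power_state': 'power on'}
    assert call_until_true(lambda: node['power_state'] == 'power on', 10, 1)
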
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
new file mode 100644
index 0000000..c53aa83
--- /dev/null
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -0,0 +1,147 @@
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+# power/provision states as of icehouse
+class PowerStates(object):
+    """Possible power states of an Ironic node."""
+    POWER_ON = 'power on'
+    POWER_OFF = 'power off'
+    REBOOT = 'rebooting'
+    SUSPEND = 'suspended'
+
+
+class ProvisionStates(object):
+    """Possible provision states of an Ironic node."""
+    NOSTATE = None
+    INIT = 'initializing'
+    ACTIVE = 'active'
+    BUILDING = 'building'
+    DEPLOYWAIT = 'wait call-back'
+    DEPLOYING = 'deploying'
+    DEPLOYFAIL = 'deploy failed'
+    DEPLOYDONE = 'deploy complete'
+    DELETING = 'deleting'
+    DELETED = 'deleted'
+    ERROR = 'error'
+
+
+class BaremetalBasicOptsPXESSH(manager.BaremetalScenarioTest):
+    """
+    This smoke test exercises the pxe_ssh Ironic driver. It follows this basic
+    set of operations:
+        * Creates a keypair
+        * Boots an instance using the keypair
+        * Monitors the associated Ironic node for power and
+          expected state transitions
+        * Validates Ironic node's driver_info has been properly
+          updated
+        * Validates Ironic node's port data has been properly updated
+        * Verifies SSH connectivity using created keypair via fixed IP
+        * Associates a floating ip
+        * Verifies SSH connectivity using created keypair via floating IP
+        * Deletes instance
+        * Monitors the associated Ironic node for power and
+          expected state transitions
+    """
+    def add_keypair(self):
+        self.keypair = self.create_keypair()
+
+    def add_floating_ip(self):
+        floating_ip = self.compute_client.floating_ips.create()
+        self.instance.add_floating_ip(floating_ip)
+        return floating_ip.ip
+
+    def verify_connectivity(self, ip=None):
+        if ip:
+            dest = self.get_remote_client(ip)
+        else:
+            dest = self.get_remote_client(self.instance)
+        dest.validate_authentication()
+
+    def validate_driver_info(self):
+        f_id = self.instance.flavor['id']
+        flavor_extra = self.compute_client.flavors.get(f_id).get_keys()
+        driver_info = self.node.driver_info
+        self.assertEqual(driver_info['pxe_deploy_kernel'],
+                         flavor_extra['baremetal:deploy_kernel_id'])
+        self.assertEqual(driver_info['pxe_deploy_ramdisk'],
+                         flavor_extra['baremetal:deploy_ramdisk_id'])
+        self.assertEqual(driver_info['pxe_image_source'],
+                         self.instance.image['id'])
+
+    def validate_ports(self):
+        for port in self.get_ports(self.node.uuid):
+            n_port_id = port.extra['vif_port_id']
+            n_port = self.network_client.show_port(n_port_id)['port']
+            self.assertEqual(n_port['device_id'], self.instance.id)
+            self.assertEqual(n_port['mac_address'], port.address)
+
+    def boot_instance(self):
+        create_kwargs = {
+            'key_name': self.keypair.id
+        }
+        self.instance = self.create_server(
+            wait=False, create_kwargs=create_kwargs)
+
+        self.set_resource('instance', self.instance)
+
+        self.wait_node(self.instance.id)
+        self.node = self.get_node(instance_id=self.instance.id)
+
+        self.wait_power_state(self.node.uuid, PowerStates.POWER_ON)
+
+        self.wait_provisioning_state(
+            self.node.uuid,
+            [ProvisionStates.DEPLOYWAIT, ProvisionStates.ACTIVE],
+            timeout=15)
+
+        self.wait_provisioning_state(self.node.uuid, ProvisionStates.ACTIVE,
+                                     timeout=CONF.baremetal.active_timeout)
+
+        self.status_timeout(
+            self.compute_client.servers, self.instance.id, 'ACTIVE')
+
+        self.node = self.get_node(instance_id=self.instance.id)
+        self.instance = self.compute_client.servers.get(self.instance.id)
+
+    def terminate_instance(self):
+        self.instance.delete()
+        self.remove_resource('instance')
+        self.wait_power_state(self.node.uuid, PowerStates.POWER_OFF)
+        self.wait_provisioning_state(
+            self.node.uuid,
+            ProvisionStates.NOSTATE,
+            timeout=CONF.baremetal.unprovision_timeout)
+
+    @test.services('baremetal', 'compute', 'image', 'network')
+    def test_baremetal_server_ops(self):
+        self.add_keypair()
+        self.boot_instance()
+        self.validate_driver_info()
+        self.validate_ports()
+        self.verify_connectivity()
+        floating_ip = self.add_floating_ip()
+        self.verify_connectivity(ip=floating_ip)
+        self.terminate_instance()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b7a30f8..0210c56 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -31,7 +31,7 @@
     Test large operations.
 
     This test below:
-    * Spin up multiple instances in one nova call
+    * Spin up multiple instances in one nova call, and repeat three times
     * as a regular user
     * TODO: same thing for cinder
 
@@ -69,3 +69,5 @@
             return
         self.glance_image_create()
         self.nova_boot()
+        self.nova_boot()
+        self.nova_boot()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index f7a3d6f..d771aed 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -17,7 +17,6 @@
 import urllib
 
 from tempest.api.network import common as net_common
-from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
 from tempest.scenario import manager
@@ -59,24 +58,45 @@
     def setUpClass(cls):
         super(TestLoadBalancerBasic, cls).setUpClass()
         cls.check_preconditions()
-        cls.security_groups = {}
         cls.servers_keypairs = {}
         cls.members = []
         cls.floating_ips = {}
-        cls.server_ip = None
-        cls.vip_ip = None
+        cls.server_ips = {}
         cls.port1 = 80
         cls.port2 = 88
 
-    def _create_security_groups(self):
-        self.security_groups[self.tenant_id] =\
-            self._create_security_group_neutron(tenant_id=self.tenant_id)
+    def setUp(self):
+        super(TestLoadBalancerBasic, self).setUp()
+        self.server_ips = {}
+        self._create_security_group()
 
-    def _create_server(self):
-        tenant_id = self.tenant_id
-        name = data_utils.rand_name("smoke_server-")
+    def cleanup_wrapper(self, resource):
+        self.cleanup_resource(resource, self.__class__.__name__)
+
+    def _create_security_group(self):
+        self.security_group = self._create_security_group_neutron(
+            tenant_id=self.tenant_id)
+        self._create_security_group_rules_for_port(self.port1)
+        self._create_security_group_rules_for_port(self.port2)
+        self.addCleanup(self.cleanup_wrapper, self.security_group)
+
+    def _create_security_group_rules_for_port(self, port):
+        rule = {
+            'direction': 'ingress',
+            'protocol': 'tcp',
+            'port_range_min': port,
+            'port_range_max': port,
+        }
+        self._create_security_group_rule(
+            client=self.network_client,
+            secgroup=self.security_group,
+            tenant_id=self.tenant_id,
+            **rule)
+
+    def _create_server(self, name):
         keypair = self.create_keypair(name='keypair-%s' % name)
-        security_groups = [self.security_groups[tenant_id].name]
+        self.addCleanup(self.cleanup_wrapper, keypair)
+        security_groups = [self.security_group.name]
         net = self._list_networks(tenant_id=self.tenant_id)[0]
         create_kwargs = {
             'nics': [
@@ -87,51 +107,106 @@
         }
         server = self.create_server(name=name,
                                     create_kwargs=create_kwargs)
-        self.servers_keypairs[server] = keypair
+        self.addCleanup(self.cleanup_wrapper, server)
+        self.servers_keypairs[server.id] = keypair
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             public_network_id = config.network.public_network_id
             floating_ip = self._create_floating_ip(
                 server, public_network_id)
+            self.addCleanup(self.cleanup_wrapper, floating_ip)
             self.floating_ips[floating_ip] = server
-            self.server_ip = floating_ip.floating_ip_address
+            self.server_ips[server.id] = floating_ip.floating_ip_address
         else:
-            self.server_ip = server.networks[net['name']][0]
+            self.server_ips[server.id] = server.networks[net['name']][0]
         self.assertTrue(self.servers_keypairs)
         return server
 
-    def _start_servers(self, server):
+    def _create_servers(self):
+        for count in range(2):
+            self._create_server(name=("server%s" % (count + 1)))
+        self.assertEqual(len(self.servers_keypairs), 2)
+
+    def _start_servers(self):
         """
+        Start two backends
+
         1. SSH to the instance
         2. Start two http backends listening on ports 80 and 88 respectively
+        In case there are two instances, each backend is created on a separate
+        instance.
+
+        The backends are services run by inetd. To start them we need to
+        edit /etc/inetd.conf in the following way:
+        www stream tcp nowait root /bin/sh sh /home/cirros/script_name
+
+        Where /home/cirros/script_name is the path to a script which
+        echoes the response:
+        echo -e 'HTTP/1.0 200 OK\r\n\r\nserver_name'
+
+        If we want the server to listen on port 88, then we use
+        "kerberos" instead of "www".
         """
 
-        private_key = self.servers_keypairs[server].private_key
-        ssh_client = self.get_remote_client(
-            server_or_ip=self.server_ip,
-            private_key=private_key).ssh_client
-        start_server = "while true; do echo -e 'HTTP/1.0 200 OK\r\n\r\n" \
-                       "%(server)s' | sudo nc -l -p %(port)s ; done &"
-        cmd = start_server % {'server': 'server1',
-                              'port': self.port1}
-        ssh_client.exec_command(cmd)
-        cmd = start_server % {'server': 'server2',
-                              'port': self.port2}
-        ssh_client.exec_command(cmd)
+        for server_id, ip in self.server_ips.iteritems():
+            private_key = self.servers_keypairs[server_id].private_key
+            server_name = self.compute_client.servers.get(server_id).name
+            ssh_client = self.get_remote_client(
+                server_or_ip=ip,
+                private_key=private_key)
+            ssh_client.validate_authentication()
+            # Create service for inetd
+            create_script = """sudo sh -c "echo -e \\"echo -e 'HTTP/1.0 """ \
+                            """200 OK\\\\\\r\\\\\\n\\\\\\r\\\\\\n""" \
+                            """%(server)s'\\" >>/home/cirros/%(script)s\""""
 
-    def _check_connection(self, check_ip):
-        def try_connect(ip):
+            cmd = create_script % {
+                'server': server_name,
+                'script': 'script1'}
+            ssh_client.exec_command(cmd)
+            # Configure inetd
+            configure_inetd = """sudo sh -c "echo -e \\"%(service)s """ \
+                              """stream tcp nowait root /bin/sh sh """ \
+                              """/home/cirros/%(script)s\\" >> """ \
+                              """/etc/inetd.conf\""""
+            # "www" stands for port 80
+            cmd = configure_inetd % {'service': 'www',
+                                     'script': 'script1'}
+            ssh_client.exec_command(cmd)
+
+            if len(self.server_ips) == 1:
+                cmd = create_script % {'server': 'server2',
+                                       'script': 'script2'}
+                ssh_client.exec_command(cmd)
+                # "kerberos" stands for port 88
+                cmd = configure_inetd % {'service': 'kerberos',
+                                         'script': 'script2'}
+                ssh_client.exec_command(cmd)
+
+            # Get PIDs of inetd
+            pids = ssh_client.get_pids('inetd')
+            if pids != ['']:
+                # If there are any inetd processes, reload them
+                kill_cmd = "sudo kill -HUP %s" % ' '.join(pids)
+                ssh_client.exec_command(kill_cmd)
+            else:
+                # In other case start inetd
+                start_inetd = "sudo /usr/sbin/inetd /etc/inetd.conf"
+                ssh_client.exec_command(start_inetd)
+
+    def _check_connection(self, check_ip, port=80):
+        def try_connect(ip, port):
             try:
-                urllib.urlopen("http://{0}/".format(ip))
-                return True
+                resp = urllib.urlopen("http://{0}:{1}/".format(ip, port))
+                if resp.getcode() == 200:
+                    return True
+                return False
             except IOError:
                 return False
         timeout = config.compute.ping_timeout
-        timer = 0
-        while not try_connect(check_ip):
-            time.sleep(1)
-            timer += 1
-            if timer >= timeout:
+        start = time.time()
+        while not try_connect(check_ip, port):
+            if (time.time() - start) > timeout:
                 message = "Timed out trying to connect to %s" % check_ip
                 raise exceptions.TimeoutException(message)
 
@@ -142,30 +217,37 @@
         self.subnet = net_common.DeletableSubnet(client=self.network_client,
                                                  **subnet)
         self.pool = super(TestLoadBalancerBasic, self)._create_pool(
-            'ROUND_ROBIN',
-            'HTTP',
-            self.subnet.id)
+            lb_method='ROUND_ROBIN',
+            protocol='HTTP',
+            subnet_id=self.subnet.id)
+        self.addCleanup(self.cleanup_wrapper, self.pool)
         self.assertTrue(self.pool)
 
-    def _create_members(self, server_ids):
+    def _create_members(self):
         """
         Create two members.
 
         In case there is only one server, create both members with the same ip
         but with different ports to listen on.
         """
-        servers = self.compute_client.servers.list()
-        for server in servers:
-            if server.id in server_ids:
-                ip = self.server_ip
-                pool_id = self.pool.id
-                if len(set(server_ids)) == 1 or len(servers) == 1:
-                    member1 = self._create_member(ip, self.port1, pool_id)
-                    member2 = self._create_member(ip, self.port2, pool_id)
-                    self.members.extend([member1, member2])
-                else:
-                    member = self._create_member(ip, self.port1, pool_id)
-                    self.members.append(member)
+
+        for server_id, ip in self.server_ips.iteritems():
+            if len(self.server_ips) == 1:
+                member1 = self._create_member(address=ip,
+                                              protocol_port=self.port1,
+                                              pool_id=self.pool.id)
+                self.addCleanup(self.cleanup_wrapper, member1)
+                member2 = self._create_member(address=ip,
+                                              protocol_port=self.port2,
+                                              pool_id=self.pool.id)
+                self.addCleanup(self.cleanup_wrapper, member2)
+                self.members.extend([member1, member2])
+            else:
+                member = self._create_member(address=ip,
+                                             protocol_port=self.port1,
+                                             pool_id=self.pool.id)
+                self.addCleanup(self.cleanup_wrapper, member)
+                self.members.append(member)
         self.assertTrue(self.members)
 
     def _assign_floating_ip_to_vip(self, vip):
@@ -173,22 +255,23 @@
         port_id = vip.port_id
         floating_ip = self._create_floating_ip(vip, public_network_id,
                                                port_id=port_id)
+        self.addCleanup(self.cleanup_wrapper, floating_ip)
         self.floating_ips.setdefault(vip.id, [])
         self.floating_ips[vip.id].append(floating_ip)
 
     def _create_load_balancer(self):
         self._create_pool()
-        self._create_members([self.servers_keypairs.keys()[0].id])
-        subnet_id = self.subnet.id
-        pool_id = self.pool.id
-        self.vip = super(TestLoadBalancerBasic, self)._create_vip('HTTP', 80,
-                                                                  subnet_id,
-                                                                  pool_id)
-        self._status_timeout(NeutronRetriever(self.network_client,
-                                              self.network_client.vip_path,
-                                              net_common.DeletableVip),
-                             self.vip.id,
-                             expected_status='ACTIVE')
+        self._create_members()
+        self.vip = self._create_vip(protocol='HTTP',
+                                    protocol_port=80,
+                                    subnet_id=self.subnet.id,
+                                    pool_id=self.pool.id)
+        self.addCleanup(self.cleanup_wrapper, self.vip)
+        self.status_timeout(NeutronRetriever(self.network_client,
+                                             self.network_client.vip_path,
+                                             net_common.DeletableVip),
+                            self.vip.id,
+                            expected_status='ACTIVE')
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             self._assign_floating_ip_to_vip(self.vip)
@@ -199,34 +282,50 @@
 
     def _check_load_balancing(self):
         """
-        1. Send 10 requests on the floating ip associated with the VIP
+        1. Send 100 requests on the floating ip associated with the VIP
         2. Check that the requests are shared between
            the two servers and that both of them get equal portions
            of the requests
         """
 
         self._check_connection(self.vip_ip)
+        resp = self._send_requests(self.vip_ip)
+        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
+        self.assertEqual(50, resp.count("server1\n"))
+        self.assertEqual(50, resp.count("server2\n"))
+
+    def _send_requests(self, vip_ip):
         resp = []
-        for count in range(10):
+        for count in range(100):
             resp.append(
                 urllib.urlopen(
-                    "http://{0}/".format(self.vip_ip)).read())
-        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
-        self.assertEqual(5, resp.count("server1\n"))
-        self.assertEqual(5, resp.count("server2\n"))
+                    "http://{0}/".format(vip_ip)).read())
+        return resp
 
     @test.skip_because(bug='1295165')
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
-        self._create_security_groups()
-        server = self._create_server()
-        self._start_servers(server)
+        self._create_server('server1')
+        self._start_servers()
         self._create_load_balancer()
         self._check_load_balancing()
 
 
 class NeutronRetriever(object):
+    """
+    Helper class that makes it possible to handle neutron objects returned by
+    GET requests as attribute dicts.
+
+    When the get() method is called, the returned dictionary is wrapped into
+    a corresponding DeletableResource class which provides attribute access
+    to dictionary values.
+
+    Usage:
+        This retriever is used to allow using status_timeout from
+        tempest.manager with Neutron objects.
+    """
+
     def __init__(self, network_client, path, resource):
         self.network_client = network_client
         self.path = path
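
For readability, the guest-side artifacts that _start_servers() builds through the heavily escaped templates above boil down to the following (taken from the method's docstring; the script names are the ones used in the test):

    # Content of /home/cirros/script1 on the guest (the echoed name varies):
    response_script = r"echo -e 'HTTP/1.0 200 OK\r\n\r\nserver1'"

    # Line appended to /etc/inetd.conf so the script is served on port 80
    # ("www"); port 88 uses the service name "kerberos" with a second script:
    inetd_line = "www stream tcp nowait root /bin/sh sh /home/cirros/script1"
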
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e7e97b5..0ba65cf 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.common import debug
 from tempest.common.utils import data_utils
 from tempest import config
@@ -158,6 +160,8 @@
         self.server.rebuild(image_ref_alt)
         self._wait_server_status_and_check_network_connectivity()
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @services('compute', 'network')
     def test_server_connectivity_pause_unpause(self):
         self.server.pause()
@@ -167,6 +171,8 @@
         self.server.unpause()
         self._wait_server_status_and_check_network_connectivity()
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @services('compute', 'network')
     def test_server_connectivity_suspend_resume(self):
         self.server.suspend()
@@ -176,11 +182,10 @@
         self.server.resume()
         self._wait_server_status_and_check_network_connectivity()
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize is not available.')
     @services('compute', 'network')
     def test_server_connectivity_resize(self):
-        if not CONF.compute_feature_enabled.resize:
-            msg = "Skipping test - resize not available on this host"
-            raise self.skipException(msg)
         resize_flavor = CONF.compute.flavor_ref_alt
         if resize_flavor == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index b9ee040..b1b06cc 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -422,11 +422,15 @@
         access_point_ssh = self._connect_to_access_point(tenant)
         mac_addr = access_point_ssh.get_mac_address()
         mac_addr = mac_addr.strip().lower()
-        port_list = self.network_client.list_ports()['ports']
+        # Get the fixed_ips and mac_address fields of all ports. Select
+        # only those two columns to reduce the size of the response.
+        port_list = self.network_client.list_ports(
+            fields=['fixed_ips', 'mac_address'])['ports']
         port_detail_list = [
             (port['fixed_ips'][0]['subnet_id'],
              port['fixed_ips'][0]['ip_address'],
-             port['mac_address'].lower()) for port in port_list
+             port['mac_address'].lower())
+            for port in port_list if port['fixed_ips']
         ]
         server_ip = self._get_server_ip(tenant.access_point)
         subnet_id = tenant.subnet.id
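The new "if port['fixed_ips']" guard is what keeps a port without fixed IPs from raising an IndexError when its first entry is read; a minimal illustration with made-up port data:

    ports = [
        {'fixed_ips': [{'subnet_id': 'sub-1', 'ip_address': '10.0.0.5'}],
         'mac_address': 'FA:16:3E:00:00:01'},
        {'fixed_ips': [], 'mac_address': 'fa:16:3e:00:00:02'},  # skipped
    ]
    port_detail_list = [
        (p['fixed_ips'][0]['subnet_id'],
         p['fixed_ips'][0]['ip_address'],
         p['mac_address'].lower())
        for p in ports if p['fixed_ips']
    ]
    # [('sub-1', '10.0.0.5', 'fa:16:3e:00:00:01')]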
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index c0eb6e7..5a1dc04 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
@@ -37,16 +39,12 @@
         cls.set_network_resources()
         super(TestServerAdvancedOps, cls).setUpClass()
 
-        if not CONF.compute_feature_enabled.resize:
-            msg = "Skipping test - resize not available on this host"
-            raise cls.skipException(msg)
-
-        resize_flavor = CONF.compute.flavor_ref_alt
-
-        if resize_flavor == CONF.compute.flavor_ref:
+        if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
             raise cls.skipException(msg)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize is not available.')
     @test.services('compute')
     def test_resize_server_confirm(self):
         # We create an instance for use in this test
@@ -65,6 +63,8 @@
         self.status_timeout(
             self.compute_client.servers, instance_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.services('compute')
     def test_server_sequence_suspend_resume(self):
         # We create an instance for use in this test
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d369f12..13e00a5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -14,19 +14,17 @@
 #    under the License.
 
 from tempest.common.utils import data_utils
-from tempest.common.utils import test_utils
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
+from tempest.scenario import utils as test_utils
 from tempest import test
 
-import testscenarios
-
 CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
 
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test_utils.load_tests_input_scenario_utils
 
 
 class TestServerBasicOps(manager.OfficialClientTest):
@@ -43,13 +41,6 @@
      * Terminate the instance
     """
 
-    scenario_utils = test_utils.InputScenarioUtils()
-    scenario_flavor = scenario_utils.scenario_flavors
-    scenario_image = scenario_utils.scenario_images
-
-    scenarios = testscenarios.multiply_scenarios(scenario_image,
-                                                 scenario_flavor)
-
     def setUp(self):
         super(TestServerBasicOps, self).setUp()
         # Setup image and flavor the test instance
@@ -99,42 +90,6 @@
                                       create_kwargs=create_kwargs)
         self.set_resource('instance', instance)
 
-    def pause_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Pausing instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.pause()
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'PAUSED')
-
-    def unpause_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Unpausing instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.unpause()
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'ACTIVE')
-
-    def suspend_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Suspending instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.suspend()
-        self.status_timeout(self.compute_client.servers,
-                            instance_id, 'SUSPENDED')
-
-    def resume_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Resuming instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.resume()
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'ACTIVE')
-
     def terminate_instance(self):
         instance = self.get_resource('instance')
         instance.delete()
@@ -149,10 +104,11 @@
             instance.add_floating_ip(floating_ip)
             # Check ssh
             try:
-                self.get_remote_client(
+                linux_client = self.get_remote_client(
                     server_or_ip=floating_ip.ip,
                     username=self.image_utils.ssh_user(self.image_ref),
-                    private_key=self.keypair.private)
+                    private_key=self.keypair.private_key)
+                linux_client.validate_authentication()
             except Exception:
                 LOG.exception('ssh to server failed')
                 self._log_console_output()
@@ -163,9 +119,5 @@
         self.add_keypair()
         self.create_security_group()
         self.boot_instance()
-        self.pause_server()
-        self.unpause_server()
-        self.suspend_server()
-        self.resume_server()
         self.verify_ssh()
         self.terminate_instance()
diff --git a/tempest/common/utils/test_utils.py b/tempest/scenario/utils.py
similarity index 85%
rename from tempest/common/utils/test_utils.py
rename to tempest/scenario/utils.py
index cc0d831..4c7b6d7 100644
--- a/tempest/common/utils/test_utils.py
+++ b/tempest/scenario/utils.py
@@ -12,15 +12,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest import clients
-from tempest.common.utils import misc
-from tempest import config
 
 import json
 import re
 import string
 import unicodedata
 
+import testscenarios
+import testtools
+
+from tempest import clients
+from tempest.common.utils import misc
+from tempest import config
+
 CONF = config.CONF
 
 
@@ -79,7 +83,7 @@
 
     class TestInputScenario(manager.OfficialClientTest):
 
-        scenario_utils = test_utils.InputScenarioUtils()
+        scenario_utils = utils.InputScenarioUtils()
         scenario_flavor = scenario_utils.scenario_flavors
         scenario_image = scenario_utils.scenario_images
         scenarios = testscenarios.multiply_scenarios(scenario_image,
@@ -134,3 +138,22 @@
                 for f in flavors if re.search(self.flavor_pattern, str(f.name))
             ]
         return self._scenario_flavors
+
+
+def load_tests_input_scenario_utils(*args):
+    """
+    Wrapper for testscenarios that sets the scenarios at load time, avoiding
+    a getattr on the CONF object at import.
+    """
+    if getattr(args[0], 'suiteClass', None) is not None:
+        loader, standard_tests, pattern = args
+    else:
+        standard_tests, module, loader = args
+    scenario_utils = InputScenarioUtils()
+    scenario_flavor = scenario_utils.scenario_flavors
+    scenario_image = scenario_utils.scenario_images
+    for test in testtools.iterate_tests(standard_tests):
+        setattr(test, 'scenarios', testscenarios.multiply_scenarios(
+            scenario_image,
+            scenario_flavor))
+    return testscenarios.load_tests_apply_scenarios(*args)
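For context, the two argument orders handled above come from the load_tests protocol: unittest discovery calls load_tests(loader, standard_tests, pattern), while testscenarios may pass (standard_tests, module, loader). A test module opts in with a single module-level assignment, as test_server_basic_ops.py now does:

    from tempest.scenario import utils as test_utils

    # Scenarios are attached to each test when the suite is loaded rather
    # than at import time, so CONF is not read while importing the module.
    load_tests = test_utils.load_tests_input_scenario_utils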
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 5f6b513..2af287f 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -12,6 +12,7 @@
 
 import functools
 import json
+import urllib
 
 import six
 
@@ -103,16 +104,19 @@
 
         return patch
 
-    def _list_request(self, resource, permanent=False):
+    def _list_request(self, resource, permanent=False, **kwargs):
         """
         Get the list of objects of the specified type.
 
         :param resource: The name of the REST resource, e.g., 'nodes'.
+        :param **kwargs: Parameters for the request.
         :return: A tuple with the server response and deserialized JSON list
                  of objects
 
         """
         uri = self._get_uri(resource, permanent=permanent)
+        if kwargs:
+            uri += "?%s" % urllib.urlencode(kwargs)
 
         resp, body = self.get(uri)
 
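The kwargs handling added to _list_request() is plain urllib.urlencode over the keyword arguments; an illustrative call with a made-up filter value:

    import urllib

    # e.g. _list_request('ports', address='fa:16:3e:00:00:01')
    kwargs = {'address': 'fa:16:3e:00:00:01'}
    uri = 'ports'
    if kwargs:
        uri += "?%s" % urllib.urlencode(kwargs)
    # uri == 'ports?address=fa%3A16%3A3e%3A00%3A00%3A01'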
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 3f4c509..296a199 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -37,9 +37,24 @@
         return self._list_request('chassis')
 
     @base.handle_errors
-    def list_ports(self):
+    def list_ports(self, **kwargs):
         """List all existing ports."""
-        return self._list_request('ports')
+        return self._list_request('ports', **kwargs)
+
+    @base.handle_errors
+    def list_nodestates(self, uuid):
+        """List the states of the given node."""
+        return self._list_request('/nodes/%s/states' % uuid)
+
+    @base.handle_errors
+    def list_ports_detail(self):
+        """List all existing ports with details."""
+        return self._list_request('/ports/detail')
+
+    @base.handle_errors
+    def list_drivers(self):
+        """List all existing drivers."""
+        return self._list_request('drivers')
 
     @base.handle_errors
     def show_node(self, uuid):
@@ -116,12 +131,20 @@
         Create a port with the specified parameters.
 
         :param node_id: The ID of the node which owns the port.
-        :param address: MAC address of the port. Default: 01:23:45:67:89:0A.
+        :param address: MAC address of the port.
+        :param extra: Metadata of the port. Default: {'foo': 'bar'}.
+        :param uuid: UUID of the port.
         :return: A tuple with the server response and the created port.
 
         """
-        port = {'address': kwargs.get('address', '01:23:45:67:89:0A'),
-                'node_uuid': node_id}
+        port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
+                'uuid': kwargs['uuid']}
+
+        if node_id is not None:
+            port['node_uuid'] = node_id
+
+        if kwargs['address'] is not None:
+            port['address'] = kwargs['address']
 
         return self._create_request('ports', 'port', port)
 
@@ -192,15 +215,14 @@
         return self._patch_request('chassis', uuid, patch)
 
     @base.handle_errors
-    def update_port(self, uuid, **kwargs):
+    def update_port(self, uuid, patch):
         """
         Update the specified port.
 
         :param uuid: The unique identifier of the port.
+        :param patch: List of dicts representing json patches.
         :return: A tuple with the server response and the updated port.
 
         """
-        port_attributes = ('address',)
-        patch = self._make_patch(port_attributes, **kwargs)
 
         return self._patch_request('ports', uuid, patch)
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
new file mode 100644
index 0000000..98d8896
--- /dev/null
+++ b/tempest/services/compute/json/agents_client.py
@@ -0,0 +1,61 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import agents as common_schema
+from tempest.api_schema.compute.v2 import agents as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class AgentsClientJSON(rest_client.RestClient):
+    """
+    Tests Agents API
+    """
+
+    def __init__(self, auth_provider):
+        super(AgentsClientJSON, self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_type
+
+    def list_agents(self, params=None):
+        """List all agent builds."""
+        url = 'os-agents'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(common_schema.list_agents, resp, body)
+        return resp, body['agents']
+
+    def create_agent(self, **kwargs):
+        """Create an agent build."""
+        post_body = json.dumps({'agent': kwargs})
+        resp, body = self.post('os-agents', post_body)
+        return resp, self._parse_resp(body)
+
+    def delete_agent(self, agent_id):
+        """Delete an existing agent build."""
+        resp, body = self.delete("os-agents/%s" % str(agent_id))
+        self.validate_response(schema.delete_agent, resp, body)
+        return resp, body
+
+    def update_agent(self, agent_id, **kwargs):
+        """Update an agent build."""
+        put_body = json.dumps({'para': kwargs})
+        resp, body = self.put('os-agents/%s' % str(agent_id), put_body)
+        return resp, self._parse_resp(body)
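A hedged usage sketch of the new client: the method names are the ones defined above, while the create_agent keyword arguments follow the usual os-agents build fields and the concrete values are made up for illustration:

    # 'client' is assumed to be an AgentsClientJSON wired up with a valid
    # auth provider (for example via tempest.clients.Manager()).
    resp, agents = client.list_agents(params={'hypervisor': 'kvm'})
    resp, agent = client.create_agent(hypervisor='kvm', os='linux',
                                      architecture='x86_64', version='7.0',
                                      url='http://example.com/agent',
                                      md5hash='add6bb58e139be103324d04d82d8f545')
    resp, body = client.delete_agent(agent['agent_id'])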
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index ccb85c4..5c0b5d3 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -16,6 +16,7 @@
 import json
 
 from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -40,6 +41,7 @@
         """Get details of the given aggregate."""
         resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
         body = json.loads(body)
+        self.validate_response(schema.get_aggregate, resp, body)
         return resp, body['aggregate']
 
     def create_aggregate(self, **kwargs):
@@ -60,11 +62,14 @@
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
 
         body = json.loads(body)
+        self.validate_response(schema.update_aggregate, resp, body)
         return resp, body['aggregate']
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
-        return self.delete("os-aggregates/%s" % str(aggregate_id))
+        resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+        self.validate_response(v2_schema.delete_aggregate, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         try:
@@ -104,4 +109,5 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.aggregate_set_metadata, resp, body)
         return resp, body['aggregate']
diff --git a/tempest/services/compute/json/availability_zone_client.py b/tempest/services/compute/json/availability_zone_client.py
index 9278d5b..1c067e8 100644
--- a/tempest/services/compute/json/availability_zone_client.py
+++ b/tempest/services/compute/json/availability_zone_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v2 import availability_zone as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,9 +32,12 @@
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list, resp, body)
         return resp, body['availabilityZoneInfo']
 
     def get_availability_zone_list_detail(self):
         resp, body = self.get('os-availability-zone/detail')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list_detail, resp,
+                               body)
         return resp, body['availabilityZoneInfo']
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +33,7 @@
         url = "os-certificates/%s" % (id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_certificate, resp, body)
         return resp, body['certificate']
 
     def create_certificate(self):
@@ -38,4 +41,5 @@
         url = "os-certificates"
         resp, body = self.post(url, None)
         body = json.loads(body)
+        self.validate_response(v2schema.create_certificate, resp, body)
         return resp, body['certificate']
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
index 5fdd564..f2d5cbe 100644
--- a/tempest/services/compute/json/fixed_ips_client.py
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -39,4 +39,5 @@
         """This reserves and unreserves fixed ips."""
         url = "os-fixed-ips/%s/action" % (ip)
         resp, body = self.post(url, json.dumps(body))
+        self.validate_response(schema.fixed_ip_action, resp, body)
         return resp, body
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index bc4a64f..89cbe1d 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -18,6 +18,9 @@
 
 from tempest.api_schema.compute import flavors as common_schema
 from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+    as schema_extra_specs
+from tempest.api_schema.compute.v2 import flavors as v2schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -47,11 +50,13 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(v2schema.list_flavors_details, resp, body)
         return resp, body['flavors']
 
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
         body = json.loads(body)
+        self.validate_response(v2schema.create_get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -75,11 +80,14 @@
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
+        self.validate_response(v2schema.create_get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
-        return self.delete("flavors/%s" % str(flavor_id))
+        resp, body = self.delete("flavors/{0}".format(flavor_id))
+        self.validate_response(v2schema.delete_flavor, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         # Did not use get_flavor_details(id) for verification as it gives
@@ -97,12 +105,16 @@
         resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
         resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -110,6 +122,8 @@
         resp, body = self.get('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
                               key))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -117,18 +131,23 @@
         resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                               (flavor_id, key), json.dumps(kwargs))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def unset_flavor_extra_spec(self, flavor_id, key):
         """Unsets extra Specs from the mentioned flavor."""
-        return self.delete('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
-                           key))
+        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
+                                 (str(flavor_id), key))
+        self.validate_response(v2schema.unset_flavor_extra_specs, resp, body)
+        return resp, body
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
         resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
         body = json.loads(body)
-        self.validate_response(schema_access.list_flavor_access, resp, body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def add_flavor_access(self, flavor_id, tenant_id):
@@ -141,6 +160,8 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def remove_flavor_access(self, flavor_id, tenant_id):
@@ -153,4 +174,6 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index eeb417a..e148572 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -16,6 +16,7 @@
 import urllib
 
 from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v2 import hosts as v2_schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -67,6 +68,7 @@
 
         resp, body = self.get("os-hosts/%s/startup" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v2_schema.startup_host, resp, body)
         return resp, body['host']
 
     def shutdown_host(self, hostname):
@@ -74,6 +76,7 @@
 
         resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v2_schema.shutdown_host, resp, body)
         return resp, body['host']
 
     def reboot_host(self, hostname):
@@ -81,4 +84,5 @@
 
         resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v2_schema.reboot_host, resp, body)
         return resp, body['host']
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index bd39a04..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -70,6 +70,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_images_details, resp, body)
         return resp, body['images']
 
     def get_image(self, image_id):
diff --git a/tempest/services/compute/json/instance_usage_audit_log_client.py b/tempest/services/compute/json/instance_usage_audit_log_client.py
index 4088be9..4700ca7 100644
--- a/tempest/services/compute/json/instance_usage_audit_log_client.py
+++ b/tempest/services/compute/json/instance_usage_audit_log_client.py
@@ -34,6 +34,8 @@
         url = 'os-instance_usage_audit_log'
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_instance_usage_audit_log,
+                               resp, body)
         return resp, body["instance_usage_audit_logs"]
 
     def get_instance_usage_audit_log(self, time_before):
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 9928b94..2f165a2 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -16,6 +16,7 @@
 import json
 import time
 
+from tempest.api_schema.compute import interfaces as common_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -58,6 +59,7 @@
     def delete_interface(self, server, port_id):
         resp, body = self.delete('servers/%s/os-interface/%s' % (server,
                                                                  port_id))
+        self.validate_response(common_schema.delete_interface, resp, body)
         return resp, body
 
     def wait_for_interface_status(self, server, port_id, status):
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
new file mode 100644
index 0000000..beef5d2
--- /dev/null
+++ b/tempest/services/compute/json/migrations_client.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import migrations as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(MigrationsClientJSON, self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_type
+
+    def list_migrations(self, params=None):
+        """Lists all migrations."""
+
+        url = 'os-migrations'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.list_migrations, resp, body)
+        return resp, body['migrations']
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 9346183..9bddf2c 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -48,8 +48,8 @@
         self.validate_response(schema.quota_set, resp, body)
         return resp, body['quota_set']
 
-    def update_quota_set(self, tenant_id, force=None,
-                         injected_file_content_bytes=None,
+    def update_quota_set(self, tenant_id, user_id=None,
+                         force=None, injected_file_content_bytes=None,
                          metadata_items=None, ram=None, floating_ips=None,
                          fixed_ips=None, key_pairs=None, instances=None,
                          security_group_rules=None, injected_files=None,
@@ -101,11 +101,19 @@
             post_body['security_groups'] = security_groups
 
         post_body = json.dumps({'quota_set': post_body})
-        resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body)
+
+        if user_id:
+            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+                                  (str(tenant_id), str(user_id)), post_body)
+        else:
+            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+                                  post_body)
 
         body = json.loads(body)
         return resp, body['quota_set']
 
     def delete_quota_set(self, tenant_id):
         """Delete the tenant's quota set."""
-        return self.delete('os-quota-sets/%s' % str(tenant_id))
+        resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+        self.validate_response(schema.delete_quota, resp, body)
+        return resp, body
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 9267be7..c19baf3 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -47,6 +47,7 @@
         url = "os-security-groups/%s" % str(security_group_id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_security_group, resp, body)
         return resp, body['security_group']
 
     def create_security_group(self, name, description):
@@ -62,6 +63,7 @@
         post_body = json.dumps({'security_group': post_body})
         resp, body = self.post('os-security-groups', post_body)
         body = json.loads(body)
+        self.validate_response(schema.get_security_group, resp, body)
         return resp, body['security_group']
 
     def update_security_group(self, security_group_id, name=None,
@@ -81,6 +83,7 @@
         resp, body = self.put('os-security-groups/%s' % str(security_group_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_security_group, resp, body)
         return resp, body['security_group']
 
     def delete_security_group(self, security_group_id):
@@ -111,11 +114,15 @@
         url = 'os-security-group-rules'
         resp, body = self.post(url, post_body)
         body = json.loads(body)
+        self.validate_response(schema.create_security_group_rule, resp, body)
         return resp, body['security_group_rule']
 
     def delete_security_group_rule(self, group_rule_id):
         """Deletes the provided Security Group rule."""
-        return self.delete('os-security-group-rules/%s' % str(group_rule_id))
+        resp, body = self.delete('os-security-group-rules/%s' %
+                                 str(group_rule_id))
+        self.validate_response(schema.delete_security_group_rule, resp, body)
+        return resp, body
 
     def list_security_group_rules(self, security_group_id):
         """List all rules for a security group."""
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index d6705db..82ded4c 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -18,6 +18,7 @@
 import time
 import urllib
 
+from tempest.api_schema.compute import servers as common_schema
 from tempest.api_schema.compute.v2 import servers as schema
 from tempest.common import rest_client
 from tempest.common import waiters
@@ -135,7 +136,9 @@
 
     def delete_server(self, server_id):
         """Deletes the given server."""
-        return self.delete("servers/%s" % str(server_id))
+        resp, body = self.delete("servers/%s" % str(server_id))
+        self.validate_response(common_schema.delete_server, resp, body)
+        return resp, body
 
     def list_servers(self, params=None):
         """Lists all servers for a user."""
@@ -197,12 +200,21 @@
         body = json.loads(body)
         return resp, body
 
-    def action(self, server_id, action_name, response_key, **kwargs):
+    def action(self, server_id, action_name, response_key,
+               schema=None, **kwargs):
         post_body = json.dumps({action_name: kwargs})
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
         if response_key is not None:
-            body = json.loads(body)[response_key]
+            body = json.loads(body)
+            # Check for the schema being 'None' because any server action
+            # that does not have a response schema implemented yet can pass
+            # 'None' to skip the validation. Once all server actions have
+            # their schemas implemented, this check can be removed if every
+            # action is supposed to validate its response.
+            if schema is not None:
+                self.validate_response(schema, resp, body)
+            body = body[response_key]
         return resp, body
 
     def create_backup(self, server_id, backup_type, rotation, name):
@@ -221,6 +233,7 @@
         resp, body = self.get("servers/%s/os-server-password" %
                               str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.get_password, resp, body)
         return resp, body
 
     def delete_password(self, server_id):
@@ -260,13 +273,10 @@
         """Reverts a server back to its original flavor."""
         return self.action(server_id, 'revertResize', None, **kwargs)
 
-    def create_image(self, server_id, name):
-        """Creates an image of the given server."""
-        return self.action(server_id, 'createImage', None, name=name)
-
     def list_server_metadata(self, server_id):
         resp, body = self.get("servers/%s/metadata" % str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.list_server_metadata, resp, body)
         return resp, body['metadata']
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -277,6 +287,7 @@
         resp, body = self.put('servers/%s/metadata' % str(server_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.set_server_metadata, resp, body)
         return resp, body['metadata']
 
     def update_server_metadata(self, server_id, meta):
@@ -319,12 +330,15 @@
         })
         resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
                                post_body)
+        body = json.loads(body)
+        self.validate_response(schema.attach_volume, resp, body)
         return resp, body
 
     def detach_volume(self, server_id, volume_id):
         """Detaches a volume from a server instance."""
         resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
                                  (server_id, volume_id))
+        self.validate_response(schema.detach_volume, resp, body)
         return resp, body
 
     def add_security_group(self, server_id, name):
@@ -403,7 +417,9 @@
         """
         resp, body = self.get('/'.join(['servers', server_id,
                               'os-virtual-interfaces']))
-        return resp, json.loads(body)
+        body = json.loads(body)
+        self.validate_response(schema.list_virtual_interfaces, resp, body)
+        return resp, body
 
     def rescue_server(self, server_id, **kwargs):
         """Rescue the provided server."""
@@ -451,4 +467,5 @@
     def get_vnc_console(self, server_id, console_type):
         """Get URL of VNC console."""
         return self.action(server_id, "os-getVNCConsole",
-                           "console", type=console_type)
+                           "console", common_schema.get_vnc_console,
+                           type=console_type)
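The schema argument threaded through action() above makes response validation opt-in per server action, as the get_vnc_console() change shows; callers whose actions have no response schema yet simply keep passing None, e.g. (illustrative, mirroring the existing wrapper style):

    def pause_server(self, server_id, **kwargs):
        # No response body schema for this action yet, so skip validation.
        return self.action(server_id, 'pause', None, **kwargs)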
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index 0f7d4cb..d58ca6f 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -49,6 +49,7 @@
         post_body = json.dumps({'binary': binary, 'host': host_name})
         resp, body = self.put('os-services/enable', post_body)
         body = json.loads(body)
+        self.validate_response(schema.enable_service, resp, body)
         return resp, body['service']
 
     def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 17468eb..d1014af 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -61,7 +61,7 @@
         url = "os-volumes/%s" % str(volume_id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.validate_response(schema.get_volume, resp, body)
+        self.validate_response(schema.create_get_volume, resp, body)
         return resp, body['volume']
 
     def create_volume(self, size, **kwargs):
@@ -81,11 +81,14 @@
         post_body = json.dumps({'volume': post_body})
         resp, body = self.post('os-volumes', post_body)
         body = json.loads(body)
+        self.validate_response(schema.create_get_volume, resp, body)
         return resp, body['volume']
 
     def delete_volume(self, volume_id):
         """Deletes the Specified Volume."""
-        return self.delete("os-volumes/%s" % str(volume_id))
+        resp, body = self.delete("os-volumes/%s" % str(volume_id))
+        self.validate_response(schema.delete_volume, resp, body)
+        return resp, body
 
     def wait_for_volume_status(self, volume_id, status):
         """Waits for a Volume to reach a given status."""
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index 6893af2..48be54c 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -15,6 +15,8 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import agents as common_schema
+from tempest.api_schema.compute.v3 import agents as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -33,7 +35,9 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
-        return resp, self._parse_resp(body)
+        body = json.loads(body)
+        self.validate_response(common_schema.list_agents, resp, body)
+        return resp, body['agents']
 
     def create_agent(self, **kwargs):
         """Create an agent build."""
@@ -43,7 +47,9 @@
 
     def delete_agent(self, agent_id):
         """Delete an existing agent build."""
-        return self.delete('os-agents/%s' % str(agent_id))
+        resp, body = self.delete("os-agents/%s" % str(agent_id))
+        self.validate_response(schema.delete_agent, resp, body)
+        return resp, body
 
     def update_agent(self, agent_id, **kwargs):
         """Update an agent build."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 7f73622..2487ee7 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -16,6 +16,7 @@
 import json
 
 from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -40,6 +41,7 @@
         """Get details of the given aggregate."""
         resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
         body = json.loads(body)
+        self.validate_response(schema.get_aggregate, resp, body)
         return resp, body['aggregate']
 
     def create_aggregate(self, **kwargs):
@@ -60,11 +62,14 @@
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
 
         body = json.loads(body)
+        self.validate_response(schema.update_aggregate, resp, body)
         return resp, body['aggregate']
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
-        return self.delete("os-aggregates/%s" % str(aggregate_id))
+        resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+        self.validate_response(v3_schema.delete_aggregate, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         try:
@@ -104,4 +109,5 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.aggregate_set_metadata, resp, body)
         return resp, body['aggregate']
diff --git a/tempest/services/compute/v3/json/availability_zone_client.py b/tempest/services/compute/v3/json/availability_zone_client.py
index bad2de9..bf74e68 100644
--- a/tempest/services/compute/v3/json/availability_zone_client.py
+++ b/tempest/services/compute/v3/json/availability_zone_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v3 import availability_zone as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,9 +32,12 @@
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list, resp, body)
         return resp, body['availability_zone_info']
 
     def get_availability_zone_list_detail(self):
         resp, body = self.get('os-availability-zone/detail')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list_detail, resp,
+                               body)
         return resp, body['availability_zone_info']
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +33,7 @@
         url = "os-certificates/%s" % (id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_certificate, resp, body)
         return resp, body['certificate']
 
     def create_certificate(self):
@@ -38,4 +41,5 @@
         url = "os-certificates"
         resp, body = self.post(url, None)
         body = json.loads(body)
+        self.validate_response(v3schema.create_certificate, resp, body)
         return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 3fdb3ca..5afab5a 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -18,6 +18,9 @@
 
 from tempest.api_schema.compute import flavors as common_schema
 from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+    as schema_extra_specs
+from tempest.api_schema.compute.v3 import flavors as v3schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -47,11 +50,13 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(v3schema.list_flavors_details, resp, body)
         return resp, body['flavors']
 
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
         body = json.loads(body)
+        self.validate_response(v3schema.get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -75,11 +80,14 @@
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
+        self.validate_response(v3schema.create_flavor_details, resp, body)
         return resp, body['flavor']
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
-        return self.delete("flavors/%s" % str(flavor_id))
+        resp, body = self.delete("flavors/{0}".format(flavor_id))
+        self.validate_response(v3schema.delete_flavor, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         # Did not use get_flavor_details(id) for verification as it gives
@@ -97,12 +105,15 @@
         resp, body = self.post('flavors/%s/flavor-extra-specs' % flavor_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(v3schema.set_flavor_extra_specs, resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
         resp, body = self.get('flavors/%s/flavor-extra-specs' % flavor_id)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -110,6 +121,8 @@
         resp, body = self.get('flavors/%s/flavor-extra-specs/%s' %
                               (str(flavor_id), key))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -117,18 +130,23 @@
         resp, body = self.put('flavors/%s/flavor-extra-specs/%s' %
                               (flavor_id, key), json.dumps(kwargs))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def unset_flavor_extra_spec(self, flavor_id, key):
         """Unsets extra Specs from the mentioned flavor."""
-        return self.delete('flavors/%s/flavor-extra-specs/%s' %
-                           (str(flavor_id), key))
+        resp, body = self.delete('flavors/%s/flavor-extra-specs/%s' %
+                                 (str(flavor_id), key))
+        self.validate_response(v3schema.unset_flavor_extra_specs, resp, body)
+        return resp, body
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
         resp, body = self.get('flavors/%s/flavor-access' % flavor_id)
         body = json.loads(body)
-        self.validate_response(schema_access.list_flavor_access, resp, body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def add_flavor_access(self, flavor_id, tenant_id):
@@ -141,6 +159,8 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def remove_flavor_access(self, flavor_id, tenant_id):
@@ -153,4 +173,6 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
diff --git a/tempest/services/compute/v3/json/hosts_client.py b/tempest/services/compute/v3/json/hosts_client.py
index db7134c..24d43d0 100644
--- a/tempest/services/compute/v3/json/hosts_client.py
+++ b/tempest/services/compute/v3/json/hosts_client.py
@@ -16,6 +16,7 @@
 import urllib
 
 from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v3 import hosts as v3_schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -67,6 +68,7 @@
 
         resp, body = self.get("os-hosts/%s/startup" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v3_schema.startup_host, resp, body)
         return resp, body['host']
 
     def shutdown_host(self, hostname):
@@ -74,6 +76,7 @@
 
         resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v3_schema.shutdown_host, resp, body)
         return resp, body['host']
 
     def reboot_host(self, hostname):
@@ -81,4 +84,5 @@
 
         resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v3_schema.reboot_host, resp, body)
         return resp, body['host']
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index b45426c..25c8db7 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -16,6 +16,7 @@
 import json
 import time
 
+from tempest.api_schema.compute import interfaces as common_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -59,6 +60,7 @@
         resp, body =\
             self.delete('servers/%s/os-attach-interfaces/%s' % (server,
                                                                 port_id))
+        self.validate_response(common_schema.delete_interface, resp, body)
         return resp, body
 
     def wait_for_interface_status(self, server, port_id, status):
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
new file mode 100644
index 0000000..c821567
--- /dev/null
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import migrations as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsV3ClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(MigrationsV3ClientJSON, self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_v3_type
+
+    def list_migrations(self, params=None):
+        """Lists all migrations."""
+
+        url = 'os-migrations'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.list_migrations, resp, body)
+        return resp, body['migrations']
diff --git a/tempest/services/compute/v3/json/quotas_client.py b/tempest/services/compute/v3/json/quotas_client.py
index a8507c4..37a8906 100644
--- a/tempest/services/compute/v3/json/quotas_client.py
+++ b/tempest/services/compute/v3/json/quotas_client.py
@@ -45,6 +45,7 @@
         url = 'os-quota-sets/%s/detail' % str(tenant_id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.quota_set_detail, resp, body)
         return resp, body['quota_set']
 
     def get_default_quota_set(self, tenant_id):
@@ -56,7 +57,7 @@
         self.validate_response(schema.quota_set, resp, body)
         return resp, body['quota_set']
 
-    def update_quota_set(self, tenant_id, force=None,
+    def update_quota_set(self, tenant_id, user_id=None, force=None,
                          metadata_items=None, ram=None, floating_ips=None,
                          fixed_ips=None, key_pairs=None, instances=None,
                          security_group_rules=None, cores=None,
@@ -97,7 +98,13 @@
             post_body['security_groups'] = security_groups
 
         post_body = json.dumps({'quota_set': post_body})
-        resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body)
+
+        if user_id:
+            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+                                  (str(tenant_id), str(user_id)), post_body)
+        else:
+            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+                                  post_body)
 
         body = json.loads(body)
         self.validate_response(schema.quota_set, resp, body)
@@ -105,4 +112,6 @@
 
     def delete_quota_set(self, tenant_id):
         """Delete the tenant's quota set."""
-        return self.delete('os-quota-sets/%s' % str(tenant_id))
+        resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+        self.validate_response(schema.delete_quota, resp, body)
+        return resp, body
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 6f492d0..67232a8 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -19,6 +19,7 @@
 import time
 import urllib
 
+from tempest.api_schema.compute import servers as common_schema
 from tempest.api_schema.compute.v3 import servers as schema
 from tempest.common import rest_client
 from tempest.common import waiters
@@ -135,7 +136,9 @@
 
     def delete_server(self, server_id):
         """Deletes the given server."""
-        return self.delete("servers/%s" % str(server_id))
+        resp, body = self.delete("servers/%s" % str(server_id))
+        self.validate_response(common_schema.delete_server, resp, body)
+        return resp, body
 
     def list_servers(self, params=None):
         """Lists all servers for a user."""
@@ -221,6 +224,7 @@
         resp, body = self.get("servers/%s/os-server-password" %
                               str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.get_password, resp, body)
         return resp, body
 
     def delete_password(self, server_id):
@@ -280,6 +284,7 @@
     def list_server_metadata(self, server_id):
         resp, body = self.get("servers/%s/metadata" % str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.list_server_metadata, resp, body)
         return resp, body['metadata']
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -290,6 +295,7 @@
         resp, body = self.put('servers/%s/metadata' % str(server_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.set_server_metadata, resp, body)
         return resp, body['metadata']
 
     def update_server_metadata(self, server_id, meta):
@@ -324,12 +330,17 @@
 
     def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
         """Attaches a volume to a server instance."""
-        return self.action(server_id, 'attach', None, volume_id=volume_id,
-                           device=device)
+        resp, body = self.action(server_id, 'attach', None,
+                                 volume_id=volume_id, device=device)
+        self.validate_response(schema.attach_detach_volume, resp, body)
+        return resp, body
 
     def detach_volume(self, server_id, volume_id):
         """Detaches a volume from a server instance."""
-        return self.action(server_id, 'detach', None, volume_id=volume_id)
+        resp, body = self.action(server_id, 'detach', None,
+                                 volume_id=volume_id)
+        self.validate_response(schema.attach_detach_volume, resp, body)
+        return resp, body
 
     def live_migrate_server(self, server_id, dest_host, use_block_migration):
         """This should be called with administrator privileges ."""
@@ -408,19 +419,19 @@
                               str(server_id))
         return resp, json.loads(body)
 
-    def list_instance_actions(self, server_id):
+    def list_server_actions(self, server_id):
         """List the provided server action."""
-        resp, body = self.get("servers/%s/os-instance-actions" %
+        resp, body = self.get("servers/%s/os-server-actions" %
                               str(server_id))
         body = json.loads(body)
-        return resp, body['instance_actions']
+        return resp, body['server_actions']
 
-    def get_instance_action(self, server_id, request_id):
+    def get_server_action(self, server_id, request_id):
         """Returns the action details of the provided server."""
-        resp, body = self.get("servers/%s/os-instance-actions/%s" %
+        resp, body = self.get("servers/%s/os-server-actions/%s" %
                               (str(server_id), str(request_id)))
         body = json.loads(body)
-        return resp, body['instance_action']
+        return resp, body['server_action']
 
     def force_delete_server(self, server_id, **kwargs):
         """Force delete a server."""
@@ -440,6 +451,7 @@
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.get_vnc_console, resp, body)
         return resp, body['console']
 
     def reset_network(self, server_id, **kwargs):
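
Several v3 server calls above now run their responses through validate_response before returning, and the os-instance-actions helpers move to the server_actions names. A toy standalone sketch of what the added check amounts to for delete_server, where the schema boils down to the accepted status codes (the dict below is a stand-in, not the real tempest.api_schema entry):

    delete_server = {'status_code': [204]}

    def check_status(schema, resp):
        # Raise if the HTTP status is not one the schema allows.
        if int(resp['status']) not in schema['status_code']:
            raise AssertionError('unexpected status: %s' % resp['status'])

    check_status(delete_server, {'status': '204'})  # passes silently
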
diff --git a/tempest/services/compute/v3/json/services_client.py b/tempest/services/compute/v3/json/services_client.py
index 88c4d16..96ff580 100644
--- a/tempest/services/compute/v3/json/services_client.py
+++ b/tempest/services/compute/v3/json/services_client.py
@@ -54,6 +54,7 @@
         })
         resp, body = self.put('os-services/enable', post_body)
         body = json.loads(body)
+        self.validate_response(schema.enable_service, resp, body)
         return resp, body['service']
 
     def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
index b5f7678..9c2d4aa 100644
--- a/tempest/services/compute/xml/aggregates_client.py
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -61,9 +61,11 @@
 
     def create_aggregate(self, name, availability_zone=None):
         """Creates a new aggregate."""
-        post_body = xml_utils.Element("aggregate",
-                                      name=name,
-                                      availability_zone=availability_zone)
+        if availability_zone is not None:
+            post_body = xml_utils.Element("aggregate", name=name,
+                                          availability_zone=availability_zone)
+        else:
+            post_body = xml_utils.Element("aggregate", name=name)
         resp, body = self.post('os-aggregates',
                                str(xml_utils.Document(post_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
@@ -71,9 +73,11 @@
 
     def update_aggregate(self, aggregate_id, name, availability_zone=None):
         """Update a aggregate."""
-        put_body = xml_utils.Element("aggregate",
-                                     name=name,
-                                     availability_zone=availability_zone)
+        if availability_zone is not None:
+            put_body = xml_utils.Element("aggregate", name=name,
+                                         availability_zone=availability_zone)
+        else:
+            put_body = xml_utils.Element("aggregate", name=name)
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id),
                               str(xml_utils.Document(put_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
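
The conditional above keeps availability_zone out of the request body when the caller did not supply one. A standalone illustration of the idea using the stdlib ElementTree (tempest's xml_utils serialization details may differ; the element and values are placeholders):

    from xml.etree import ElementTree as ET

    with_az = ET.Element('aggregate', name='agg1', availability_zone='nova')
    without_az = ET.Element('aggregate', name='agg1')
    print(ET.tostring(with_az))     # ...availability_zone="nova"... is sent
    print(ET.tostring(without_az))  # only the name attribute is sent
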
diff --git a/tempest/services/compute/xml/quotas_client.py b/tempest/services/compute/xml/quotas_client.py
index 8a521ab..5502fcc 100644
--- a/tempest/services/compute/xml/quotas_client.py
+++ b/tempest/services/compute/xml/quotas_client.py
@@ -61,8 +61,8 @@
         body = self._format_quota(body)
         return resp, body
 
-    def update_quota_set(self, tenant_id, force=None,
-                         injected_file_content_bytes=None,
+    def update_quota_set(self, tenant_id, user_id=None,
+                         force=None, injected_file_content_bytes=None,
                          metadata_items=None, ram=None, floating_ips=None,
                          fixed_ips=None, key_pairs=None, instances=None,
                          security_group_rules=None, injected_files=None,
@@ -115,8 +115,14 @@
         if security_groups is not None:
             post_body.add_attr('security_groups', security_groups)
 
-        resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
-                              str(xml_utils.Document(post_body)))
+        if user_id:
+            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+                                  (str(tenant_id), str(user_id)),
+                                  str(xml_utils.Document(post_body)))
+        else:
+            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+                                  str(xml_utils.Document(post_body)))
+
         body = xml_utils.xml_to_json(etree.fromstring(body))
         body = self._format_quota(body)
         return resp, body
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 37de147..c1105f9 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -119,6 +119,10 @@
                 '/compute/ext/extended_status/api/v1.1}vm_state')
     task_state = ('{http://docs.openstack.org'
                   '/compute/ext/extended_status/api/v1.1}task_state')
+    if 'tenantId' in json:
+        json['tenant_id'] = json.pop('tenantId')
+    if 'userId' in json:
+        json['user_id'] = json.pop('userId')
     if diskConfig in json:
         json['OS-DCF:diskConfig'] = json.pop(diskConfig)
     if terminated_at in json:
@@ -242,13 +246,19 @@
             array.append(xml_utils.xml_to_json(child))
         return array
 
+    def _parse_server_array(self, node):
+        array = []
+        for child in node.getchildren():
+            array.append(self._parse_server(child))
+        return array
+
     def list_servers(self, params=None):
         url = 'servers'
         if params:
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
-        servers = self._parse_array(etree.fromstring(body))
+        servers = self._parse_server_array(etree.fromstring(body))
         return resp, {"servers": servers}
 
     def list_servers_with_detail(self, params=None):
@@ -257,7 +267,7 @@
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
-        servers = self._parse_array(etree.fromstring(body))
+        servers = self._parse_server_array(etree.fromstring(body))
         return resp, {"servers": servers}
 
     def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
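
The new _parse_server_array routes every child of <servers> through the same per-server translation, so list results get the tenantId/userId renaming added in the earlier hunk. A standalone sketch with the stdlib ElementTree and a toy normaliser (not tempest's xml_utils) that shows only that key renaming:

    from xml.etree import ElementTree as ET

    def parse_server(node):
        # toy normaliser: only the key renaming shown above
        server = dict(node.attrib)
        if 'tenantId' in server:
            server['tenant_id'] = server.pop('tenantId')
        if 'userId' in server:
            server['user_id'] = server.pop('userId')
        return server

    body = ET.fromstring('<servers>'
                         '<server id="s1" tenantId="t1" userId="u1"/>'
                         '<server id="s2" tenantId="t2" userId="u2"/>'
                         '</servers>')
    print([parse_server(child) for child in body])
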
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index e96b44b..c7b5f93 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -32,7 +32,6 @@
 
         It returns pair: resp and parsed resource(s) body.
         """
-
         resp, body = req_fun(uri, headers={
             'Content-Type': 'application/json'
         }, *args, **kwargs)
@@ -48,7 +47,7 @@
     def get_node_group_template(self, tmpl_id):
         """Returns the details of a single node group template."""
 
-        uri = "node-group-templates/%s" % tmpl_id
+        uri = 'node-group-templates/%s' % tmpl_id
         return self._request_and_parse(self.get, uri, 'node_group_template')
 
     def create_node_group_template(self, name, plugin_name, hadoop_version,
@@ -59,7 +58,7 @@
         It supports passing additional params using kwargs and returns created
         object.
         """
-        uri = "node-group-templates"
+        uri = 'node-group-templates'
         body = kwargs.copy()
         body.update({
             'name': name,
@@ -75,7 +74,7 @@
     def delete_node_group_template(self, tmpl_id):
         """Deletes the specified node group template by id."""
 
-        uri = "node-group-templates/%s" % tmpl_id
+        uri = 'node-group-templates/%s' % tmpl_id
         return self.delete(uri)
 
     def list_plugins(self):
@@ -87,7 +86,45 @@
     def get_plugin(self, plugin_name, plugin_version=None):
         """Returns the details of a single plugin."""
 
-        uri = "plugins/%s" % plugin_name
+        uri = 'plugins/%s' % plugin_name
         if plugin_version:
             uri += '/%s' % plugin_version
         return self._request_and_parse(self.get, uri, 'plugin')
+
+    def list_cluster_templates(self):
+        """List all cluster templates for a user."""
+
+        uri = 'cluster-templates'
+        return self._request_and_parse(self.get, uri, 'cluster_templates')
+
+    def get_cluster_template(self, tmpl_id):
+        """Returns the details of a single cluster template."""
+
+        uri = 'cluster-templates/%s' % tmpl_id
+        return self._request_and_parse(self.get, uri, 'cluster_template')
+
+    def create_cluster_template(self, name, plugin_name, hadoop_version,
+                                node_groups, cluster_configs=None,
+                                **kwargs):
+        """Creates cluster template with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object.
+        """
+        uri = 'cluster-templates'
+        body = kwargs.copy()
+        body.update({
+            'name': name,
+            'plugin_name': plugin_name,
+            'hadoop_version': hadoop_version,
+            'node_groups': node_groups,
+            'cluster_configs': cluster_configs or dict(),
+        })
+        return self._request_and_parse(self.post, uri, 'cluster_template',
+                                       body=json.dumps(body))
+
+    def delete_cluster_template(self, tmpl_id):
+        """Deletes the specified cluster template by id."""
+
+        uri = 'cluster-templates/%s' % tmpl_id
+        return self.delete(uri)
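
The new cluster-template calls follow the same pattern as the node-group-template ones: required fields plus optional kwargs merged into one JSON body. A standalone sketch of the body create_cluster_template posts (field values below are placeholders):

    import json

    def cluster_template_body(name, plugin_name, hadoop_version, node_groups,
                              cluster_configs=None, **kwargs):
        body = kwargs.copy()
        body.update({
            'name': name,
            'plugin_name': plugin_name,
            'hadoop_version': hadoop_version,
            'node_groups': node_groups,
            'cluster_configs': cluster_configs or dict(),
        })
        return json.dumps(body)

    print(cluster_template_body('tmpl-1', 'vanilla', '2.3.0',
                                [{'name': 'worker', 'count': 3}],
                                description='placeholder description'))
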
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index c95faaa..55239f7 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -219,7 +219,7 @@
 
     def list_services(self):
         """List Service - Returns Services."""
-        resp, body = self.get('/OS-KSADM/services/')
+        resp, body = self.get('/OS-KSADM/services')
         return resp, self._parse_resp(body)
 
     def delete_service(self, service_id):
@@ -275,13 +275,20 @@
 
         return resp, body['access']
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         if headers is None:
             # Always accept 'json', for TokenClientXML too.
             # Because XML response is not easily
             # converted to the corresponding JSON one
             headers = self.get_headers(accept_type="json")
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers(accept_type="json"))
+            except (ValueError, TypeError):
+                headers = self.get_headers(accept_type="json")
+
         resp, resp_body = self.http_obj.request(url, method,
                                                 headers=headers, body=body)
         self._log_request(method, url, resp)
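
The extra_headers flag added above merges the client's default headers into caller-supplied ones, falling back to the defaults alone if the supplied object is not dict-like. A standalone sketch of that contract (get_headers below is a toy stand-in for the RestClient method):

    def merge_headers(headers, extra_headers, get_headers):
        if headers is None:
            return get_headers()
        if extra_headers:
            try:
                headers.update(get_headers())
            except (ValueError, TypeError):
                headers = get_headers()
        return headers

    def default_headers():
        return {'Accept': 'application/json'}

    print(merge_headers({'X-Custom': '1'}, True, default_headers))
    print(merge_headers(None, False, default_headers))
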
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 4b530f1..6829333 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -163,6 +163,12 @@
         body = json.loads(body)
         return resp, body['role']
 
+    def list_roles(self):
+        """Get the list of Roles."""
+        resp, body = self.get("roles")
+        body = json.loads(body)
+        return resp, body['roles']
+
     def update_role(self, name, role_id):
         """Create a Role."""
         post_body = {
@@ -515,13 +521,20 @@
         resp, body = self.post(self.auth_url, body=body)
         return resp, body
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         if headers is None:
             # Always accept 'json', for xml token client too.
             # Because XML response is not easily
             # converted to the corresponding JSON one
             headers = self.get_headers(accept_type="json")
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers(accept_type="json"))
+            except (ValueError, TypeError):
+                headers = self.get_headers(accept_type="json")
+
         resp, resp_body = self.http_obj.request(url, method,
                                                 headers=headers, body=body)
         self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index 93dc3dc..6490e34 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -46,12 +46,19 @@
         json = common.xml_to_json(body)
         return json
 
-    def request(self, method, url, headers=None, body=None, wait=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None, wait=None):
         """Overriding the existing HTTP request in super class RestClient."""
+        if extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
         dscv = CONF.identity.disable_ssl_certificate_validation
         self.http_obj = http.ClosingHttp(
             disable_ssl_certificate_validation=dscv)
         return super(EndPointClientXML, self).request(method, url,
+                                                      extra_headers,
                                                       headers=headers,
                                                       body=body)
 
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index c49f361..35295d7 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -217,6 +217,12 @@
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
+    def list_roles(self):
+        """Get the list of Roles."""
+        resp, body = self.get("roles")
+        body = self._parse_roles(etree.fromstring(body))
+        return resp, body
+
     def update_role(self, name, role_id):
         """Updates a Role."""
         post_body = common.Element("role",
@@ -516,13 +522,19 @@
         resp, body = self.post(self.auth_url, body=str(common.Document(auth)))
         return resp, body
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         if headers is None:
             # Always accept 'json', for xml token client too.
             # Because XML response is not easily
             # converted to the corresponding JSON one
             headers = self.get_headers(accept_type="json")
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers(accept_type="json"))
+            except (ValueError, TypeError):
+                headers = self.get_headers(accept_type="json")
         resp, resp_body = self.http_obj.request(url, method,
                                                 headers=headers, body=body)
         self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index e903089..73d831b 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -46,12 +46,19 @@
         json = common.xml_to_json(body)
         return json
 
-    def request(self, method, url, headers=None, body=None, wait=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None, wait=None):
         """Overriding the existing HTTP request in super class RestClient."""
+        if extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
         dscv = CONF.identity.disable_ssl_certificate_validation
         self.http_obj = http.ClosingHttp(
             disable_ssl_certificate_validation=dscv)
         return super(PolicyClientXML, self).request(method, url,
+                                                    extra_headers,
                                                     headers=headers,
                                                     body=body)
 
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index e21abe1..2a797b2 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -31,12 +31,15 @@
     'vpnservices': 'vpn',
     'ikepolicies': 'vpn',
     'metering_labels': 'metering',
-    'metering_label_rules': 'metering'
+    'metering_label_rules': 'metering',
+    'firewall_rules': 'fw',
+    'firewall_policies': 'fw',
+    'firewalls': 'fw'
 }
 
 # The following list represents resource names that do not require
 # changing underscore to a hyphen
-hyphen_exceptions = ["health_monitors"]
+hyphen_exceptions = ["health_monitors", "firewall_rules", "firewall_policies"]
 
 # map from resource name to a plural name
 # needed only for those which can't be constructed as name + 's'
@@ -44,7 +47,8 @@
     'security_groups': 'security_groups',
     'security_group_rules': 'security_group_rules',
     'ikepolicy': 'ikepolicies',
-    'quotas': 'quotas'
+    'quotas': 'quotas',
+    'firewall_policy': 'firewall_policies'
 }
 
 
@@ -100,7 +104,7 @@
         def _list(**filters):
             uri = self.get_uri(plural_name)
             if filters:
-                uri += '?' + urllib.urlencode(filters)
+                uri += '?' + urllib.urlencode(filters, doseq=1)
             resp, body = self.get(uri)
             result = {plural_name: self.deserialize_list(body)}
             return resp, result
@@ -116,14 +120,14 @@
         return _delete
 
     def _shower(self, resource_name):
-        def _show(resource_id, field_list=[]):
-            # field_list is a sequence of two-element tuples, with the
-            # first element being 'fields'. An example:
-            # [('fields', 'id'), ('fields', 'name')]
+        def _show(resource_id, **fields):
+            # fields is a dict which key is 'fields' and value is a
+            # list of field's name. An example:
+            # {'fields': ['id', 'name']}
             plural = self.pluralize(resource_name)
             uri = '%s/%s' % (self.get_uri(plural), resource_id)
-            if field_list:
-                uri += '?' + urllib.urlencode(field_list)
+            if fields:
+                uri += '?' + urllib.urlencode(fields, doseq=1)
             resp, body = self.get(uri)
             body = self.deserialize_single(body)
             return resp, body
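
With the new _show signature, callers pass fields as a keyword whose value is a list, and doseq expands it into repeated query keys, matching what the old tuple-list form produced. A standalone sketch (the Python 3 import is only so the snippet runs outside the Python 2 tree this patch targets):

    try:
        from urllib import urlencode          # Python 2, as used by the client
    except ImportError:
        from urllib.parse import urlencode    # Python 3 fallback for this sketch

    print(urlencode({'fields': ['id', 'name']}, doseq=1))
    # fields=id&fields=name
    print(urlencode([('fields', 'id'), ('fields', 'name')]))
    # the old tuple-list form produced the same query string
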
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 0945b09..50a1954 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -24,7 +24,7 @@
     # list of plurals used for xml serialization
     PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
                'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
-               'health_monitors', 'vips']
+               'health_monitors', 'vips', 'members']
 
     def get_rest_client(self, auth_provider):
         rc = rest_client.RestClient(auth_provider)
@@ -257,6 +257,38 @@
         body = _root_tag_fetcher_and_xml_to_json_parse(body)
         return resp, body
 
+    def create_vpnservice(self, subnet_id, router_id, **kwargs):
+        uri = '%s/vpn/vpnservices' % (self.uri_prefix)
+        vpnservice = common.Element("vpnservice")
+        p1 = common.Element("subnet_id", subnet_id)
+        p2 = common.Element("router_id", router_id)
+        vpnservice.append(p1)
+        vpnservice.append(p2)
+        common.deep_dict_to_xml(vpnservice, kwargs)
+        resp, body = self.post(uri, str(common.Document(vpnservice)))
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
+    def create_ikepolicy(self, name, **kwargs):
+        uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
+        ikepolicy = common.Element("ikepolicy")
+        p1 = common.Element("name", name)
+        ikepolicy.append(p1)
+        common.deep_dict_to_xml(ikepolicy, kwargs)
+        resp, body = self.post(uri, str(common.Document(ikepolicy)))
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
+    def create_ipsecpolicy(self, name, **kwargs):
+        uri = '%s/vpn/ipsecpolicies' % (self.uri_prefix)
+        ipsecpolicy = common.Element("ipsecpolicy")
+        p1 = common.Element("name", name)
+        ipsecpolicy.append(p1)
+        common.deep_dict_to_xml(ipsecpolicy, kwargs)
+        resp, body = self.post(uri, str(common.Document(ipsecpolicy)))
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
 
 def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
     body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 6e7910e..a0506f2 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -162,11 +162,17 @@
         self.service = CONF.object_storage.catalog_type
         self.format = 'json'
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         self.http_obj = http.ClosingHttp()
         if headers is None:
             headers = {}
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = {}
 
         # Authorize the request
         req_url, req_headers, req_body = self.auth_provider.auth_request(
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 49f7f49..f3f4eb6 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -29,12 +29,16 @@
 
         self.service = CONF.object_storage.catalog_type
 
-    def create_object(self, container, object_name, data, params=None):
+    def create_object(self, container, object_name, data,
+                      params=None, metadata=None):
         """Create storage object."""
 
         headers = self.get_headers()
         if not data:
             headers['content-length'] = '0'
+        if metadata:
+            for key in metadata:
+                headers[str(key)] = metadata[key]
         url = "%s/%s" % (str(container), str(object_name))
         if params:
             url += '?%s' % urllib.urlencode(params)
@@ -73,11 +77,16 @@
         resp, body = self.head(url)
         return resp, body
 
-    def get_object(self, container, object_name):
+    def get_object(self, container, object_name, metadata=None):
         """Retrieve object's data."""
 
+        headers = {}
+        if metadata:
+            for key in metadata:
+                headers[str(key)] = metadata[key]
+
         url = "{0}/{1}".format(container, object_name)
-        resp, body = self.get(url)
+        resp, body = self.get(url, headers=headers)
         return resp, body
 
     def copy_object_in_same_container(self, container, src_object_name,
@@ -146,13 +155,19 @@
         self.service = CONF.object_storage.catalog_type
         self.format = 'json'
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         dscv = CONF.identity.disable_ssl_certificate_validation
         self.http_obj = http.ClosingHttp(
             disable_ssl_certificate_validation=dscv)
         if headers is None:
             headers = {}
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = {}
 
         # Authorize the request
         req_url, req_headers, req_body = self.auth_provider.auth_request(
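
create_object and get_object now accept a metadata dict that is copied verbatim into the request headers. A standalone sketch of that copy (the header names are illustrative; Swift object metadata would normally use X-Object-Meta-* keys):

    def metadata_to_headers(metadata, base_headers=None):
        headers = dict(base_headers or {})
        if metadata:
            for key in metadata:
                headers[str(key)] = metadata[key]
        return headers

    print(metadata_to_headers({'X-Object-Meta-color': 'blue'},
                              {'content-length': '0'}))
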
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 113003c..2311bdd 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -154,7 +154,8 @@
                 if resource_status == status:
                     return
                 if fail_regexp.search(resource_status):
-                    raise exceptions.StackBuildErrorException(
+                    raise exceptions.StackResourceBuildErrorException(
+                        resource_name=resource_name,
                         stack_identifier=stack_identifier,
                         resource_status=resource_status,
                         resource_status_reason=body['resource_status_reason'])
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index e4d2e8d..b55a037 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -67,10 +67,10 @@
         body = json.loads(body)
         return resp, body['volume']
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """
         Creates a new Volume.
-        size(Required): Size of volume in GB.
+        size: Size of volume in GB.
         Following optional keyword arguments are accepted:
         display_name: Optional Volume Name.
         metadata: A dictionary of values to be used as metadata.
@@ -78,6 +78,10 @@
         snapshot_id: When specified the volume is created from this snapshot
         imageRef: When specified the volume is created from this image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         post_body = {'size': size}
         post_body.update(kwargs)
         post_body = json.dumps({'volume': post_body})
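
When no size is given, the volume clients now fall back to the [volume] volume_size option added to the sample config. A standalone sketch of that default-from-config behaviour (the Conf class below is a stand-in for CONF.volume, not Tempest code):

    class FakeVolumeConf(object):
        # stand-in for CONF.volume; 1 mirrors the new sample default
        volume_size = 1

    def volume_post_body(size=None, conf=None, **kwargs):
        conf = conf or FakeVolumeConf()
        if size is None:
            size = conf.volume_size
        body = {'size': size}
        body.update(kwargs)
        return {'volume': body}

    print(volume_post_body())                          # {'volume': {'size': 1}}
    print(volume_post_body(10, display_name='data'))
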
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
index 5bfa75f..df20a2a 100644
--- a/tempest/services/volume/v2/json/volumes_client.py
+++ b/tempest/services/volume/v2/json/volumes_client.py
@@ -68,10 +68,10 @@
         body = json.loads(body)
         return resp, body['volume']
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """
         Creates a new Volume.
-        size(Required): Size of volume in GB.
+        size: Size of volume in GB.
         Following optional keyword arguments are accepted:
         name: Optional Volume Name.
         metadata: A dictionary of values to be used as metadata.
@@ -79,6 +79,10 @@
         snapshot_id: When specified the volume is created from this snapshot
         imageRef: When specified the volume is created from this image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         post_body = {'size': size}
         post_body.update(kwargs)
         post_body = json.dumps({'volume': post_body})
diff --git a/tempest/services/volume/v2/xml/volumes_client.py b/tempest/services/volume/v2/xml/volumes_client.py
index e735a65..1fdaf19 100644
--- a/tempest/services/volume/v2/xml/volumes_client.py
+++ b/tempest/services/volume/v2/xml/volumes_client.py
@@ -117,10 +117,10 @@
         body = self._check_if_bootable(body)
         return resp, body
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """Creates a new Volume.
 
-        :param size: Size of volume in GB. (Required)
+        :param size: Size of volume in GB.
         :param name: Optional Volume Name.
         :param metadata: An optional dictionary of values for metadata.
         :param volume_type: Optional Name of volume_type for the volume
@@ -129,6 +129,10 @@
         :param imageRef: When specified the volume is created from this
                          image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         # NOTE(afazekas): it should use a volume namespace
         volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
 
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 6866dad..65bc321 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -118,10 +118,10 @@
         body = self._check_if_bootable(body)
         return resp, body
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """Creates a new Volume.
 
-        :param size: Size of volume in GB. (Required)
+        :param size: Size of volume in GB.
         :param display_name: Optional Volume Name.
         :param metadata: An optional dictionary of values for metadata.
         :param volume_type: Optional Name of volume_type for the volume
@@ -130,6 +130,10 @@
         :param imageRef: When specified the volume is created from this
                          image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         # NOTE(afazekas): it should use a volume namespace
         volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
 
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+    def _create_keypair(self):
+        keyname = data_utils.rand_name("key")
+        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+        assert(resp.status == 200)
+
+    def _delete_keypair(self):
+        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+        assert(resp.status == 202)
+
+    def _create_vm(self):
+        self.name = name = data_utils.rand_name("instance")
+        servers_client = self.manager.servers_client
+        self.logger.info("creating %s" % name)
+        vm_args = self.vm_extra_args.copy()
+        vm_args['security_groups'] = [self.sec_grp]
+        vm_args['key_name'] = self.key['name']
+        resp, server = servers_client.create_server(name, self.image,
+                                                    self.flavor,
+                                                    **vm_args)
+        self.server_id = server['id']
+        assert(resp.status == 202)
+        self.manager.servers_client.wait_for_server_status(self.server_id,
+                                                           'ACTIVE')
+
+    def _destroy_vm(self):
+        self.logger.info("deleting server: %s" % self.server_id)
+        resp, _ = self.manager.servers_client.delete_server(self.server_id)
+        assert(resp.status == 204)  # It cannot be 204 if I had to wait..
+        self.manager.servers_client.wait_for_server_termination(self.server_id)
+        self.logger.info("deleted server: %s" % self.server_id)
+
+    def _create_sec_group(self):
+        sec_grp_cli = self.manager.security_groups_client
+        s_name = data_utils.rand_name('sec_grp-')
+        s_description = data_utils.rand_name('desc-')
+        _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+                                                            s_description)
+        create_rule = sec_grp_cli.create_security_group_rule
+        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+        create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+    def _destroy_sec_grp(self):
+        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+    def _create_floating_ip(self):
+        floating_cli = self.manager.floating_ips_client
+        _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+    def _destroy_floating_ip(self):
+        cli = self.manager.floating_ips_client
+        cli.delete_floating_ip(self.floating['id'])
+        cli.wait_for_resource_deletion(self.floating['id'])
+        self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+    def _create_volume(self):
+        name = data_utils.rand_name("volume")
+        self.logger.info("creating volume: %s" % name)
+        volumes_client = self.manager.volumes_client
+        resp, self.volume = volumes_client.create_volume(
+            size=1,
+            display_name=name)
+        assert(resp.status == 200)
+        volumes_client.wait_for_volume_status(self.volume['id'],
+                                              'available')
+        self.logger.info("created volume: %s" % self.volume['id'])
+
+    def _delete_volume(self):
+        self.logger.info("deleting volume: %s" % self.volume['id'])
+        volumes_client = self.manager.volumes_client
+        resp, _ = volumes_client.delete_volume(self.volume['id'])
+        assert(resp.status == 202)
+        volumes_client.wait_for_resource_deletion(self.volume['id'])
+        self.logger.info("deleted volume: %s" % self.volume['id'])
+
+    def _wait_disassociate(self):
+        cli = self.manager.floating_ips_client
+
+        def func():
+            _, floating = cli.get_floating_ip_details(self.floating['id'])
+            return floating['instance_id'] is None
+
+        if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+                                            CONF.compute.build_interval):
+            raise RuntimeError("IP disassociate timeout!")
+
+    def new_server_ops(self):
+        self._create_vm()
+        cli = self.manager.floating_ips_client
+        cli.associate_floating_ip_to_server(self.floating['ip'],
+                                            self.server_id)
+        if self.ssh_test_before_attach and self.enable_ssh_verify:
+            self.logger.info("Scanning for block devices via ssh on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+
+    def setUp(self, **kwargs):
+        """Note able configuration combinations:
+            Closest options to the test_stamp_pattern:
+                new_server = True
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = False
+            Just attaching:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+            Mostly API load by repeated attachment:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = False
+                ssh_test_before_attach = False
+            Minimal Nova load, but cinder load not decreased:
+                new_server = False
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+        """
+        self.image = CONF.compute.image_ref
+        self.flavor = CONF.compute.flavor_ref
+        self.vm_extra_args = kwargs.get('vm_extra_args', {})
+        self.floating_pool = kwargs.get('floating_pool', None)
+        self.new_volume = kwargs.get('new_volume', True)
+        self.new_server = kwargs.get('new_server', False)
+        self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+        self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+                                                 False)
+        self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+        self.detach_match_count = kwargs.get('detach_match_count', 1)
+        self.attach_match_count = kwargs.get('attach_match_count', 2)
+        self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+        self._create_floating_ip()
+        self._create_sec_group()
+        self._create_keypair()
+        private_key = self.key['private_key']
+        username = CONF.compute.image_ssh_user
+        self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+                                                        username,
+                                                        pkey=private_key)
+        if not self.new_volume:
+            self._create_volume()
+        if not self.new_server:
+            self.new_server_ops()
+
+    # now we just test whether the number of partitions increased or decreased
+    def part_wait(self, num_match):
+        def _part_state():
+            self.partitions = self.remote_client.get_partitions().split('\n')
+            matching = 0
+            for part_line in self.partitions[1:]:
+                if self.part_line_re.match(part_line):
+                    matching += 1
+            return matching == num_match
+        if tempest.test.call_until_true(_part_state,
+                                        CONF.compute.build_timeout,
+                                        CONF.compute.build_interval):
+            return
+        else:
+            raise RuntimeError("Unexpected partitions: %s",
+                               str(self.partitions))
+
+    def run(self):
+        if self.new_server:
+            self.new_server_ops()
+        if self.new_volume:
+            self._create_volume()
+        servers_client = self.manager.servers_client
+        self.logger.info("attach volume (%s) to vm %s" %
+                        (self.volume['id'], self.server_id))
+        resp, body = servers_client.attach_volume(self.server_id,
+                                                  self.volume['id'],
+                                                  self.part_name)
+        assert(resp.status == 200)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'in-use')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for new block device on %s"
+                             % self.server_id)
+            self.part_wait(self.attach_match_count)
+
+        resp, body = servers_client.detach_volume(self.server_id,
+                                                  self.volume['id'])
+        assert(resp.status == 202)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'available')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for block device disapperance on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+        if self.new_volume:
+            self._delete_volume()
+        if self.new_server:
+            self._destroy_vm()
+
+    def tearDown(self):
+        cli = self.manager.floating_ips_client
+        cli.disassociate_floating_ip_from_server(self.floating['ip'],
+                                                 self.server_id)
+        self._wait_disassociate()
+        if not self.new_server:
+            self._destroy_vm()
+        self._delete_keypair()
+        self._destroy_floating_ip()
+        self._destroy_sec_grp()
+        if not self.new_volume:
+            self._delete_volume()
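
part_wait above polls the guest's partition listing (via RemoteClient.get_partitions) and counts lines matching part_line_re until the count equals the expected value. A standalone sketch with sample output (the listing below is illustrative):

    import re

    # two matching lines; with the default attach_match_count=2 this would
    # satisfy part_wait after a successful attach
    partitions_output = """major minor  #blocks  name
     253        0   10485760 vda
     253       32    1048576 vdc"""

    part_line_re = re.compile('.*vd.*')
    matching = sum(1 for line in partitions_output.split('\n')[1:]
                   if part_line_re.match(line))
    print(matching)  # 2
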
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..25ae27f 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -80,11 +80,16 @@
     return ret
 
 
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
     """
     Signal handler (only active if stop_on_error is True).
     """
-    terminate_all_processes()
+    for process in processes:
+        if (not process['process'].is_alive() and
+                process['process'].exitcode != 0):
+            signal.signal(signalnum, signal.SIG_DFL)
+            terminate_all_processes()
+            break
 
 
 def terminate_all_processes(check_interval=20):
@@ -202,6 +207,8 @@
             had_errors = True
             break
 
+    if stop_on_error:
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
     terminate_all_processes()
 
     sum_fails = 0
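
The reworked handler only escalates when a child actually exited non-zero, and it resets SIGCHLD to the default action first so the teardown does not re-enter the handler. A standalone sketch of that shape (the processes list of multiprocessing children and the terminate_all callable stand in for the driver's module state):

    import signal

    def make_sigchld_handler(processes, terminate_all):
        def handler(signalnum, frame):
            for process in processes:
                if not process.is_alive() and process.exitcode != 0:
                    # stop handling SIGCHLD before the teardown starts
                    # reaping children, then escalate once
                    signal.signal(signalnum, signal.SIG_DFL)
                    terminate_all()
                    break
        return handler
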
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+  "threads": 1,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {"vm_extra_args": {},
+             "new_volume": true,
+             "new_server": false,
+             "ssh_test_before_attach": false,
+             "enable_ssh_verify": true}
+}
+]
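
A config like this is consumed by loading the JSON and importing each "action" entry as a dotted class path, then running "threads" workers with "kwargs". A standalone sketch of just the dotted-path resolution (importlib and the collections.OrderedDict placeholder path are our choices so the snippet runs outside a Tempest tree; the driver uses its own import helper):

    import importlib
    import json

    config_text = ('[{"action": "collections.OrderedDict",'
                   ' "threads": 1, "kwargs": {}}]')
    for test in json.loads(config_text):
        module_name, _, class_name = test['action'].rpartition('.')
        cls = getattr(importlib.import_module(module_name), class_name)
        print(cls, test['threads'], test['kwargs'])
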
diff --git a/tempest/test.py b/tempest/test.py
index e4019f9..254fffa 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -75,12 +75,16 @@
             try:
                 f(cls)
             except Exception as se:
+                etype, value, trace = sys.exc_info()
                 LOG.exception("setUpClass failed: %s" % se)
                 try:
                     cls.tearDownClass()
                 except Exception as te:
                     LOG.exception("tearDownClass failed: %s" % te)
-                raise se
+                try:
+                    raise etype(value), None, trace
+                finally:
+                    del trace  # for avoiding circular refs
 
     return decorator
 
@@ -94,6 +98,7 @@
     service_list = {
         'compute': CONF.service_available.nova,
         'image': CONF.service_available.glance,
+        'baremetal': CONF.service_available.ironic,
         'volume': CONF.service_available.cinder,
         'orchestration': CONF.service_available.heat,
         # NOTE(mtreinish) nova-network will provide networking functionality
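
The setUpClass wrapper above now captures exc_info before running tearDownClass and re-raises with the original traceback, so failures keep pointing at the line inside setUpClass rather than at the re-raise. A standalone sketch of the same pattern using six.reraise (six is already a requirement; the helper name here is illustrative):

    import sys

    import six

    def run_with_cleanup(func, cleanup):
        try:
            func()
        except Exception:
            etype, value, trace = sys.exc_info()
            try:
                cleanup()
            except Exception:
                pass  # a real caller would log this, as the patch does
            try:
                six.reraise(etype, value, trace)
            finally:
                del trace  # avoid a frame/traceback reference cycle
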
diff --git a/tempest/tests/common/test_debug.py b/tempest/tests/common/test_debug.py
index cd9936c..8a880f2 100644
--- a/tempest/tests/common/test_debug.py
+++ b/tempest/tests/common/test_debug.py
@@ -53,15 +53,14 @@
         self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                               'enable', True))
 
-        tables = ['filter', 'nat', 'mangle']
         self.ip_ns_list_mock.return_value = [1, 2]
 
         debug.log_ip_ns()
         self.ip_addr_raw_mock.assert_called_with()
         self.assertTrue(self.log_mock.info.called)
         self.ip_route_raw_mock.assert_called_with()
-        self.assertEqual(len(tables), self.iptables_raw_mock.call_count)
-        for table in tables:
+        self.assertEqual(len(debug.TABLES), self.iptables_raw_mock.call_count)
+        for table in debug.TABLES:
             self.assertIn(mock.call(table),
                           self.iptables_raw_mock.call_args_list)
 
@@ -76,10 +75,11 @@
             self.assertIn(mock.call(ns),
                           self.ip_ns_route_mock.call_args_list)
 
-        self.assertEqual(len(tables) * len(self.ip_ns_list_mock.return_value),
+        self.assertEqual(len(debug.TABLES) *
+                         len(self.ip_ns_list_mock.return_value),
                          self.iptables_ns_mock.call_count)
         for ns in self.ip_ns_list_mock.return_value:
-            for table in tables:
+            for table in debug.TABLES:
                 self.assertIn(mock.call(ns, table),
                               self.iptables_ns_mock.call_args_list)
 
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..ddffb4a 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.tests import fake_credentials
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+    return fake_credentials.FakeCredentials()
+
 
 class FakeAuthProvider(object):
 
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4676cbd..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -45,6 +45,16 @@
             os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
         self.conf.set_default('lock_path',
                               str(os.environ.get('OS_TEST_LOCK_PATH')))
+        self.conf.set_default('auth_version', 'v2', group='identity')
+        for config_option in ['username', 'password', 'tenant_name']:
+            # Identity group items
+            for prefix in ['', 'alt_', 'admin_']:
+                self.conf.set_default(prefix + config_option,
+                                      'fake_' + config_option,
+                                      group='identity')
+            # Compute Admin group items
+            self.conf.set_default(config_option, 'fake_' + config_option,
+                                  group='compute-admin')
 
 
 class FakePrivate(config.TempestConfigPrivate):
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..a372973
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+    def is_valid(self):
+        return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            tenant_name='fake_tenant_name'
+        )
+        super(FakeKeystoneV2Credentials, self).__init__(**creds)
diff --git a/tempest/tests/negative/test_generate_json.py b/tempest/tests/negative/test_generate_json.py
deleted file mode 100644
index e09fcdf..0000000
--- a/tempest/tests/negative/test_generate_json.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.generator import negative_generator
-import tempest.test
-
-
-class TestNegativeGenerator(tempest.test.BaseTestCase):
-
-    fake_input_str = {"type": "string",
-                      "minLength": 2,
-                      "maxLength": 8,
-                      'results': {'gen_number': 404}}
-
-    fake_input_int = {"type": "integer",
-                      "maximum": 255,
-                      "minimum": 1}
-
-    fake_input_obj = {"type": "object",
-                      "properties": {"minRam": {"type": "integer"},
-                                     "diskName": {"type": "string"},
-                                     "maxRam": {"type": "integer", }
-                                     }
-                      }
-
-    def setUp(self):
-        super(TestNegativeGenerator, self).setUp()
-        self.negative = negative_generator.NegativeTestGenerator()
-
-    def _validate_result(self, data):
-        self.assertTrue(isinstance(data, list))
-        for t in data:
-            self.assertTrue(isinstance(t, tuple))
-
-    def test_generate_invalid_string(self):
-        result = self.negative.generate(self.fake_input_str)
-        self._validate_result(result)
-
-    def test_generate_invalid_integer(self):
-        result = self.negative.generate(self.fake_input_int)
-        self._validate_result(result)
-
-    def test_generate_invalid_obj(self):
-        result = self.negative.generate(self.fake_input_obj)
-        self._validate_result(result)
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index f2ed999..c77faca 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -16,7 +16,9 @@
 import jsonschema
 import mock
 
-import tempest.common.generator.base_generator as base_generator
+from tempest.common.generator import base_generator
+from tempest.common.generator import negative_generator
+from tempest.common.generator import valid_generator
 from tempest.tests import base
 
 
@@ -79,3 +81,73 @@
         self.assertRaises(jsonschema.SchemaError,
                           self.generator.validate_schema,
                           self.invalid_json_schema_desc)
+
+
+class BaseNegativeGenerator(object):
+    types = ['string', 'integer', 'object']
+
+    fake_input_str = {"type": "string",
+                      "minLength": 2,
+                      "maxLength": 8,
+                      'results': {'gen_int': 404}}
+
+    fake_input_int = {"type": "integer",
+                      "maximum": 255,
+                      "minimum": 1}
+
+    fake_input_obj = {"type": "object",
+                      "properties": {"minRam": {"type": "integer"},
+                                     "diskName": {"type": "string"},
+                                     "maxRam": {"type": "integer", }
+                                     }
+                      }
+
+    unknown_type_schema = {
+        "type": "not_defined"
+    }
+
+    def _validate_result(self, data):
+        self.assertTrue(isinstance(data, list))
+        for t in data:
+            self.assertIsInstance(t, tuple)
+            self.assertEqual(3, len(t))
+            self.assertIsInstance(t[0], str)
+
+    def test_generate_string(self):
+        result = self.generator.generate(self.fake_input_str)
+        self._validate_result(result)
+
+    def test_generate_integer(self):
+        result = self.generator.generate(self.fake_input_int)
+        self._validate_result(result)
+
+    def test_generate_obj(self):
+        result = self.generator.generate(self.fake_input_obj)
+        self._validate_result(result)
+
+    def test_generator_mandatory_functions(self):
+        for data_type in self.types:
+            self.assertIn(data_type, self.generator.types_dict)
+
+    def test_generate_with_unknown_type(self):
+        self.assertRaises(TypeError, self.generator.generate,
+                          self.unknown_type_schema)
+
+
+class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
+    def setUp(self):
+        super(TestNegativeValidGenerator, self).setUp()
+        self.generator = valid_generator.ValidTestGenerator()
+
+    def test_generate_valid(self):
+        result = self.generator.generate_valid(self.fake_input_obj)
+        self.assertIn("minRam", result)
+        self.assertIsInstance(result["minRam"], int)
+        self.assertIn("diskName", result)
+        self.assertIsInstance(result["diskName"], str)
+
+
+class TestNegativeNegativeGenerator(base.TestCase, BaseNegativeGenerator):
+    def setUp(self):
+        super(TestNegativeNegativeGenerator, self).setUp()
+        self.generator = negative_generator.NegativeTestGenerator()
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 62c20e3..7b5b4d6 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -22,18 +22,16 @@
 from tempest import exceptions
 from tempest.openstack.common.fixture import mockpatch
 from tempest.tests import base
+from tempest.tests import fake_auth_provider
 from tempest.tests import fake_config
+from tempest.tests import fake_credentials
 from tempest.tests import fake_http
 from tempest.tests import fake_identity
 
 
 class BaseAuthTestsSetUp(base.TestCase):
     _auth_provider_class = None
-    credentials = {
-        'username': 'fake_user',
-        'password': 'fake_pwd',
-        'tenant_name': 'fake_tenant'
-    }
+    credentials = fake_credentials.FakeCredentials()
 
     def _auth(self, credentials, **params):
         """
@@ -47,6 +45,8 @@
         self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
         self.fake_http = fake_http.fake_httplib2(return_type=200)
         self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+        self.stubs.Set(auth, 'get_credentials',
+                       fake_auth_provider.get_credentials)
         self.auth_provider = self._auth(self.credentials)
 
 
@@ -58,12 +58,19 @@
     """
     _auth_provider_class = auth.AuthProvider
 
-    def test_check_credentials_is_dict(self):
-        self.assertTrue(self.auth_provider.check_credentials({}))
+    def test_check_credentials_class(self):
+        self.assertRaises(NotImplementedError,
+                          self.auth_provider.check_credentials,
+                          auth.Credentials())
 
     def test_check_credentials_bad_type(self):
         self.assertFalse(self.auth_provider.check_credentials([]))
 
+    def test_instantiate_with_dict(self):
+        # Dict credentials are only supported for backward compatibility
+        auth_provider = self._auth(credentials={})
+        self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
     def test_instantiate_with_bad_credentials_type(self):
         """
         Assure that credentials with bad type fail with TypeError
@@ -104,6 +111,7 @@
 class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
     _endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
     _auth_provider_class = auth.KeystoneV2AuthProvider
+    credentials = fake_credentials.FakeKeystoneV2Credentials()
 
     def setUp(self):
         super(TestKeystoneV2AuthProvider, self).setUp()
@@ -210,17 +218,6 @@
             del cred[attr]
             self.assertFalse(self.auth_provider.check_credentials(cred))
 
-    def test_check_credentials_not_scoped_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['tenant_name']
-        self.assertTrue(self.auth_provider.check_credentials(cred,
-                                                             scoped=False))
-
-    def test_check_credentials_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['tenant_name']
-        self.assertFalse(self.auth_provider.check_credentials(cred))
-
     def _test_base_url_helper(self, expected_url, filters,
                               auth_data=None):
 
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
new file mode 100644
index 0000000..bdb9269
--- /dev/null
+++ b/tempest/tests/test_commands.py
@@ -0,0 +1,87 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+import subprocess
+
+from tempest.common import commands
+from tempest.tests import base
+
+
+class TestCommands(base.TestCase):
+
+    def setUp(self):
+        super(TestCommands, self).setUp()
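+        # Every command helper is expected to shell out via subprocess.Popen
+        # with these keyword arguments; the tests assert on the exact call.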
+        self.subprocess_args = {'stdout': subprocess.PIPE,
+                                'stderr': subprocess.STDOUT}
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_addr_raw(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'a']
+        commands.ip_addr_raw()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_route_raw(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'r']
+        commands.ip_route_raw()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_raw(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+        commands.ip_ns_raw()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_iptables_raw(self, mock):
+        table = 'filter'
+        expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
+                    table]
+        commands.iptables_raw(table)
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_list(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+        commands.ip_ns_list()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_addr(self, mock):
+        ns_list = commands.ip_ns_list()
+        for ns in ns_list:
+            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+                        'ip', 'a']
+            commands.ip_ns_addr(ns)
+            mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_route(self, mock):
+        ns_list = commands.ip_ns_list()
+        for ns in ns_list:
+            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+                        'ip', 'r']
+            commands.ip_ns_route(ns)
+            mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_iptables_ns(self, mock):
+        table = 'filter'
+        ns_list = commands.ip_ns_list()
+        for ns in ns_list:
+            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+                        'iptables', '-v', '-S', '-t', table]
+            commands.iptables_ns(ns, table)
+            mock.assert_called_once_with(expected, **self.subprocess_args)
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..86600fa
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,112 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+    attributes = {}
+    credentials_class = auth.Credentials
+
+    def _get_credentials(self, attributes=None):
+        if attributes is None:
+            attributes = self.attributes
+        return self.credentials_class(**attributes)
+
+    def setUp(self):
+        super(CredentialsTests, self).setUp()
+        self.fake_http = fake_http.fake_httplib2(return_type=200)
+        self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def test_create_invalid_attr(self):
+        self.assertRaises(exceptions.InvalidCredentials,
+                          self._get_credentials,
+                          attributes=dict(invalid='fake'))
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            self.assertRaises(NotImplementedError,
+                              self.credentials_class.get_default,
+                              credentials_type=ctype)
+
+    def test_invalid_default(self):
+        self.assertRaises(exceptions.InvalidCredentials,
+                          auth.Credentials.get_default,
+                          credentials_type='invalid_type')
+
+    def test_is_valid(self):
+        creds = self._get_credentials()
+        self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+    attributes = {
+        'username': 'fake_username',
+        'password': 'fake_password',
+        'tenant_name': 'fake_tenant_name'
+    }
+
+    identity_response = fake_identity._fake_v2_response
+    credentials_class = auth.KeystoneV2Credentials
+
+    def setUp(self):
+        super(KeystoneV2CredentialsTests, self).setUp()
+        self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def _verify_credentials(self, credentials_class, creds_dict):
+        creds = auth.get_credentials(**creds_dict)
+        # Check the right version of credentials has been returned
+        self.assertIsInstance(creds, credentials_class)
+        # Check that the id attributes exist but are left unset (None)
+        attributes = [x for x in creds.ATTRIBUTES if (
+            '_id' in x and x != 'domain_id')]
+        for attr in attributes:
+            self.assertIsNone(getattr(creds, attr))
+
+    def test_get_credentials(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(self.credentials_class, self.attributes)
+
+    def test_is_valid(self):
+        creds = self._get_credentials()
+        self.assertTrue(creds.is_valid())
+
+    def test_is_not_valid(self):
+        creds = self._get_credentials()
+        for attr in self.attributes.keys():
+            delattr(creds, attr)
+            self.assertFalse(creds.is_valid(),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            creds = self.credentials_class.get_default(credentials_type=ctype)
+            for attr in self.attributes.keys():
+                # Default configuration values related to credentials
+                # are defined as fake_* in fake_config.py
+                self.assertEqual(getattr(creds, attr), 'fake_' + attr)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index ebf0ca0..804204a 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 
+import mock
 import testtools
 
 from oslo.config import cfg
@@ -232,3 +233,19 @@
                           self._test_requires_ext_helper,
                           extension='enabled_ext',
                           service='bad_service')
+
+
+class TestSimpleNegativeDecorator(BaseDecoratorsTest):
+    @test.SimpleNegativeAutoTest
+    class FakeNegativeJSONTest(test.NegativeAutoTest):
+        _schema_file = 'fake/schemas/file.json'
+
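+    # SimpleNegativeAutoTest is expected to attach an auto-generated
+    # "test_fake_negative" method to the decorated class; the tests below
+    # check that it exists and that it delegates to execute().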
+    def test_testfunc_exist(self):
+        self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
+
+    @mock.patch('tempest.test.NegativeAutoTest.execute')
+    def test_testfunc_calls_execute(self, mock):
+        obj = self.FakeNegativeJSONTest("test_fake_negative")
+        self.assertIn("test_fake_negative", dir(obj))
+        obj.test_fake_negative()
+        mock.assert_called_once_with(self.FakeNegativeJSONTest._schema_file)
diff --git a/tempest/tests/test_rest_client.py b/tempest/tests/test_rest_client.py
index cfbb37d..64ad3bc 100644
--- a/tempest/tests/test_rest_client.py
+++ b/tempest/tests/test_rest_client.py
@@ -139,6 +139,102 @@
         self._verify_headers(resp)
 
 
+class TestRestClientUpdateHeaders(BaseRestClientTestClass):
+    def setUp(self):
+        self.fake_http = fake_http.fake_httplib2()
+        super(TestRestClientUpdateHeaders, self).setUp()
+        self.useFixture(mockpatch.PatchObject(self.rest_client,
+                                              '_error_checker'))
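+        # A caller-supplied header; with extra_headers=True it should be
+        # merged with the client's default Content-Type/Accept headers.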
+        self.headers = {'X-Configuration-Session': 'session_id'}
+
+    def test_post_update_headers(self):
+        __, return_dict = self.rest_client.post(self.url, {},
+                                                extra_headers=True,
+                                                headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_get_update_headers(self):
+        __, return_dict = self.rest_client.get(self.url,
+                                               extra_headers=True,
+                                               headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_delete_update_headers(self):
+        __, return_dict = self.rest_client.delete(self.url,
+                                                  extra_headers=True,
+                                                  headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_patch_update_headers(self):
+        __, return_dict = self.rest_client.patch(self.url, {},
+                                                 extra_headers=True,
+                                                 headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_put_update_headers(self):
+        __, return_dict = self.rest_client.put(self.url, {},
+                                               extra_headers=True,
+                                               headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_head_update_headers(self):
+        self.useFixture(mockpatch.PatchObject(self.rest_client,
+                                              'response_checker'))
+
+        __, return_dict = self.rest_client.head(self.url,
+                                                extra_headers=True,
+                                                headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_copy_update_headers(self):
+        __, return_dict = self.rest_client.copy(self.url,
+                                                extra_headers=True,
+                                                headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+
 class TestRestClientHeadersXML(TestRestClientHeadersJSON):
     TYPE = "xml"
 
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index e8610d3..33b8d6e 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -218,10 +218,8 @@
         else:
             self.assertNotEqual(instance.state, "running")
 
-    # NOTE(afazekas): doctored test case,
-    # with normal validation it would fail
     @test.attr(type='smoke')
-    def test_integration_1(self):
+    def test_compute_with_volumes(self):
         # EC2 1. integration test (not strict)
         image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
         sec_group_name = data_utils.rand_name("securitygroup-")
@@ -249,14 +247,20 @@
                                     instance_type=self.instance_type,
                                     key_name=self.keypair_name,
                                     security_groups=(sec_group_name,))
+
+        LOG.debug("Instance booted - state: %s",
+                  reservation.instances[0].state)
+
         self.addResourceCleanUp(self.destroy_reservation,
                                 reservation)
         volume = self.ec2_client.create_volume(1, self.zone)
+        LOG.debug("Volume created - status: %s", volume.status)
+
         self.addResourceCleanUp(self.destroy_volume_wait, volume)
         instance = reservation.instances[0]
-        LOG.info("state: %s", instance.state)
         if instance.state != "running":
             self.assertInstanceStateWait(instance, "running")
+        LOG.debug("Instance now running - state: %s", instance.state)
 
         address = self.ec2_client.allocate_address()
         rcuk_a = self.addResourceCleanUp(address.delete)
@@ -284,10 +288,21 @@
         volume.attach(instance.id, "/dev/vdh")
 
         def _volume_state():
+            """Return volume state realizing that 'in-use' is overloaded."""
             volume.update(validate=True)
-            return volume.status
+            status = volume.status
+            attached = volume.attach_data.status
+            LOG.debug("Volume %s is in status: %s, attach_status: %s",
+                      volume.id, status, attached)
+            # Nova reports 'in-use' for 'attaching' volumes because it
+            # exposes a single volume status while EC2 has two. If the
+            # volume is not attached yet, return something other than
+            # 'in-use' so the caller can tell the states apart.
+            if status == 'in-use' and attached != 'attached':
+                return 'attaching'
+            else:
+                return status
 
-        self.assertVolumeStatusWait(_volume_state, "in-use")
         wait.re_search_wait(_volume_state, "in-use")
 
         # NOTE(afazekas):  Different Hypervisor backends names
@@ -296,6 +311,7 @@
 
         def _part_state():
             current = ssh.get_partitions().split('\n')
+            LOG.debug("Partition map for instance: %s", current)
             if current > part_lines:
                 return 'INCREASE'
             if current < part_lines:
@@ -311,7 +327,6 @@
 
         self.assertVolumeStatusWait(_volume_state, "available")
         wait.re_search_wait(_volume_state, "available")
-        LOG.info("Volume %s state: %s", volume.id, volume.status)
 
         wait.state_wait(_part_state, 'DECREASE')
 
@@ -323,7 +338,7 @@
         self.assertAddressReleasedWait(address)
         self.cancelResourceCleanUp(rcuk_a)
 
-        LOG.info("state: %s", instance.state)
+        LOG.debug("Instance %s state: %s", instance.id, instance.state)
         if instance.state != "stopped":
             self.assertInstanceStateWait(instance, "stopped")
         # TODO(afazekas): move steps from teardown to the test case
diff --git a/tools/check_logs.py b/tools/check_logs.py
index b5b1780..bc4eaca 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -46,7 +46,6 @@
     'n-api',
     'n-cpu',
     'n-net',
-    'n-sch',
     'q-agt',
     'q-dhcp',
     'q-lbaas',
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
deleted file mode 100755
index aa92c0b..0000000
--- a/tools/verify_tempest_config.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import json
-import sys
-
-import httplib2
-
-from tempest import clients
-from tempest import config
-
-
-CONF = config.CONF
-RAW_HTTP = httplib2.Http()
-
-
-def verify_glance_api_versions(os):
-    # Check glance api versions
-    __, versions = os.image_client.get_versions()
-    if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
-                                             versions):
-        print('Config option image api_v1 should be change to: %s' % (
-            not CONF.image_feature_enabled.api_v1))
-    if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
-        print('Config option image api_v2 should be change to: %s' % (
-            not CONF.image_feature_enabled.api_v2))
-
-
-def verify_nova_api_versions(os):
-    # Check nova api versions - only get base URL without PATH
-    os.servers_client.skip_path = True
-    # The nova base endpoint url includes the version but to get the versions
-    # list the unversioned endpoint is needed
-    v2_endpoint = os.servers_client.base_url
-    v2_endpoint_parts = v2_endpoint.split('/')
-    endpoint = v2_endpoint_parts[0] + '//' + v2_endpoint_parts[2]
-    __, body = RAW_HTTP.request(endpoint, 'GET')
-    body = json.loads(body)
-    # Restore full base_url
-    os.servers_client.skip_path = False
-    versions = map(lambda x: x['id'], body['versions'])
-    if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
-        print('Config option compute api_v3 should be change to: %s' % (
-              not CONF.compute_feature_enabled.api_v3))
-
-
-def get_extension_client(os, service):
-    extensions_client = {
-        'nova': os.extensions_client,
-        'nova_v3': os.extensions_v3_client,
-        'cinder': os.volumes_extension_client,
-        'neutron': os.network_client,
-        'swift': os.account_client,
-    }
-    if service not in extensions_client:
-        print('No tempest extensions client for %s' % service)
-        exit(1)
-    return extensions_client[service]
-
-
-def get_enabled_extensions(service):
-    extensions_options = {
-        'nova': CONF.compute_feature_enabled.api_extensions,
-        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
-        'cinder': CONF.volume_feature_enabled.api_extensions,
-        'neutron': CONF.network_feature_enabled.api_extensions,
-        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
-    }
-    if service not in extensions_options:
-        print('No supported extensions list option for %s' % service)
-        exit(1)
-    return extensions_options[service]
-
-
-def verify_extensions(os, service, results):
-    extensions_client = get_extension_client(os, service)
-    __, resp = extensions_client.list_extensions()
-    if isinstance(resp, dict):
-        # Neutron's extension 'name' field has is not a single word (it has
-        # spaces in the string) Since that can't be used for list option the
-        # api_extension option in the network-feature-enabled group uses alias
-        # instead of name.
-        if service == 'neutron':
-            extensions = map(lambda x: x['alias'], resp['extensions'])
-        elif service == 'swift':
-            # Remove Swift general information from extensions list
-            resp.pop('swift')
-            extensions = resp.keys()
-        else:
-            extensions = map(lambda x: x['name'], resp['extensions'])
-
-    else:
-        extensions = map(lambda x: x['name'], resp)
-    if not results.get(service):
-        results[service] = {}
-    extensions_opt = get_enabled_extensions(service)
-    if extensions_opt[0] == 'all':
-        results[service]['extensions'] = 'all'
-        return results
-    # Verify that all configured extensions are actually enabled
-    for extension in extensions_opt:
-        results[service][extension] = extension in extensions
-    # Verify that there aren't additional extensions enabled that aren't
-    # specified in the config list
-    for extension in extensions:
-        if extension not in extensions_opt:
-            results[service][extension] = False
-    return results
-
-
-def display_results(results):
-    for service in results:
-        # If all extensions are specified as being enabled there is no way to
-        # verify this so we just assume this to be true
-        if results[service].get('extensions'):
-            continue
-        extension_list = get_enabled_extensions(service)
-        for extension in results[service]:
-            if not results[service][extension]:
-                if extension in extension_list:
-                    print("%s extension: %s should not be included in the list"
-                          " of enabled extensions" % (service, extension))
-                else:
-                    print("%s extension: %s should be included in the list of "
-                          "enabled extensions" % (service, extension))
-
-
-def check_service_availability(os):
-    services = []
-    avail_services = []
-    codename_match = {
-        'volume': 'cinder',
-        'network': 'neutron',
-        'image': 'glance',
-        'object_storage': 'swift',
-        'compute': 'nova',
-        'orchestration': 'heat',
-        'metering': 'ceilometer',
-        'telemetry': 'ceilometer',
-        'data_processing': 'savanna',
-        'baremetal': 'ironic',
-        'identity': 'keystone'
-
-    }
-    # Get catalog list for endpoints to use for validation
-    __, endpoints = os.endpoints_client.list_endpoints()
-    for endpoint in endpoints:
-        __, service = os.service_client.get_service(endpoint['service_id'])
-        services.append(service['type'])
-    # Pull all catalog types from config file and compare against endpoint list
-    for cfgname in dir(CONF._config):
-        cfg = getattr(CONF, cfgname)
-        catalog_type = getattr(cfg, 'catalog_type', None)
-        if not catalog_type:
-            continue
-        else:
-            if cfgname == 'identity':
-                # Keystone is a required service for tempest
-                continue
-            if catalog_type not in services:
-                if getattr(CONF.service_available, codename_match[cfgname]):
-                    print('Endpoint type %s not found either disable service '
-                          '%s or fix the catalog_type in the config file' % (
-                          catalog_type, codename_match[cfgname]))
-            else:
-                if not getattr(CONF.service_available,
-                               codename_match[cfgname]):
-                    print('Endpoint type %s is available, service %s should be'
-                          ' set as available in the config file.' % (
-                          catalog_type, codename_match[cfgname]))
-                else:
-                    avail_services.append(codename_match[cfgname])
-    return avail_services
-
-
-def main(argv):
-    print('Running config verification...')
-    os = clients.ComputeAdminManager(interface='json')
-    services = check_service_availability(os)
-    results = {}
-    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
-        if service == 'nova_v3' and 'nova' not in services:
-            continue
-        elif service not in services:
-            continue
-        results = verify_extensions(os, service, results)
-    verify_glance_api_versions(os)
-    verify_nova_api_versions(os)
-    display_results(results)
-
-
-if __name__ == "__main__":
-    main(sys.argv)
diff --git a/tox.ini b/tox.ini
index 4a625f8..5e8d283 100644
--- a/tox.ini
+++ b/tox.ini
@@ -54,7 +54,7 @@
 setenv = OS_TEST_TIMEOUT=1200
 # The regex below is used to select heat api/scenario tests tagged as slow.
 commands =
-  bash tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+  bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
 
 [testenv:large-ops]
 sitepackages = True