Merge "Check non json type on glance client json_request method"
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..c9b6467
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,4 @@
+[run]
+branch = True
+source = tempest
+omit = tempest/tests/*,tempest/openstack/*
diff --git a/.gitignore b/.gitignore
index 28a9b9c..1777cb9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,5 @@
 build
 .testrepository
 .coverage*
+!.coveragerc
 cover/
diff --git a/HACKING.rst b/HACKING.rst
index c0df0fb..8652971 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -120,13 +120,14 @@
  - A json schema: defines properties for a request.
 
 After that a test class must be added to automatically generate test scenarios
-out of the given interface description:
+out of the given interface description::
+
+    load_tests = test.NegativeAutoTest.load_tests
 
     class SampleTestNegativeTestJSON(<your base class>, test.NegativeAutoTest):
         _interface = 'json'
         _service = 'compute'
-        _schema_file = 'compute/servers/get_console_output.json'
-        scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
+        _schema_file = <your Schema file>
 
 Negative tests must be marked with a negative attribute::
 
diff --git a/README.rst b/README.rst
index 9daf873..4393ae9 100644
--- a/README.rst
+++ b/README.rst
@@ -7,7 +7,7 @@
 deployment.
 
 Design Principles
-----------
+-----------------
 Tempest Design Principles that we strive to live by.
 
 - Tempest should be able to run against any OpenStack cloud, be it a
@@ -127,6 +127,6 @@
 of tempest when running with Python 2.6. Additionally, to enable testr to work
 with tempest using python 2.6 the discover module from the unittest-ext
 project has to be patched to switch the unittest.TestSuite to use
-unittest2.TestSuite instead. See::
+unittest2.TestSuite instead. See:
 
 https://code.google.com/p/unittest-ext/issues/detail?id=79
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 1c32b9c..c45273e 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -33,14 +33,6 @@
    field_guide/thirdparty
    field_guide/unit_tests
 
-------------------
-API and test cases
-------------------
-.. toctree::
-   :maxdepth: 1
-
-   api/modules
-
 ==================
 Indices and tables
 ==================
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 761a077..ef5e217 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -101,14 +101,32 @@
 # Options defined in tempest.config
 #
 
-# Catalog type of the baremetal provisioning service. (string
+# Catalog type of the baremetal provisioning service (string
 # value)
 #catalog_type=baremetal
 
+# Whether the Ironic nova-compute driver is enabled (boolean
+# value)
+#driver_enabled=false
+
 # The endpoint type to use for the baremetal provisioning
-# service. (string value)
+# service (string value)
 #endpoint_type=publicURL
 
+# Timeout for Ironic node to completely provision (integer
+# value)
+#active_timeout=300
+
+# Timeout for association of Nova instance and Ironic node
+# (integer value)
+#association_timeout=10
+
+# Timeout for Ironic power transitions. (integer value)
+#power_timeout=20
+
+# Timeout for unprovisioning an Ironic node. (integer value)
+#unprovision_timeout=20
+
 
 [boto]
 
@@ -193,13 +211,14 @@
 # admin credentials are known. (boolean value)
 #allow_tenant_isolation=false
 
-# Valid secondary image reference to be used in tests. (string
-# value)
-#image_ref={$IMAGE_ID}
+# Valid primary image reference to be used in tests. This is a
+# required option (string value)
+#image_ref=<None>
 
-# Valid secondary image reference to be used in tests. (string
-# value)
-#image_ref_alt={$IMAGE_ID_ALT}
+# Valid secondary image reference to be used in tests. This is
+# a required option, but if only one image is available
+# duplicate the value of image_ref above (string value)
+#image_ref_alt=<None>
 
 # Valid primary flavor to use in tests. (string value)
 #flavor_ref=1
@@ -223,7 +242,7 @@
 #image_alt_ssh_password=password
 
 # Time in seconds between build status checks. (integer value)
-#build_interval=10
+#build_interval=1
 
 # Timeout in seconds to wait for an instance to build.
 # (integer value)
@@ -232,6 +251,19 @@
 # Should the tests ssh to instances? (boolean value)
 #run_ssh=false
 
+# Auth method used to authenticate to the instance. Valid
+# choices are: keypair, configured, adminpass. keypair: start
+# the servers with an ssh keypair. configured: use the
+# configured user and password. adminpass: use the injected
+# adminPass. disabled: avoid using ssh when it is an option.
+# (string value)
+#ssh_auth_method=keypair
+
+# How to connect to the instance? fixed: using the first ip
+# that belongs to the fixed network. floating: creating and
+# using a floating ip (string value)
+#ssh_connect_method=fixed
+
 # User name used to authenticate to an instance. (string
 # value)
 #ssh_user=root
@@ -261,7 +293,7 @@
 # IP version used for SSH connections. (integer value)
 #ip_version_for_ssh=4
 
-# Dose the SSH uses Floating IP? (boolean value)
+# Does SSH use Floating IPs? (boolean value)
 #use_floatingip_for_ssh=true
 
 # Catalog type of the Compute service. (string value)
@@ -301,6 +333,9 @@
 # admin credentials are known. (boolean value)
 #allow_tenant_isolation=false
 
+# Time in seconds between build status checks. (integer value)
+#build_interval=1
+
 
 [compute-admin]
 
@@ -319,6 +354,10 @@
 # API key to use when authenticating as admin. (string value)
 #password=<None>
 
+# Domain name for authentication as admin (Keystone V3). The
+# same domain applies to user and project (string value)
+#domain_name=<None>
+
 
 [compute-feature-enabled]
 
@@ -333,11 +372,13 @@
 #disk_config=true
 
 # A list of enabled compute extensions with a special entry
-# all which indicates every extension is enabled (list value)
+# all which indicates every extension is enabled. Each
+# extension should be specified with its alias name (list value)
 #api_extensions=all
 
 # A list of enabled v3 extensions with a special entry all
-# which indicates every extension is enabled (list value)
+# which indicates every extension is enabled. Each extension
+# should be specified with its alias name (list value)
 #api_v3_extensions=all
 
 # Does the test environment support changing the admin
@@ -370,6 +411,14 @@
 # as [nova.vnc]->vnc_enabled in nova.conf (boolean value)
 #vnc_console=false
 
+# Enable Spice console. This configuration value should be
+# same as [nova.spice]->enabled in nova.conf (boolean value)
+#spice_console=false
+
+# Enable RDP console. This configuration value should be same
+# as [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console=false
+
 
 [dashboard]
 
@@ -411,6 +460,10 @@
 # value)
 #db_flavor_ref=1
 
+# Current database version to use in database tests. (string
+# value)
+#db_current_version=v1.0
+
 
 [debug]
 
@@ -421,6 +474,22 @@
 # Enable diagnostic commands (boolean value)
 #enable=true
 
+# A regex to determine which requests should be traced. This
+# is a regex to match the caller for rest client requests to
+# be able to selectively trace calls out of specific classes
+# and methods. It largely exists for test development, and is
+# not expected to be used in a real deploy of tempest. This
+# will be matched against the discovered ClassName:method in
+# the test environment. Expected values for this field are:
+# * ClassName:test_method_name - traces one test_method
+# * ClassName:setUp(Class) - traces specific setup functions
+# * ClassName:tearDown(Class) - traces specific teardown
+#   functions
+# * ClassName:_run_cleanups - traces the cleanup functions
+# If nothing is specified, this feature is not enabled. To
+# trace everything specify .* as the regex. (string value)
+#trace_requests=
+
 
 [identity]
 
@@ -469,6 +538,10 @@
 # API key to use when authenticating. (string value)
 #password=<None>
 
+# Domain name for authentication (Keystone V3). The same domain
+# applies to user and project (string value)
+#domain_name=<None>
+
 # Username of alternate user to use for Nova API requests.
 # (string value)
 #alt_username=<None>
@@ -481,6 +554,10 @@
 # (string value)
 #alt_password=<None>
 
+# Alternate domain name for authentication (Keystone V3). The
+# same domain applies to user and project (string value)
+#alt_domain_name=<None>
+
 # Administrative Username to use for Keystone API requests.
 # (string value)
 #admin_username=<None>
@@ -492,6 +569,10 @@
 # API key to use when authenticating as admin. (string value)
 #admin_password=<None>
 
+# Admin domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#admin_domain_name=<None>
+
 
 [identity-feature-enabled]
 
@@ -630,7 +711,11 @@
 
 # Time in seconds between network operation status checks.
 # (integer value)
-#build_interval=10
+#build_interval=1
+
+# List of dns servers which should be used for subnet creation
+# (list value)
+#dns_servers=8.8.8.8,8.8.4.4
 
 
 [network-feature-enabled]
@@ -713,12 +798,9 @@
 # (string value)
 #endpoint_type=publicURL
 
-# Time in seconds between build status checks. (integer value)
-#build_interval=1
-
 # Timeout in seconds to wait for a stack to build. (integer
 # value)
-#build_timeout=600
+#build_timeout=1200
 
 # Instance type for tests. Needs to be big enough for a full
 # OS plus the test workload (string value)
@@ -736,6 +818,10 @@
 # (integer value)
 #max_template_size=524288
 
+# Value must match heat configuration of the same name.
+# (integer value)
+#max_resources_per_stack=1000
+
 
 [queuing]
 
@@ -746,6 +832,10 @@
 # Catalog type of the Queuing service. (string value)
 #catalog_type=queuing
 
+# The maximum number of queue records per page when listing
+# queues (integer value)
+#max_queues_per_page=20
+
 
 [scenario]
 
@@ -892,6 +982,10 @@
 # value)
 #endpoint_type=publicURL
 
+# This variable is used as a flag to enable notification tests
+# (boolean value)
+#too_slow_to_test=true
+
 
 [volume]
 
@@ -901,7 +995,7 @@
 
 # Time in seconds between volume availability checks. (integer
 # value)
-#build_interval=10
+#build_interval=1
 
 # Timeout in seconds to wait for a volume to become available.
 # (integer value)
@@ -940,6 +1034,10 @@
 # value)
 #disk_format=raw
 
+# Default size in GB for volumes created by volumes tests
+# (integer value)
+#volume_size=1
+
 
 [volume-feature-enabled]
 
@@ -954,6 +1052,9 @@
 # Runs Cinder volumes backup test (boolean value)
 #backup=true
 
+# Runs Cinder volume snapshot test (boolean value)
+#snapshot=true
+
 # A list of enabled volume extensions with a special entry all
 # which indicates every extension is enabled (list value)
 #api_extensions=all
diff --git a/requirements.txt b/requirements.txt
index a18b092..f907e7d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,25 @@
-pbr>=0.6,<1.0
+pbr>=0.6,!=0.7,<1.0
 anyjson>=0.3.3
 httplib2>=0.7.5
 jsonschema>=2.0.0,<3.0.0
 testtools>=0.9.34
 lxml>=2.3
 boto>=2.12.0,!=2.13.0
-paramiko>=1.9.0
+paramiko>=1.13.0
 netaddr>=0.7.6
 python-glanceclient>=0.9.0
-python-keystoneclient>=0.6.0
+python-keystoneclient>=0.8.0
 python-novaclient>=2.17.0
 python-neutronclient>=2.3.4,<3
 python-cinderclient>=1.0.6
-python-heatclient>=0.2.3
+python-heatclient>=0.2.9
+python-ironicclient
 python-saharaclient>=0.6.0
-python-swiftclient>=1.6
+python-swiftclient>=2.0.2
 testresources>=0.2.4
-keyring>=1.6.1,<2.0,>=2.1
 testrepository>=0.0.18
 oslo.config>=1.2.0
-six>=1.5.2
+six>=1.6.0
 iso8601>=0.1.9
 fixtures>=0.3.14
 testscenarios>=0.4
diff --git a/setup.cfg b/setup.cfg
index a701572..5c62710 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = tempest
-version = 2014.1
+version = 2
 summary = OpenStack Integration Testing
 description-file =
     README.rst
@@ -17,6 +17,12 @@
     Programming Language :: Python :: 2
     Programming Language :: Python :: 2.7
 
+[entry_points]
+console_scripts =
+    verify-tempest-config = tempest.cmd.verify_tempest_config:main
+    javelin2 = tempest.cmd.javelin:main
+    run-tempest-stress = tempest.cmd.run_stress:main
+
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
diff --git a/setup.py b/setup.py
index 70c2b3f..7363757 100755
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,14 @@
 # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
 import setuptools
 
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
 setuptools.setup(
     setup_requires=['pbr'],
     pbr=True)
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 2e745f8..6f7e438 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -27,13 +27,12 @@
     def decorator(f):
         @functools.wraps(f)
         def wrapper(cls, *args, **kwargs):
-            result = f(cls, *args, **kwargs)
-            body = result[resource]
+            resp, body = f(cls, *args, **kwargs)
 
             if 'uuid' in body:
                 cls.created_objects[resource].add(body['uuid'])
 
-            return result
+            return resp, body
         return wrapper
     return decorator
 
@@ -51,7 +50,7 @@
 
         mgr = clients.AdminManager()
         cls.client = mgr.baremetal_client
-
+        cls.power_timeout = CONF.baremetal.power_timeout
         cls.created_objects = {'chassis': set(),
                                'port': set(),
                                'node': set()}
@@ -81,8 +80,7 @@
         """
         description = description or data_utils.rand_name('test-chassis-')
         resp, body = cls.client.create_chassis(description=description)
-
-        return {'chassis': body, 'response': resp}
+        return resp, body
 
     @classmethod
     @creates('node')
@@ -102,23 +100,26 @@
                                             cpu_num=cpu_num, storage=storage,
                                             memory=memory, driver=driver)
 
-        return {'node': body, 'response': resp}
+        return resp, body
 
     @classmethod
     @creates('port')
-    def create_port(cls, node_id, address=None):
+    def create_port(cls, node_id, address, extra=None, uuid=None):
         """
         Wrapper utility for creating test ports.
 
-        :param address: MAC address of the port. If not supplied, a random
-            value will be generated.
+        :param address: MAC address of the port.
+        :param extra: Meta data of the port. If not supplied, an empty
+            dictionary will be created.
+        :param uuid: UUID of the port.
         :return: Created port.
 
         """
-        address = address or data_utils.rand_mac_address()
-        resp, body = cls.client.create_port(address=address, node_id=node_id)
+        extra = extra or {}
+        resp, body = cls.client.create_port(address=address, node_id=node_id,
+                                            extra=extra, uuid=uuid)
 
-        return {'port': body, 'response': resp}
+        return resp, body
 
     @classmethod
     def delete_chassis(cls, chassis_id):
@@ -170,3 +171,12 @@
             cls.created_objects['port'].remove(port_id)
 
         return resp
+
+    def validate_self_link(self, resource, uuid, link):
+        """Check whether the given self link is formatted correctly."""
+        expected_link = "{base}/{pref}/{res}/{uuid}".format(
+                        base=self.client.base_url,
+                        pref=self.client.uri_prefix,
+                        res=resource,
+                        uuid=uuid)
+        self.assertEqual(expected_link, link)
diff --git a/tempest/api/baremetal/test_api_discovery.py b/tempest/api/baremetal/test_api_discovery.py
index e594b3e..bee10b9 100644
--- a/tempest/api/baremetal/test_api_discovery.py
+++ b/tempest/api/baremetal/test_api_discovery.py
@@ -20,6 +20,7 @@
     @test.attr(type='smoke')
     def test_api_versions(self):
         resp, descr = self.client.get_api_description()
+        self.assertEqual('200', resp['status'])
         expected_versions = ('v1',)
 
         versions = [version['id'] for version in descr['versions']]
@@ -30,6 +31,7 @@
     @test.attr(type='smoke')
     def test_default_version(self):
         resp, descr = self.client.get_api_description()
+        self.assertEqual('200', resp['status'])
         default_version = descr['default_version']
 
         self.assertEqual(default_version['id'], 'v1')
@@ -37,6 +39,7 @@
     @test.attr(type='smoke')
     def test_version_1_resources(self):
         resp, descr = self.client.get_version_description(version='v1')
+        self.assertEqual('200', resp['status'])
         expected_resources = ('nodes', 'chassis',
                               'ports', 'links', 'media_types')
 
diff --git a/tempest/api/baremetal/test_chassis.py b/tempest/api/baremetal/test_chassis.py
index 7af1336..4ab86c2 100644
--- a/tempest/api/baremetal/test_chassis.py
+++ b/tempest/api/baremetal/test_chassis.py
@@ -20,57 +20,64 @@
 class TestChassis(base.BaseBaremetalTest):
     """Tests for chassis."""
 
+    @classmethod
+    def setUpClass(cls):
+        super(TestChassis, cls).setUpClass()
+        _, cls.chassis = cls.create_chassis()
+
+    def _assertExpected(self, expected, actual):
+        # Check that expected keys/values exist in the actual response body
+        for key, value in expected.iteritems():
+            if key not in ('created_at', 'updated_at'):
+                self.assertIn(key, actual)
+                self.assertEqual(value, actual[key])
+
     @test.attr(type='smoke')
     def test_create_chassis(self):
         descr = data_utils.rand_name('test-chassis-')
-        ch = self.create_chassis(description=descr)['chassis']
-
-        self.assertEqual(ch['description'], descr)
+        resp, chassis = self.create_chassis(description=descr)
+        self.assertEqual('201', resp['status'])
+        self.assertEqual(chassis['description'], descr)
 
     @test.attr(type='smoke')
     def test_create_chassis_unicode_description(self):
         # Use a unicode string for testing:
         # 'We ♡ OpenStack in Ukraine'
         descr = u'В Україні ♡ OpenStack!'
-        ch = self.create_chassis(description=descr)['chassis']
-
-        self.assertEqual(ch['description'], descr)
+        resp, chassis = self.create_chassis(description=descr)
+        self.assertEqual('201', resp['status'])
+        self.assertEqual(chassis['description'], descr)
 
     @test.attr(type='smoke')
     def test_show_chassis(self):
-        descr = data_utils.rand_name('test-chassis-')
-        uuid = self.create_chassis(description=descr)['chassis']['uuid']
-
-        resp, chassis = self.client.show_chassis(uuid)
-
-        self.assertEqual(chassis['uuid'], uuid)
-        self.assertEqual(chassis['description'], descr)
+        resp, chassis = self.client.show_chassis(self.chassis['uuid'])
+        self.assertEqual('200', resp['status'])
+        self._assertExpected(self.chassis, chassis)
 
     @test.attr(type="smoke")
     def test_list_chassis(self):
-        created_ids = [self.create_chassis()['chassis']['uuid']
-                       for i in range(0, 5)]
-
         resp, body = self.client.list_chassis()
-        loaded_ids = [ch['uuid'] for ch in body['chassis']]
-
-        for i in created_ids:
-            self.assertIn(i, loaded_ids)
+        self.assertEqual('200', resp['status'])
+        self.assertIn(self.chassis['uuid'],
+                      [i['uuid'] for i in body['chassis']])
 
     @test.attr(type='smoke')
     def test_delete_chassis(self):
-        uuid = self.create_chassis()['chassis']['uuid']
+        resp, body = self.create_chassis()
+        uuid = body['uuid']
 
-        self.delete_chassis(uuid)
-
+        resp = self.delete_chassis(uuid)
+        self.assertEqual('204', resp['status'])
         self.assertRaises(exc.NotFound, self.client.show_chassis, uuid)
 
     @test.attr(type='smoke')
     def test_update_chassis(self):
-        chassis_id = self.create_chassis()['chassis']['uuid']
+        resp, body = self.create_chassis()
+        uuid = body['uuid']
 
         new_description = data_utils.rand_name('new-description-')
-        self.client.update_chassis(chassis_id, description=new_description)
-
-        resp, chassis = self.client.show_chassis(chassis_id)
+        resp, body = (self.client.update_chassis(uuid,
+                      description=new_description))
+        self.assertEqual('200', resp['status'])
+        resp, chassis = self.client.show_chassis(uuid)
         self.assertEqual(chassis['description'], new_description)
diff --git a/tempest/api/baremetal/test_drivers.py b/tempest/api/baremetal/test_drivers.py
new file mode 100644
index 0000000..445ca60
--- /dev/null
+++ b/tempest/api/baremetal/test_drivers.py
@@ -0,0 +1,26 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.baremetal import base
+from tempest import test
+
+
+class TestDrivers(base.BaseBaremetalTest):
+    """Tests for drivers."""
+
+    @test.attr(type="smoke")
+    def test_list_drivers(self):
+        resp, drivers = self.client.list_drivers()
+        self.assertEqual('200', resp['status'])
+        self.assertIn('fake', [d['name'] for d in drivers['drivers']])
diff --git a/tempest/api/baremetal/test_nodes.py b/tempest/api/baremetal/test_nodes.py
index 0f585cb..b6432ad 100644
--- a/tempest/api/baremetal/test_nodes.py
+++ b/tempest/api/baremetal/test_nodes.py
@@ -23,7 +23,15 @@
     def setUp(self):
         super(TestNodes, self).setUp()
 
-        self.chassis = self.create_chassis()['chassis']
+        _, self.chassis = self.create_chassis()
+        _, self.node = self.create_node(self.chassis['uuid'])
+
+    def _assertExpected(self, expected, actual):
+        # Check that expected keys/values exist in the actual response body
+        for key, value in six.iteritems(expected):
+            if key not in ('created_at', 'updated_at'):
+                self.assertIn(key, actual)
+                self.assertEqual(value, actual[key])
 
     @test.attr(type='smoke')
     def test_create_node(self):
@@ -32,45 +40,32 @@
                   'storage': '10240',
                   'memory': '1024'}
 
-        node = self.create_node(self.chassis['uuid'], **params)['node']
-
-        for key in params:
-            self.assertEqual(node['properties'][key], params[key])
+        resp, body = self.create_node(self.chassis['uuid'], **params)
+        self.assertEqual('201', resp['status'])
+        self._assertExpected(params, body['properties'])
 
     @test.attr(type='smoke')
     def test_delete_node(self):
-        node = self.create_node(self.chassis['uuid'])['node']
-        node_id = node['uuid']
+        resp, node = self.create_node(self.chassis['uuid'])
+        self.assertEqual('201', resp['status'])
 
-        resp = self.delete_node(node_id)
+        resp = self.delete_node(node['uuid'])
 
         self.assertEqual(resp['status'], '204')
-        self.assertRaises(exc.NotFound, self.client.show_node, node_id)
+        self.assertRaises(exc.NotFound, self.client.show_node, node['uuid'])
 
     @test.attr(type='smoke')
     def test_show_node(self):
-        params = {'cpu_arch': 'x86_64',
-                  'cpu_num': '4',
-                  'storage': '100',
-                  'memory': '512'}
-
-        created_node = self.create_node(self.chassis['uuid'], **params)['node']
-        resp, loaded_node = self.client.show_node(created_node['uuid'])
-
-        for key, val in created_node.iteritems():
-            if key not in ('created_at', 'updated_at'):
-                self.assertEqual(loaded_node[key], val)
+        resp, loaded_node = self.client.show_node(self.node['uuid'])
+        self.assertEqual('200', resp['status'])
+        self._assertExpected(self.node, loaded_node)
 
     @test.attr(type='smoke')
     def test_list_nodes(self):
-        uuids = [self.create_node(self.chassis['uuid'])['node']['uuid']
-                 for i in range(0, 5)]
-
         resp, body = self.client.list_nodes()
-        loaded_uuids = [n['uuid'] for n in body['nodes']]
-
-        for u in uuids:
-            self.assertIn(u, loaded_uuids)
+        self.assertEqual('200', resp['status'])
+        self.assertIn(self.node['uuid'],
+                      [i['uuid'] for i in body['nodes']])
 
     @test.attr(type='smoke')
     def test_update_node(self):
@@ -79,17 +74,16 @@
                  'storage': '10',
                  'memory': '128'}
 
-        node = self.create_node(self.chassis['uuid'], **props)['node']
-        node_id = node['uuid']
+        resp, node = self.create_node(self.chassis['uuid'], **props)
+        self.assertEqual('201', resp['status'])
 
-        new_props = {'cpu_arch': 'x86',
-                     'cpu_num': '1',
-                     'storage': '10000',
-                     'memory': '12300'}
+        new_p = {'cpu_arch': 'x86',
+                 'cpu_num': '1',
+                 'storage': '10000',
+                 'memory': '12300'}
 
-        self.client.update_node(node_id, properties=new_props)
-        resp, node = self.client.show_node(node_id)
-
-        for name, value in six.iteritems(new_props):
-            if name not in ('created_at', 'updated_at'):
-                self.assertEqual(node['properties'][name], value)
+        resp, body = self.client.update_node(node['uuid'], properties=new_p)
+        self.assertEqual('200', resp['status'])
+        resp, node = self.client.show_node(node['uuid'])
+        self.assertEqual('200', resp['status'])
+        self._assertExpected(new_p, node['properties'])
diff --git a/tempest/api/baremetal/test_nodestates.py b/tempest/api/baremetal/test_nodestates.py
new file mode 100644
index 0000000..f0e084b
--- /dev/null
+++ b/tempest/api/baremetal/test_nodestates.py
@@ -0,0 +1,62 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.baremetal import base
+from tempest import exceptions
+from tempest.openstack.common import timeutils
+from tempest import test
+
+
+class TestNodeStates(base.BaseBaremetalTest):
+    """Tests for baremetal NodeStates."""
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestNodeStates, cls).setUpClass()
+        _, cls.chassis = cls.create_chassis()
+        _, cls.node = cls.create_node(cls.chassis['uuid'])
+
+    def _validate_power_state(self, node_uuid, power_state):
+        # Validate that power state is set within timeout
+        if power_state == 'rebooting':
+            power_state = 'power on'
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(
+                start, timeutils.utcnow()) < self.power_timeout:
+            resp, node = self.client.show_node(node_uuid)
+            self.assertEqual(200, resp.status)
+            if node['power_state'] == power_state:
+                return
+        message = ('Failed to set power state within '
+                   'the required time: %s sec.' % self.power_timeout)
+        raise exceptions.TimeoutException(message)
+
+    @test.attr(type='smoke')
+    def test_list_nodestates(self):
+        resp, nodestates = self.client.list_nodestates(self.node['uuid'])
+        self.assertEqual('200', resp['status'])
+        for key in nodestates:
+            self.assertEqual(nodestates[key], self.node[key])
+
+    @test.attr(type='smoke')
+    def test_set_node_power_state(self):
+        _, node = self.create_node(self.chassis['uuid'])
+        states = ["power on", "rebooting", "power off"]
+        for state in states:
+            # Set power state
+            resp, _ = self.client.set_node_power_state(node['uuid'],
+                                                       state)
+            self.assertEqual('202', resp['status'])
+            # Check power state after state is set
+            self._validate_power_state(node['uuid'], state)
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index fb2acc7..c2af29a 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -22,62 +22,249 @@
     def setUp(self):
         super(TestPorts, self).setUp()
 
-        chassis = self.create_chassis()['chassis']
-        self.node = self.create_node(chassis['uuid'])['node']
+        _, self.chassis = self.create_chassis()
+        _, self.node = self.create_node(self.chassis['uuid'])
+        _, self.port = self.create_port(self.node['uuid'],
+                                        data_utils.rand_mac_address())
+
+    def _assertExpected(self, expected, actual):
+        # Check that expected keys/values exist in the actual response body
+        for key, value in expected.iteritems():
+            if key not in ('created_at', 'updated_at'):
+                self.assertIn(key, actual)
+                self.assertEqual(value, actual[key])
 
     @test.attr(type='smoke')
     def test_create_port(self):
         node_id = self.node['uuid']
         address = data_utils.rand_mac_address()
 
-        port = self.create_port(node_id=node_id, address=address)['port']
+        resp, port = self.create_port(node_id=node_id, address=address)
+        self.assertEqual(201, resp.status)
 
-        self.assertEqual(port['address'], address)
-        self.assertEqual(port['node_uuid'], node_id)
+        resp, body = self.client.show_port(port['uuid'])
+
+        self.assertEqual(200, resp.status)
+        self._assertExpected(port, body)
+
+    @test.attr(type='smoke')
+    def test_create_port_specifying_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        uuid = data_utils.rand_uuid()
+
+        resp, port = self.create_port(node_id=node_id,
+                                      address=address, uuid=uuid)
+        self.assertEqual(201, resp.status)
+
+        resp, body = self.client.show_port(uuid)
+        self.assertEqual(200, resp.status)
+        self._assertExpected(port, body)
+
+    @test.attr(type='smoke')
+    def test_create_port_with_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        resp, port = self.create_port(node_id=node_id, address=address,
+                                      extra=extra)
+        self.assertEqual(201, resp.status)
+
+        resp, body = self.client.show_port(port['uuid'])
+        self.assertEqual(200, resp.status)
+        self._assertExpected(port, body)
 
     @test.attr(type='smoke')
     def test_delete_port(self):
         node_id = self.node['uuid']
-        port_id = self.create_port(node_id=node_id)['port']['uuid']
+        address = data_utils.rand_mac_address()
+        resp, port = self.create_port(node_id=node_id, address=address)
+        self.assertEqual(201, resp.status)
 
-        resp = self.delete_port(port_id)
+        resp = self.delete_port(port['uuid'])
 
-        self.assertEqual(resp['status'], '204')
-        self.assertRaises(exc.NotFound, self.client.show_port, port_id)
+        self.assertEqual(204, resp.status)
+        self.assertRaises(exc.NotFound, self.client.show_port, port['uuid'])
 
     @test.attr(type='smoke')
     def test_show_port(self):
-        node_id = self.node['uuid']
-        address = data_utils.rand_mac_address()
+        resp, port = self.client.show_port(self.port['uuid'])
+        self.assertEqual(200, resp.status)
+        self._assertExpected(self.port, port)
 
-        port_id = self.create_port(node_id=node_id,
-                                   address=address)['port']['uuid']
-
-        resp, port = self.client.show_port(port_id)
-
-        self.assertEqual(port['uuid'], port_id)
-        self.assertEqual(port['address'], address)
+    @test.attr(type='smoke')
+    def test_show_port_with_links(self):
+        resp, port = self.client.show_port(self.port['uuid'])
+        self.assertEqual(200, resp.status)
+        self.assertIn('links', port.keys())
+        self.assertEqual(2, len(port['links']))
+        self.assertIn(port['uuid'], port['links'][0]['href'])
 
     @test.attr(type='smoke')
     def test_list_ports(self):
-        node_id = self.node['uuid']
-
-        uuids = [self.create_port(node_id=node_id)['port']['uuid']
-                 for i in range(0, 5)]
-
         resp, body = self.client.list_ports()
-        loaded_uuids = [p['uuid'] for p in body['ports']]
-
-        for u in uuids:
-            self.assertIn(u, loaded_uuids)
+        self.assertEqual(200, resp.status)
+        self.assertIn(self.port['uuid'],
+                      [i['uuid'] for i in body['ports']])
+        # Verify self links.
+        for port in body['ports']:
+            self.validate_self_link('ports', port['uuid'],
+                                    port['links'][0]['href'])
 
     @test.attr(type='smoke')
-    def test_update_port(self):
+    def test_list_with_limit(self):
+        resp, body = self.client.list_ports(limit=3)
+        self.assertEqual(200, resp.status)
+
+        next_marker = body['ports'][-1]['uuid']
+        self.assertIn(next_marker, body['next'])
+
+    def test_list_ports_details(self):
         node_id = self.node['uuid']
-        port_id = self.create_port(node_id=node_id)['port']['uuid']
+
+        uuids = [
+            self.create_port(node_id=node_id,
+                             address=data_utils.rand_mac_address())
+            [1]['uuid'] for i in range(0, 5)]
+
+        resp, body = self.client.list_ports_detail()
+        self.assertEqual(200, resp.status)
+
+        ports_dict = dict((port['uuid'], port) for port in body['ports']
+                          if port['uuid'] in uuids)
+
+        for uuid in uuids:
+            self.assertIn(uuid, ports_dict)
+            port = ports_dict[uuid]
+            self.assertIn('extra', port)
+            self.assertIn('node_uuid', port)
+            # never expose the node_id
+            self.assertNotIn('node_id', port)
+            # Verify self link.
+            self.validate_self_link('ports', port['uuid'],
+                                    port['links'][0]['href'])
+
+    @test.attr(type='smoke')
+    def test_update_port_replace(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+        resp, port = self.create_port(node_id=node_id, address=address,
+                                      extra=extra)
+        self.assertEqual(201, resp.status)
 
         new_address = data_utils.rand_mac_address()
-        self.client.update_port(port_id, address=new_address)
+        new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
+                     'key3': 'new-value3'}
 
-        resp, body = self.client.show_port(port_id)
-        self.assertEqual(body['address'], new_address)
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': new_address},
+                 {'path': '/extra/key1',
+                  'op': 'replace',
+                  'value': new_extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'replace',
+                  'value': new_extra['key2']},
+                 {'path': '/extra/key3',
+                  'op': 'replace',
+                  'value': new_extra['key3']}]
+
+        resp, _ = self.client.update_port(port['uuid'], patch)
+        self.assertEqual(200, resp.status)
+
+        resp, body = self.client.show_port(port['uuid'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_address, body['address'])
+        self.assertEqual(new_extra, body['extra'])
+
+    @test.attr(type='smoke')
+    def test_update_port_remove(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+        resp, port = self.create_port(node_id=node_id, address=address,
+                                      extra=extra)
+        self.assertEqual(201, resp.status)
+
+        # Removing one item from the collection
+        resp, _ = self.client.update_port(port['uuid'],
+                                          [{'path': '/extra/key2',
+                                           'op': 'remove'}])
+        self.assertEqual(200, resp.status)
+        extra.pop('key2')
+        resp, body = self.client.show_port(port['uuid'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(extra, body['extra'])
+
+        # Removing the collection
+        resp, _ = self.client.update_port(port['uuid'], [{'path': '/extra',
+                                                         'op': 'remove'}])
+        self.assertEqual(200, resp.status)
+        resp, body = self.client.show_port(port['uuid'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual({}, body['extra'])
+
+        # Assert nothing else was changed
+        self.assertEqual(node_id, body['node_uuid'])
+        self.assertEqual(address, body['address'])
+
+    @test.attr(type='smoke')
+    def test_update_port_add(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        resp, port = self.create_port(node_id=node_id, address=address)
+        self.assertEqual(201, resp.status)
+
+        extra = {'key1': 'value1', 'key2': 'value2'}
+
+        patch = [{'path': '/extra/key1',
+                  'op': 'add',
+                  'value': extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'add',
+                  'value': extra['key2']}]
+
+        resp, _ = self.client.update_port(port['uuid'], patch)
+        self.assertEqual(200, resp.status)
+
+        resp, body = self.client.show_port(port['uuid'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(extra, body['extra'])
+
+    @test.attr(type='smoke')
+    def test_update_port_mixed_ops(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2'}
+
+        resp, port = self.create_port(node_id=node_id, address=address,
+                                      extra=extra)
+        self.assertEqual(201, resp.status)
+
+        new_address = data_utils.rand_mac_address()
+        new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': new_address},
+                 {'path': '/extra/key1',
+                  'op': 'replace',
+                  'value': new_extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'remove'},
+                 {'path': '/extra/key3',
+                  'op': 'add',
+                  'value': new_extra['key3']}]
+
+        resp, _ = self.client.update_port(port['uuid'], patch)
+        self.assertEqual(200, resp.status)
+
+        resp, body = self.client.show_port(port['uuid'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_address, body['address'])
+        self.assertEqual(new_extra, body['extra'])
diff --git a/tempest/api/baremetal/test_ports_negative.py b/tempest/api/baremetal/test_ports_negative.py
index 6cb8812..4cbe00e 100644
--- a/tempest/api/baremetal/test_ports_negative.py
+++ b/tempest/api/baremetal/test_ports_negative.py
@@ -25,16 +25,346 @@
         chassis = self.create_chassis()['chassis']
         self.node = self.create_node(chassis['uuid'])['node']
 
-    @test.attr(type='negative')
-    def test_create_port_invalid_mac(self):
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_mac(self):
         node_id = self.node['uuid']
-        address = 'not an uuid'
+        address = 'malformed:mac'
 
         self.assertRaises(exc.BadRequest,
                           self.create_port, node_id=node_id, address=address)
 
-    @test.attr(type='negative')
-    def test_create_port_wrong_node_id(self):
-        node_id = str(data_utils.rand_uuid())
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 0.123}
+        self.assertRaises(exc.BadRequest,
+                          self.create_port, node_id=node_id,
+                          address=address, extra=extra)
 
-        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id)
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_nonexistent_node_id(self):
+        node_id = str(data_utils.rand_uuid())
+        address = data_utils.rand_mac_address()
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+                          address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_show_port_malformed_uuid(self):
+        self.assertRaises(exc.BadRequest, self.client.show_port,
+                          'malformed:uuid')
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_show_port_nonexistent_uuid(self):
+        self.assertRaises(exc.NotFound, self.client.show_port,
+                          data_utils.rand_uuid())
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_show_port_by_mac_not_allowed(self):
+        self.assertRaises(exc.BadRequest, self.client.show_port,
+                          data_utils.rand_mac_address())
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_duplicated_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        uuid = data_utils.rand_uuid()
+
+        self.create_port(node_id=node_id, address=address, uuid=uuid)
+        self.assertRaises(exc.Conflict, self.create_port, node_id=node_id,
+                          address=address, uuid=uuid)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_no_mandatory_field_node_id(self):
+        address = data_utils.rand_mac_address()
+
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=None,
+                          address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_no_mandatory_field_mac(self):
+        node_id = self.node['uuid']
+
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+                          address=None)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        uuid = 'malformed:uuid'
+
+        self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+                          address=address, uuid=uuid)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_malformed_node_id(self):
+        address = data_utils.rand_mac_address()
+        self.assertRaises(exc.BadRequest, self.create_port,
+                          node_id='malformed:nodeid', address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_create_port_duplicated_mac(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        self.create_port(node_id=node_id, address=address)
+        self.assertRaises(exc.Conflict,
+                          self.create_port, node_id=node_id,
+                          address=address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_by_mac_not_allowed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        self.create_port(node_id=node_id, address=address, extra=extra)
+
+        patch = [{'path': '/extra/key',
+                  'op': 'replace',
+                  'value': 'new-value'}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, address,
+                          patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_nonexistent(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
+        self.client.delete_port(port_id)
+
+        patch = [{'path': '/extra/key',
+                  'op': 'replace',
+                  'value': 'new-value'}]
+        self.assertRaises(exc.NotFound,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_malformed_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        self.create_port(node_id=node_id, address=address)
+
+        new_address = data_utils.rand_mac_address()
+        self.assertRaises(exc.BadRequest, self.client.update_port,
+                          uuid='malformed:uuid',
+                          patch=[{'path': '/address', 'op': 'replace',
+                                  'value': new_address}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_add_malformed_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/extra/key', 'op': 'add',
+                            'value': 0.123}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_add_whole_malformed_extra(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/extra',
+                            'op': 'add',
+                            'value': [1, 2, 3, 4, 'a']}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_add_nonexistent_property(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/nonexistent', 'op': 'add',
+                            'value': 'value'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_node_id_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+
+        patch = [{'path': '/node_uuid',
+                  'op': 'replace',
+                  'value': 'malformed:node_uuid'}]
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_mac_with_duplicated(self):
+        node_id = self.node['uuid']
+        address1 = data_utils.rand_mac_address()
+        address2 = data_utils.rand_mac_address()
+
+        self.create_port(node_id=node_id, address=address1)
+        port_id = self.create_port(node_id=node_id,
+                                   address=address2)['port']['uuid']
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': address1}]
+        self.assertRaises(exc.Conflict,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_node_id_with_nonexistent(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+
+        patch = [{'path': '/node_uuid',
+                  'op': 'replace',
+                  'value': data_utils.rand_uuid()}]
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_mac_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': 'malformed:mac'}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_extra_item_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address,
+                                   extra=extra)['port']['uuid']
+        patch = [{'path': '/extra/key',
+                  'op': 'replace',
+                  'value': 0.123}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_whole_extra_with_malformed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key': 'value'}
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address,
+                                   extra=extra)['port']['uuid']
+        patch = [{'path': '/extra',
+                  'op': 'replace',
+                  'value': [1, 2, 3, 4, 'a']}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_replace_nonexistent_property(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id,
+                                   address=address)['port']['uuid']
+
+        patch = [{'path': '/nonexistent', 'op': 'replace', 'value': 'value'}]
+
+        self.assertRaises(exc.BadRequest,
+                          self.client.update_port, port_id, patch)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_remove_mandatory_field_mac(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/address', 'op': 'remove'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_remove_mandatory_field_port_uuid(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/uuid', 'op': 'remove'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_remove_nonexistent_property(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        port_id = self.create_port(node_id=node_id, address=address)['port'][
+            'uuid']
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          [{'path': '/nonexistent', 'op': 'remove'}])
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_delete_port_by_mac_not_allowed(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+
+        self.create_port(node_id=node_id, address=address)
+        self.assertRaises(exc.BadRequest, self.client.delete_port, address)
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_update_port_mixed_ops_integrity(self):
+        node_id = self.node['uuid']
+        address = data_utils.rand_mac_address()
+        extra = {'key1': 'value1', 'key2': 'value2'}
+
+        port_id = self.create_port(node_id=node_id, address=address,
+                                   extra=extra)['port']['uuid']
+
+        new_address = data_utils.rand_mac_address()
+        new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+        patch = [{'path': '/address',
+                  'op': 'replace',
+                  'value': new_address},
+                 {'path': '/extra/key1',
+                  'op': 'replace',
+                  'value': new_extra['key1']},
+                 {'path': '/extra/key2',
+                  'op': 'remove'},
+                 {'path': '/extra/key3',
+                  'op': 'add',
+                  'value': new_extra['key3']},
+                 {'path': '/nonexistent',
+                  'op': 'replace',
+                  'value': 'value'}]
+
+        self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+                          patch)
+
+        # patch should not be applied
+        resp, body = self.client.show_port(port_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(address, body['address'])
+        self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
new file mode 100644
index 0000000..4808601
--- /dev/null
+++ b/tempest/api/compute/admin/test_agents.py
@@ -0,0 +1,123 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest.openstack.common import log
+from tempest import test
+
+LOG = log.getLogger(__name__)
+
+
+class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
+    """
+    Tests Agents API
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(AgentsAdminTestJSON, cls).setUpClass()
+        cls.client = cls.os_adm.agents_client
+
+    def setUp(self):
+        super(AgentsAdminTestJSON, self).setUp()
+        params = self._param_helper(
+            hypervisor='common', os='linux', architecture='x86_64',
+            version='7.0', url='xxx://xxxx/xxx/xxx',
+            md5hash='add6bb58e139be103324d04d82d8f545')
+        resp, body = self.client.create_agent(**params)
+        self.assertEqual(200, resp.status)
+        self.agent_id = body['agent_id']
+
+    def tearDown(self):
+        try:
+            self.client.delete_agent(self.agent_id)
+        except exceptions.NotFound:
+            pass
+        except Exception:
+            LOG.exception('Exception raised deleting agent %s', self.agent_id)
+        super(AgentsAdminTestJSON, self).tearDown()
+
+    def _param_helper(self, **kwargs):
+        rand_key = 'architecture'
+        if rand_key in kwargs:
+            # NOTE: The rand_name is for avoiding agent conflicts.
+            # If you try to create an agent with the same hypervisor,
+            # os and architecture as an existing agent, Nova will return
+            # an HTTPConflict or HTTPServerError.
+            kwargs[rand_key] = data_utils.rand_name(kwargs[rand_key])
+        return kwargs
+
+    @test.attr(type='gate')
+    def test_create_agent(self):
+        # Create an agent.
+        params = self._param_helper(
+            hypervisor='kvm', os='win', architecture='x86',
+            version='7.0', url='xxx://xxxx/xxx/xxx',
+            md5hash='add6bb58e139be103324d04d82d8f545')
+        resp, body = self.client.create_agent(**params)
+        self.assertEqual(200, resp.status)
+        self.addCleanup(self.client.delete_agent, body['agent_id'])
+        for expected_item, value in params.items():
+            self.assertEqual(value, body[expected_item])
+
+    @test.attr(type='gate')
+    def test_update_agent(self):
+        # Update an agent.
+        params = self._param_helper(
+            version='8.0', url='xxx://xxxx/xxx/xxx2',
+            md5hash='add6bb58e139be103324d04d82d8f547')
+        resp, body = self.client.update_agent(self.agent_id, **params)
+        self.assertEqual(200, resp.status)
+        for expected_item, value in params.items():
+            self.assertEqual(value, body[expected_item])
+
+    @test.attr(type='gate')
+    def test_delete_agent(self):
+        # Delete an agent.
+        resp, _ = self.client.delete_agent(self.agent_id)
+        self.assertEqual(200, resp.status)
+
+        # Verify the list doesn't contain the deleted agent.
+        resp, agents = self.client.list_agents()
+        self.assertEqual(200, resp.status)
+        self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
+
+    @test.attr(type='gate')
+    def test_list_agents(self):
+        # List all agents.
+        resp, agents = self.client.list_agents()
+        self.assertEqual(200, resp.status)
+        self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
+        self.assertIn(self.agent_id, map(lambda x: x['agent_id'], agents))
+
+    @test.attr(type='gate')
+    def test_list_agents_with_filter(self):
+        # List the agent builds by the filter.
+        params = self._param_helper(
+            hypervisor='xen', os='linux', architecture='x86',
+            version='7.0', url='xxx://xxxx/xxx/xxx1',
+            md5hash='add6bb58e139be103324d04d82d8f546')
+        resp, agent_xen = self.client.create_agent(**params)
+        self.assertEqual(200, resp.status)
+        self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
+
+        agent_id_xen = agent_xen['agent_id']
+        params_filter = {'hypervisor': agent_xen['hypervisor']}
+        resp, agents = self.client.list_agents(params_filter)
+        self.assertEqual(200, resp.status)
+        self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
+        self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
+        self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index fb249e5..c2376c9 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -16,6 +16,7 @@
 from tempest.api.compute import base
 from tempest.common import tempest_fixtures as fixtures
 from tempest.common.utils import data_utils
+from tempest import exceptions
 from tempest import test
 
 
@@ -39,11 +40,20 @@
                     filter(lambda y: y['service'] == 'compute', hosts_all))
         cls.host = hosts[0]
 
+    def _try_delete_aggregate(self, aggregate_id):
+        # delete aggregate, if it exists
+        try:
+            self.client.delete_aggregate(aggregate_id)
+        # if the aggregate is not found, it was already deleted by the test
+        except exceptions.NotFound:
+            pass
+
     @test.attr(type='gate')
     def test_aggregate_create_delete(self):
         # Create and delete an aggregate.
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         resp, aggregate = self.client.create_aggregate(name=aggregate_name)
+        self.addCleanup(self._try_delete_aggregate, aggregate['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertIsNone(aggregate['availability_zone'])
@@ -59,6 +69,7 @@
         az_name = data_utils.rand_name(self.az_name_prefix)
         resp, aggregate = self.client.create_aggregate(
             name=aggregate_name, availability_zone=az_name)
+        self.addCleanup(self._try_delete_aggregate, aggregate['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(aggregate_name, aggregate['name'])
         self.assertEqual(az_name, aggregate['availability_zone'])
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 3c06624..9555367 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -17,15 +17,15 @@
 from tempest import test
 
 
-class AZAdminTestJSON(base.BaseV2ComputeAdminTest):
-
+class AZAdminV3Test(base.BaseComputeAdminTest):
     """
     Tests Availability Zone API List
     """
+    _api_version = 3
 
     @classmethod
     def setUpClass(cls):
-        super(AZAdminTestJSON, cls).setUpClass()
+        super(AZAdminV3Test, cls).setUpClass()
         cls.client = cls.os_adm.availability_zone_client
 
     @test.attr(type='gate')
@@ -44,5 +44,9 @@
         self.assertTrue(len(availability_zone) > 0)
 
 
-class AZAdminTestXML(AZAdminTestJSON):
+class AZAdminV2TestJSON(AZAdminV3Test):
+    _api_version = 2
+
+
+class AZAdminV2TestXML(AZAdminV2TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 759585e..a8a9bb4 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -30,8 +30,8 @@
     @classmethod
     def setUpClass(cls):
         super(FlavorsAdminTestJSON, cls).setUpClass()
-        if not test.is_extension_enabled('FlavorExtraData', 'compute'):
-            msg = "FlavorExtraData extension not enabled."
+        if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+            msg = "OS-FLV-EXT-DATA extension not enabled."
             raise cls.skipException(msg)
 
         cls.client = cls.os_adm.flavors_client
@@ -170,7 +170,6 @@
                 flag = True
         self.assertTrue(flag)
 
-    @test.skip_because(bug='1286297')
     @test.attr(type='gate')
     def test_list_non_public_flavor(self):
         # Create a flavor with os-flavor-access:is_public false.
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index aa0138f..f2554ea 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -28,25 +28,21 @@
     @classmethod
     def setUpClass(cls):
         super(FlavorsAccessTestJSON, cls).setUpClass()
-        if not test.is_extension_enabled('FlavorExtraData', 'compute'):
-            msg = "FlavorExtraData extension not enabled."
+        if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+            msg = "OS-FLV-EXT-DATA extension not enabled."
             raise cls.skipException(msg)
 
+        # Compute admin flavor client
         cls.client = cls.os_adm.flavors_client
-        admin_client = cls._get_identity_admin_client()
-        cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
-                                                     tenant_name)
-        cls.tenant_id = cls.tenant['id']
-        cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
-                                                         flavors_client.
-                                                         tenant_name)
-        cls.adm_tenant_id = cls.adm_tenant['id']
+        # Non admin tenant ID
+        cls.tenant_id = cls.flavors_client.tenant_id
+        # Compute admin tenant ID
+        cls.adm_tenant_id = cls.client.tenant_id
         cls.flavor_name_prefix = 'test_flavor_access_'
         cls.ram = 512
         cls.vcpus = 1
         cls.disk = 10
 
-    @test.skip_because(bug='1286297')
     @test.attr(type='gate')
     def test_flavor_access_list_with_private_flavor(self):
         # Test to make sure that list flavor access on a newly created
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 8fe3331..b636ccd 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -31,19 +31,12 @@
     @classmethod
     def setUpClass(cls):
         super(FlavorsAccessNegativeTestJSON, cls).setUpClass()
-        if not test.is_extension_enabled('FlavorExtraData', 'compute'):
-            msg = "FlavorExtraData extension not enabled."
+        if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+            msg = "OS-FLV-EXT-DATA extension not enabled."
             raise cls.skipException(msg)
 
         cls.client = cls.os_adm.flavors_client
-        admin_client = cls._get_identity_admin_client()
-        cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
-                                                     tenant_name)
-        cls.tenant_id = cls.tenant['id']
-        cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
-                                                         flavors_client.
-                                                         tenant_name)
-        cls.adm_tenant_id = cls.adm_tenant['id']
+        cls.tenant_id = cls.flavors_client.tenant_id
         cls.flavor_name_prefix = 'test_flavor_access_'
         cls.ram = 512
         cls.vcpus = 1
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 91145ec..56daf96 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -29,8 +29,8 @@
     @classmethod
     def setUpClass(cls):
         super(FlavorsExtraSpecsTestJSON, cls).setUpClass()
-        if not test.is_extension_enabled('FlavorExtraData', 'compute'):
-            msg = "FlavorExtraData extension not enabled."
+        if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+            msg = "OS-FLV-EXT-DATA extension not enabled."
             raise cls.skipException(msg)
 
         cls.client = cls.os_adm.flavors_client
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index a139c2f..1e5695f 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -30,8 +30,8 @@
     @classmethod
     def setUpClass(cls):
         super(FlavorsExtraSpecsNegativeTestJSON, cls).setUpClass()
-        if not test.is_extension_enabled('FlavorExtraData', 'compute'):
-            msg = "FlavorExtraData extension not enabled."
+        if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+            msg = "OS-FLV-EXT-DATA extension not enabled."
             raise cls.skipException(msg)
 
         cls.client = cls.os_adm.flavors_client
diff --git a/tempest/api/compute/admin/test_flavors_negative.py b/tempest/api/compute/admin/test_flavors_negative.py
index 162e419..9e4412f 100644
--- a/tempest/api/compute/admin/test_flavors_negative.py
+++ b/tempest/api/compute/admin/test_flavors_negative.py
@@ -32,8 +32,8 @@
     @classmethod
     def setUpClass(cls):
         super(FlavorsAdminNegativeTestJSON, cls).setUpClass()
-        if not test.is_extension_enabled('FlavorExtraData', 'compute'):
-            msg = "FlavorExtraData extension not enabled."
+        if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+            msg = "OS-FLV-EXT-DATA extension not enabled."
             raise cls.skipException(msg)
 
         cls.client = cls.os_adm.flavors_client
@@ -101,13 +101,9 @@
                           self.flavor_ref_alt)
 
 
+@test.SimpleNegativeAutoTest
 class FlavorCreateNegativeTestJSON(base.BaseV2ComputeAdminTest,
                                    test.NegativeAutoTest):
     _interface = 'json'
     _service = 'compute'
     _schema_file = 'compute/admin/flavor_create.json'
-
-    @test.attr(type=['negative', 'gate'])
-    def test_create_flavor(self):
-        # flavor details are not returned for non-existent flavors
-        self.execute(self._schema_file)
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
new file mode 100644
index 0000000..514f1fa
--- /dev/null
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -0,0 +1,55 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(MigrationsAdminTest, cls).setUpClass()
+        cls.client = cls.os_adm.migrations_client
+
+    @test.attr(type='gate')
+    def test_list_migrations(self):
+        # Admin can get the migrations list
+        resp, _ = self.client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='gate')
+    def test_list_migrations_in_flavor_resize_situation(self):
+        # Admin can get the migrations list which contains the resized server
+        resp, server = self.create_test_server(wait_until="ACTIVE")
+        server_id = server['id']
+
+        resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize(server_id)
+        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+        resp, body = self.client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+        instance_uuids = [x['instance_uuid'] for x in body]
+        self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 09c7274..348666d 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -28,8 +28,7 @@
 
         # NOTE(afazekas): these test cases should always create and use a new
         # tenant most of them should be skipped if we can't do that
-        cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
-            'tenantId')
+        cls.demo_tenant_id = cls.quotas_client.tenant_id
 
         cls.default_quota_set = set(('injected_file_content_bytes',
                                      'metadata_items', 'injected_files',
@@ -76,21 +75,38 @@
     # TODO(afazekas): merge these test cases
     @test.attr(type='gate')
     def test_get_updated_quotas(self):
-        # Verify that GET shows the updated quota set
+        # Verify that GET shows the updated quota set of tenant
         tenant_name = data_utils.rand_name('cpu_quota_tenant_')
         tenant_desc = tenant_name + '-desc'
         identity_client = self.os_adm.identity_client
         _, tenant = identity_client.create_tenant(name=tenant_name,
                                                   description=tenant_desc)
         tenant_id = tenant['id']
-        self.addCleanup(identity_client.delete_tenant,
-                        tenant_id)
+        self.addCleanup(identity_client.delete_tenant, tenant_id)
 
-        self.adm_client.update_quota_set(tenant_id,
-                                         ram='5120')
+        self.adm_client.update_quota_set(tenant_id, ram='5120')
         resp, quota_set = self.adm_client.get_quota_set(tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(quota_set['ram'], 5120)
+        self.assertEqual(5120, quota_set['ram'])
+
+        # Verify that GET shows the updated quota set of user
+        user_name = data_utils.rand_name('cpu_quota_user_')
+        password = data_utils.rand_name('password-')
+        email = user_name + '@testmail.tm'
+        _, user = identity_client.create_user(name=user_name,
+                                              password=password,
+                                              tenant_id=tenant_id,
+                                              email=email)
+        user_id = user['id']
+        self.addCleanup(identity_client.delete_user, user_id)
+
+        self.adm_client.update_quota_set(tenant_id,
+                                         user_id=user_id,
+                                         ram='2048')
+        resp, quota_set = self.adm_client.get_quota_set(tenant_id,
+                                                        user_id=user_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(2048, quota_set['ram'])
 
     @test.attr(type='gate')
     def test_delete_quota(self):
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 5b2b5fd..e1dc685 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -33,8 +33,7 @@
 
         # NOTE(afazekas): these test cases should always create and use a new
         # tenant most of them should be skipped if we can't do that
-        cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
-            'tenantId')
+        cls.demo_tenant_id = cls.client.tenant_id
 
     @test.attr(type=['negative', 'gate'])
     def test_update_quota_normal_user(self):
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 1f2ddf4..49af645 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -14,7 +14,6 @@
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
-from tempest import exceptions
 from tempest import test
 
 
@@ -44,16 +43,6 @@
                                               wait_until='ACTIVE')
         cls.s2_id = server['id']
 
-    def _get_unused_flavor_id(self):
-        flavor_id = data_utils.rand_int_id(start=1000)
-        while True:
-            try:
-                resp, body = self.flavors_client.get_flavor_details(flavor_id)
-            except exceptions.NotFound:
-                break
-            flavor_id = data_utils.rand_int_id(start=1000)
-        return flavor_id
-
     @test.attr(type='gate')
     def test_list_servers_by_admin(self):
         # Listing servers by admin user returns empty list by default
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 797b780..8b3a0b5 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -14,11 +14,16 @@
 
 import uuid
 
+import testtools
+
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
 
@@ -32,10 +37,7 @@
         cls.client = cls.os_adm.servers_client
         cls.non_adm_client = cls.servers_client
         cls.flavors_client = cls.os_adm.flavors_client
-        cls.identity_client = cls._get_identity_admin_client()
-        tenant = cls.identity_client.get_tenant_by_name(
-            cls.client.tenant_name)
-        cls.tenant_id = tenant['id']
+        cls.tenant_id = cls.client.tenant_id
 
         cls.s1_name = data_utils.rand_name('server')
         resp, server = cls.create_test_server(name=cls.s1_name,
@@ -119,6 +121,8 @@
                           self.client.migrate_server,
                           str(uuid.uuid4()))
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_migrate_server_invalid_state(self):
         # create server.
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index 33cd6f3..f3a81d1 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -27,11 +27,7 @@
         super(TenantUsagesTestJSON, cls).setUpClass()
         cls.adm_client = cls.os_adm.tenant_usages_client
         cls.client = cls.os.tenant_usages_client
-        cls.identity_client = cls._get_identity_admin_client()
-
-        resp, tenants = cls.identity_client.list_tenants()
-        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                         cls.client.tenant_name][0]
+        cls.tenant_id = cls.client.tenant_id
 
         # Create a server in the demo tenant
         resp, server = cls.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index a080f2e..d69c43c 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -52,11 +52,9 @@
         params = {'start': self.end,
                   'end': self.start}
         resp, tenants = self.identity_client.list_tenants()
-        tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                     self.client.tenant_name][0]
         self.assertRaises(exceptions.BadRequest,
                           self.adm_client.get_tenant_usage,
-                          tenant_id, params)
+                          self.client.tenant_id, params)
 
     @test.attr(type=['negative', 'gate'])
     def test_list_usage_all_tenants_with_non_admin_user(self):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index abd36a6..7c70aec 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -30,12 +30,16 @@
 class BaseComputeTest(tempest.test.BaseTestCase):
     """Base test case class for all Compute API tests."""
 
+    _api_version = 3
     force_tenant_isolation = False
 
     @classmethod
     def setUpClass(cls):
+        cls.set_network_resources()
         super(BaseComputeTest, cls).setUpClass()
 
+        # TODO(andreaf) We should also take care of the alt_manager here,
+        # but only once client lazy loading in the manager is done
         os = cls.get_client_manager()
 
         cls.os = os
@@ -52,6 +56,58 @@
         cls.images = []
         cls.multi_user = cls.get_multi_user()
         cls.security_groups = []
+        cls.server_groups = []
+
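+        # Pick the client aliases that match the requested API version;
+        # v3 tests are skipped entirely when the nova v3 API is unavailable.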
+        if cls._api_version == 2:
+            cls.servers_client = cls.os.servers_client
+            cls.flavors_client = cls.os.flavors_client
+            cls.images_client = cls.os.images_client
+            cls.extensions_client = cls.os.extensions_client
+            cls.floating_ips_client = cls.os.floating_ips_client
+            cls.keypairs_client = cls.os.keypairs_client
+            cls.security_groups_client = cls.os.security_groups_client
+            cls.quotas_client = cls.os.quotas_client
+            cls.limits_client = cls.os.limits_client
+            cls.volumes_extensions_client = cls.os.volumes_extensions_client
+            cls.volumes_client = cls.os.volumes_client
+            cls.interfaces_client = cls.os.interfaces_client
+            cls.fixed_ips_client = cls.os.fixed_ips_client
+            cls.availability_zone_client = cls.os.availability_zone_client
+            cls.agents_client = cls.os.agents_client
+            cls.aggregates_client = cls.os.aggregates_client
+            cls.services_client = cls.os.services_client
+            cls.instance_usages_audit_log_client = \
+                cls.os.instance_usages_audit_log_client
+            cls.hypervisor_client = cls.os.hypervisor_client
+            cls.certificates_client = cls.os.certificates_client
+            cls.migrations_client = cls.os.migrations_client
+
+        elif cls._api_version == 3:
+            if not CONF.compute_feature_enabled.api_v3:
+                skip_msg = ("%s skipped as nova v3 api is not available" %
+                            cls.__name__)
+                raise cls.skipException(skip_msg)
+            cls.servers_client = cls.os.servers_v3_client
+            cls.images_client = cls.os.image_client
+            cls.flavors_client = cls.os.flavors_v3_client
+            cls.services_client = cls.os.services_v3_client
+            cls.extensions_client = cls.os.extensions_v3_client
+            cls.availability_zone_client = cls.os.availability_zone_v3_client
+            cls.interfaces_client = cls.os.interfaces_v3_client
+            cls.hypervisor_client = cls.os.hypervisor_v3_client
+            cls.keypairs_client = cls.os.keypairs_v3_client
+            cls.volumes_client = cls.os.volumes_client
+            cls.certificates_client = cls.os.certificates_v3_client
+            cls.keypairs_client = cls.os.keypairs_v3_client
+            cls.aggregates_client = cls.os.aggregates_v3_client
+            cls.hosts_client = cls.os.hosts_v3_client
+            cls.quotas_client = cls.os.quotas_v3_client
+            cls.version_client = cls.os.version_v3_client
+            cls.migrations_client = cls.os.migrations_v3_client
+        else:
+            msg = ("Unexpected API version is specified (%s)" %
+                   cls._api_version)
+            raise exceptions.InvalidConfiguration(message=msg)
 
     @classmethod
     def get_multi_user(cls):
@@ -91,6 +147,26 @@
                 pass
 
     @classmethod
+    def server_check_teardown(cls):
+        """Checks is the shared server clean enough for subsequent test.
+           Method will delete the server when it's dirty.
+           The setUp method is responsible for creating a new server.
+           Exceptions raised in tearDown class are fails the test case,
+           This method supposed to use only by tierDown methods, when
+           the shared server_id is stored in the server_id of the class.
+        """
+        if getattr(cls, 'server_id', None) is not None:
+            try:
+                cls.servers_client.wait_for_server_status(cls.server_id,
+                                                          'ACTIVE')
+            except Exception as exc:
+                LOG.exception(exc)
+                cls.servers_client.delete_server(cls.server_id)
+                cls.servers_client.wait_for_server_termination(cls.server_id)
+                cls.server_id = None
+                raise
+
+    @classmethod
     def clear_images(cls):
         for image_id in cls.images:
             try:
@@ -100,7 +176,6 @@
                 pass
             except Exception:
                 LOG.exception('Exception raised deleting image %s' % image_id)
-                pass
 
     @classmethod
     def clear_security_groups(cls):
@@ -115,7 +190,18 @@
                 LOG.info('Exception raised deleting security group %s',
                          sg['id'])
                 LOG.exception(exc)
+
+    @classmethod
+    def clear_server_groups(cls):
+        for server_group_id in cls.server_groups:
+            try:
+                cls.client.delete_server_group(server_group_id)
+            except exceptions.NotFound:
+                # The server-group may have already been deleted which is OK.
                 pass
+            except Exception:
+                LOG.exception('Exception raised deleting server-group %s',
+                              server_group_id)
 
     @classmethod
     def tearDownClass(cls):
@@ -123,6 +209,7 @@
         cls.clear_servers()
         cls.clear_security_groups()
         cls.clear_isolated_creds()
+        cls.clear_server_groups()
         super(BaseComputeTest, cls).tearDownClass()
 
     @classmethod
@@ -176,6 +263,16 @@
 
         return resp, body
 
+    @classmethod
+    def create_test_server_group(cls, name="", policy=[]):
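+        # Create a server group (defaulting to a single 'affinity' policy)
+        # and track it so clear_server_groups can remove it in tearDownClass.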
+        if not name:
+            name = data_utils.rand_name(cls.__name__ + "-Server-Group")
+        if not policy:
+            policy = ['affinity']
+        resp, body = cls.servers_client.create_server_group(name, policy)
+        cls.server_groups.append(body)
+        return resp, body
+
     def wait_for(self, condition):
         """Repeatedly calls condition() until a timeout."""
         start_time = int(time.time())
@@ -203,36 +300,12 @@
             LOG.warn("Unable to delete volume '%s' since it was not found. "
                      "Maybe it was already deleted?" % volume_id)
 
-
-class BaseV2ComputeTest(BaseComputeTest):
-
-    _interface = "json"
-
     @classmethod
-    def setUpClass(cls):
-        # By default compute tests do not create network resources
-        cls.set_network_resources()
-        super(BaseV2ComputeTest, cls).setUpClass()
-        cls.servers_client = cls.os.servers_client
-        cls.flavors_client = cls.os.flavors_client
-        cls.images_client = cls.os.images_client
-        cls.extensions_client = cls.os.extensions_client
-        cls.floating_ips_client = cls.os.floating_ips_client
-        cls.keypairs_client = cls.os.keypairs_client
-        cls.security_groups_client = cls.os.security_groups_client
-        cls.quotas_client = cls.os.quotas_client
-        cls.limits_client = cls.os.limits_client
-        cls.volumes_extensions_client = cls.os.volumes_extensions_client
-        cls.volumes_client = cls.os.volumes_client
-        cls.interfaces_client = cls.os.interfaces_client
-        cls.fixed_ips_client = cls.os.fixed_ips_client
-        cls.availability_zone_client = cls.os.availability_zone_client
-        cls.aggregates_client = cls.os.aggregates_client
-        cls.services_client = cls.os.services_client
-        cls.instance_usages_audit_log_client = \
-            cls.os.instance_usages_audit_log_client
-        cls.hypervisor_client = cls.os.hypervisor_client
-        cls.certificates_client = cls.os.certificates_client
+    def prepare_instance_network(cls):
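+        # Request full network resources only when tests will actually SSH
+        # into the instance over a floating IP.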
+        if (CONF.compute.ssh_auth_method != 'disabled' and
+                CONF.compute.ssh_connect_method == 'floating'):
+            cls.set_network_resources(network=True, subnet=True, router=True,
+                                      dhcp=True)
 
     @classmethod
     def create_image_from_server(cls, server_id, **kwargs):
@@ -241,21 +314,25 @@
         if 'name' in kwargs:
             name = kwargs.pop('name')
 
-        resp, image = cls.images_client.create_image(
-            server_id, name)
+        if cls._api_version == 2:
+            resp, image = cls.images_client.create_image(server_id, name)
+        elif cls._api_version == 3:
+            resp, image = cls.servers_client.create_image(server_id, name)
         image_id = data_utils.parse_image_id(resp['location'])
         cls.images.append(image_id)
 
         if 'wait_until' in kwargs:
             cls.images_client.wait_for_image_status(image_id,
                                                     kwargs['wait_until'])
-            resp, image = cls.images_client.get_image(image_id)
+            if cls._api_version == 2:
+                resp, image = cls.images_client.get_image(image_id)
+            elif cls._api_version == 3:
+                resp, image = cls.images_client.get_image_meta(image_id)
 
             if kwargs['wait_until'] == 'ACTIVE':
                 if kwargs.get('wait_for_server', True):
                     cls.servers_client.wait_for_server_status(server_id,
                                                               'ACTIVE')
-
         return resp, image
 
     @classmethod
@@ -267,148 +344,72 @@
                 cls.servers_client.wait_for_server_termination(server_id)
             except Exception:
                 LOG.exception('Failed to delete server %s' % server_id)
-                pass
         resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
-        cls.password = server['adminPass']
+        if cls._api_version == 2:
+            cls.password = server['adminPass']
+        elif cls._api_version == 3:
+            cls.password = server['admin_password']
         return server['id']
 
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
-        cls._delete_volume(cls.volumes_extensions_client, volume_id)
+        if cls._api_version == 2:
+            cls._delete_volume(cls.volumes_extensions_client, volume_id)
+        elif cls._api_version == 3:
+            cls._delete_volume(cls.volumes_client, volume_id)
 
 
-class BaseV2ComputeAdminTest(BaseV2ComputeTest):
-    """Base test case class for Compute Admin V2 API tests."""
-
-    @classmethod
-    def setUpClass(cls):
-        super(BaseV2ComputeAdminTest, cls).setUpClass()
-        admin_username = CONF.compute_admin.username
-        admin_password = CONF.compute_admin.password
-        admin_tenant = CONF.compute_admin.tenant_name
-        if not (admin_username and admin_password and admin_tenant):
-            msg = ("Missing Compute Admin API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
-        if (CONF.compute.allow_tenant_isolation or
-            cls.force_tenant_isolation is True):
-            creds = cls.isolated_creds.get_admin_creds()
-            admin_username, admin_tenant_name, admin_password = creds
-            cls.os_adm = clients.Manager(username=admin_username,
-                                         password=admin_password,
-                                         tenant_name=admin_tenant_name,
-                                         interface=cls._interface)
-        else:
-            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+class BaseV2ComputeTest(BaseComputeTest):
+    _api_version = 2
+    _interface = "json"
 
 
 class BaseV3ComputeTest(BaseComputeTest):
+    _api_version = 3
+    _interface = "json"
 
+
+class BaseComputeAdminTest(BaseComputeTest):
+    """Base test case class for Compute Admin API tests."""
     _interface = "json"
 
     @classmethod
     def setUpClass(cls):
-        # By default compute tests do not create network resources
-        if cls._interface == "xml":
-            skip_msg = ("XML interface is being removed from Nova v3. "
-                        "%s will be removed shortly" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
-        if not CONF.compute_feature_enabled.api_v3:
-            skip_msg = ("%s skipped as nova v3 api is not available" %
-                        cls.__name__)
-            raise cls.skipException(skip_msg)
-
-        cls.set_network_resources()
-        super(BaseV3ComputeTest, cls).setUpClass()
-
-        cls.servers_client = cls.os.servers_v3_client
-        cls.images_client = cls.os.image_client
-        cls.flavors_client = cls.os.flavors_v3_client
-        cls.services_client = cls.os.services_v3_client
-        cls.extensions_client = cls.os.extensions_v3_client
-        cls.availability_zone_client = cls.os.availability_zone_v3_client
-        cls.interfaces_client = cls.os.interfaces_v3_client
-        cls.hypervisor_client = cls.os.hypervisor_v3_client
-        cls.keypairs_client = cls.os.keypairs_v3_client
-        cls.volumes_client = cls.os.volumes_client
-        cls.certificates_client = cls.os.certificates_v3_client
-        cls.keypairs_client = cls.os.keypairs_v3_client
-        cls.aggregates_client = cls.os.aggregates_v3_client
-        cls.hosts_client = cls.os.hosts_v3_client
-        cls.quotas_client = cls.os.quotas_v3_client
-        cls.version_client = cls.os.version_v3_client
-
-    @classmethod
-    def create_image_from_server(cls, server_id, **kwargs):
-        """Wrapper utility that returns an image created from the server."""
-        name = data_utils.rand_name(cls.__name__ + "-image")
-        if 'name' in kwargs:
-            name = kwargs.pop('name')
-
-        resp, image = cls.servers_client.create_image(
-            server_id, name)
-        image_id = data_utils.parse_image_id(resp['location'])
-        cls.images.append(image_id)
-
-        if 'wait_until' in kwargs:
-            cls.images_client.wait_for_image_status(image_id,
-                                                    kwargs['wait_until'])
-            resp, image = cls.images_client.get_image_meta(image_id)
-
-        return resp, image
-
-    @classmethod
-    def rebuild_server(cls, server_id, **kwargs):
-        # Destroy an existing server and creates a new one
-        try:
-            cls.servers_client.delete_server(server_id)
-            cls.servers_client.wait_for_server_termination(server_id)
-        except Exception:
-            LOG.exception('Failed to delete server %s' % server_id)
-            pass
-        resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
-        cls.password = server['admin_password']
-        return server['id']
-
-    @classmethod
-    def delete_volume(cls, volume_id):
-        """Deletes the given volume and waits for it to be gone."""
-        cls._delete_volume(cls.volumes_client, volume_id)
-
-
-class BaseV3ComputeAdminTest(BaseV3ComputeTest):
-    """Base test case class for all Compute Admin API V3 tests."""
-
-    @classmethod
-    def setUpClass(cls):
-        super(BaseV3ComputeAdminTest, cls).setUpClass()
-        admin_username = CONF.compute_admin.username
-        admin_password = CONF.compute_admin.password
-        admin_tenant = CONF.compute_admin.tenant_name
-        if not (admin_username and admin_password and admin_tenant):
-            msg = ("Missing Compute Admin API credentials "
-                   "in configuration.")
-            raise cls.skipException(msg)
-        if CONF.compute.allow_tenant_isolation:
+        super(BaseComputeAdminTest, cls).setUpClass()
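+        # Build the admin manager from isolated credentials when tenant
+        # isolation is enabled, otherwise from the configured admin account;
+        # skip the test class if no admin credentials are available.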
+        if (CONF.compute.allow_tenant_isolation or
+            cls.force_tenant_isolation is True):
             creds = cls.isolated_creds.get_admin_creds()
-            admin_username, admin_tenant_name, admin_password = creds
-            os_adm = clients.Manager(username=admin_username,
-                                     password=admin_password,
-                                     tenant_name=admin_tenant_name,
-                                     interface=cls._interface)
+            cls.os_adm = clients.Manager(credentials=creds,
+                                         interface=cls._interface)
         else:
-            os_adm = clients.ComputeAdminManager(interface=cls._interface)
+            try:
+                cls.os_adm = clients.ComputeAdminManager(
+                    interface=cls._interface)
+            except exceptions.InvalidCredentials:
+                msg = ("Missing Compute Admin API credentials "
+                       "in configuration.")
+                raise cls.skipException(msg)
 
-        cls.os_adm = os_adm
-        cls.servers_admin_client = cls.os_adm.servers_v3_client
-        cls.services_admin_client = cls.os_adm.services_v3_client
-        cls.availability_zone_admin_client = \
-            cls.os_adm.availability_zone_v3_client
-        cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
-        cls.flavors_admin_client = cls.os_adm.flavors_v3_client
-        cls.aggregates_admin_client = cls.os_adm.aggregates_v3_client
-        cls.hosts_admin_client = cls.os_adm.hosts_v3_client
-        cls.quotas_admin_client = cls.os_adm.quotas_v3_client
-        cls.agents_admin_client = cls.os_adm.agents_v3_client
+        if cls._api_version == 3:
+            cls.servers_admin_client = cls.os_adm.servers_v3_client
+            cls.services_admin_client = cls.os_adm.services_v3_client
+            cls.availability_zone_admin_client = \
+                cls.os_adm.availability_zone_v3_client
+            cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
+            cls.flavors_admin_client = cls.os_adm.flavors_v3_client
+            cls.aggregates_admin_client = cls.os_adm.aggregates_v3_client
+            cls.hosts_admin_client = cls.os_adm.hosts_v3_client
+            cls.quotas_admin_client = cls.os_adm.quotas_v3_client
+            cls.agents_admin_client = cls.os_adm.agents_v3_client
+            cls.migrations_admin_client = cls.os_adm.migrations_v3_client
+
+
+class BaseV2ComputeAdminTest(BaseComputeAdminTest):
+    """Base test case class for Compute Admin V2 API tests."""
+    _api_version = 2
+
+
+class BaseV3ComputeAdminTest(BaseComputeAdminTest):
+    """Base test case class for Compute Admin V3 API tests."""
+    _api_version = 3
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index 5299d13..0f921c5 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -17,15 +17,19 @@
 from tempest import test
 
 
-class CertificatesTestJSON(base.BaseV2ComputeTest):
+class CertificatesV3Test(base.BaseComputeTest):
+
+    _api_version = 3
 
     @test.attr(type='gate')
-    def test_create_and_get_root_certificate(self):
+    def test_create_root_certificate(self):
         # create certificates
-        resp, create_body = self.certificates_client.create_certificate()
-        self.assertEqual(200, resp.status)
-        self.assertIn('data', create_body)
-        self.assertIn('private_key', create_body)
+        resp, body = self.certificates_client.create_certificate()
+        self.assertIn('data', body)
+        self.assertIn('private_key', body)
+
+    @test.attr(type='gate')
+    def test_get_root_certificate(self):
         # get the root certificate
         resp, body = self.certificates_client.get_certificate('root')
         self.assertEqual(200, resp.status)
@@ -33,5 +37,9 @@
         self.assertIn('private_key', body)
 
 
-class CertificatesTestXML(CertificatesTestJSON):
+class CertificatesV2TestJSON(CertificatesV3Test):
+    _api_version = 2
+
+
+class CertificatesV2TestXML(CertificatesV2TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 6e202f6..bfebb5e 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -17,11 +17,15 @@
 from tempest import test
 
 
-class FlavorsTestJSON(base.BaseV2ComputeTest):
+class FlavorsV3Test(base.BaseComputeTest):
+
+    _api_version = 3
+    _min_disk = 'min_disk'
+    _min_ram = 'min_ram'
 
     @classmethod
     def setUpClass(cls):
-        super(FlavorsTestJSON, cls).setUpClass()
+        super(FlavorsV3Test, cls).setUpClass()
         cls.client = cls.flavors_client
 
     @test.attr(type='smoke')
@@ -89,7 +93,7 @@
         flavors = sorted(flavors, key=lambda k: k['disk'])
         flavor_id = flavors[0]['id']
 
-        params = {'minDisk': flavors[0]['disk'] + 1}
+        params = {self._min_disk: flavors[0]['disk'] + 1}
         resp, flavors = self.client.list_flavors_with_detail(params)
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
@@ -100,7 +104,7 @@
         flavors = sorted(flavors, key=lambda k: k['ram'])
         flavor_id = flavors[0]['id']
 
-        params = {'minRam': flavors[0]['ram'] + 1}
+        params = {self._min_ram: flavors[0]['ram'] + 1}
         resp, flavors = self.client.list_flavors_with_detail(params)
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
@@ -111,7 +115,7 @@
         flavors = sorted(flavors, key=lambda k: k['disk'])
         flavor_id = flavors[0]['id']
 
-        params = {'minDisk': flavors[0]['disk'] + 1}
+        params = {self._min_disk: flavors[0]['disk'] + 1}
         resp, flavors = self.client.list_flavors(params)
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
@@ -122,10 +126,17 @@
         flavors = sorted(flavors, key=lambda k: k['ram'])
         flavor_id = flavors[0]['id']
 
-        params = {'minRam': flavors[0]['ram'] + 1}
+        params = {self._min_ram: flavors[0]['ram'] + 1}
         resp, flavors = self.client.list_flavors(params)
         self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
 
 
-class FlavorsTestXML(FlavorsTestJSON):
+class FlavorsV2TestJSON(FlavorsV3Test):
+
+    _api_version = 2
+    _min_disk = 'minDisk'
+    _min_ram = 'minRam'
+
+
+class FlavorsV2TestXML(FlavorsV2TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index a81b7d9..1638f2d 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -21,16 +21,14 @@
 load_tests = test.NegativeAutoTest.load_tests
 
 
-class FlavorsListNegativeTestJSON(base.BaseV2ComputeTest,
-                                  test.NegativeAutoTest):
+@test.SimpleNegativeAutoTest
+class FlavorsListWithDetailsNegativeTestJSON(base.BaseV2ComputeTest,
+                                             test.NegativeAutoTest):
     _service = 'compute'
     _schema_file = 'compute/flavors/flavors_list.json'
 
-    @test.attr(type=['negative', 'gate'])
-    def test_list_flavors_with_detail(self):
-        self.execute(self._schema_file)
 
-
+@test.SimpleNegativeAutoTest
 class FlavorDetailsNegativeTestJSON(base.BaseV2ComputeTest,
                                     test.NegativeAutoTest):
     _service = 'compute'
@@ -40,8 +38,3 @@
     def setUpClass(cls):
         super(FlavorDetailsNegativeTestJSON, cls).setUpClass()
         cls.set_resource("flavor", cls.flavor_ref)
-
-    @test.attr(type=['negative', 'gate'])
-    def test_get_flavor_details(self):
-        # flavor details are not returned for non-existent flavors
-        self.execute(self._schema_file)
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index abd8a4c..b3789f8 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -45,6 +45,14 @@
             resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
         super(FloatingIPsTestJSON, cls).tearDownClass()
 
+    def _try_delete_floating_ip(self, floating_ip_id):
+        # delete floating ip, if it exists
+        try:
+            self.client.delete_floating_ip(floating_ip_id)
+        # if it is not found, it was already deleted by the test
+        except exceptions.NotFound:
+            pass
+
     @test.attr(type='gate')
     def test_allocate_floating_ip(self):
         # Positive test:Allocation of a new floating IP to a project
@@ -66,6 +74,7 @@
         # should be successful
         # Creating the floating IP that is to be deleted in this method
         resp, floating_ip_body = self.client.create_floating_ip()
+        self.addCleanup(self._try_delete_floating_ip, floating_ip_body['id'])
         # Storing the details of floating IP before deleting it
         cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
         resp, floating_ip_details = cli_resp
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index d2fd970..c81cec5 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -26,6 +26,11 @@
 
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
 
+    def tearDown(self):
+        """Terminate test instances created after a test is executed."""
+        self.server_check_teardown()
+        super(ImagesOneServerTestJSON, self).tearDown()
+
     def setUp(self):
         # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 41a0590..9c4ab00 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
         for image_id in self.image_ids:
             self.client.delete_image(image_id)
             self.image_ids.remove(image_id)
+        self.server_check_teardown()
         super(ImagesOneServerNegativeTestJSON, self).tearDown()
 
     def setUp(self):
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 67fafed..01979c0 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -18,16 +18,17 @@
 from tempest import test
 
 
-class KeyPairsTestJSON(base.BaseV2ComputeTest):
+class KeyPairsV3Test(base.BaseComputeTest):
+
+    _api_version = 3
 
     @classmethod
     def setUpClass(cls):
-        super(KeyPairsTestJSON, cls).setUpClass()
+        super(KeyPairsV3Test, cls).setUpClass()
         cls.client = cls.keypairs_client
 
     def _delete_keypair(self, keypair_name):
         resp, _ = self.client.delete_keypair(keypair_name)
-        self.assertEqual(202, resp.status)
 
     def _create_keypair(self, keypair_name, pub_key=None):
         resp, body = self.client.create_keypair(keypair_name, pub_key)
@@ -46,7 +47,6 @@
             # as the keypair dicts from list API doesn't have them.
             keypair.pop('private_key')
             keypair.pop('user_id')
-            self.assertEqual(200, resp.status)
             key_list.append(keypair)
         # Fetch all keypairs and verify the list
         # has all created keypairs
@@ -69,7 +69,6 @@
         # Keypair should be created, verified and deleted
         k_name = data_utils.rand_name('keypair-')
         resp, keypair = self._create_keypair(k_name)
-        self.assertEqual(200, resp.status)
         private_key = keypair['private_key']
         key_name = keypair['name']
         self.assertEqual(key_name, k_name,
@@ -108,7 +107,6 @@
                    "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
                    "snSA8wzBx3A/8y9Pp1B nova@ubuntu")
         resp, keypair = self._create_keypair(k_name, pub_key)
-        self.assertEqual(200, resp.status)
         self.assertFalse('private_key' in keypair,
                          "Field private_key is not empty!")
         key_name = keypair['name']
@@ -117,5 +115,9 @@
                          "to the requested name!")
 
 
-class KeyPairsTestXML(KeyPairsTestJSON):
+class KeyPairsV2TestJSON(KeyPairsV3Test):
+    _api_version = 2
+
+
+class KeyPairsV2TestXML(KeyPairsV2TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index b04ab8a..35f6fc2 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -75,7 +75,6 @@
                                                    to_port,
                                                    cidr=cidr,
                                                    group_id=group_id)
-        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
         self.assertEqual(200, resp.status)
 
     @test.attr(type='smoke')
@@ -95,8 +94,6 @@
                                                    ip_protocol1,
                                                    from_port1, to_port1)
         rule1_id = rule['id']
-        # Delete the Security Group rule1 at the end of this method
-        self.addCleanup(self.client.delete_security_group_rule, rule1_id)
 
         # Add a second rule to the created Security Group
         ip_protocol2 = 'icmp'
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 3736f28..a077943 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -30,29 +30,31 @@
     def test_security_groups_create_list_delete(self):
         # Positive test:Should return the list of Security Groups
         # Create 3 Security Groups
+        security_group_list = []
         for i in range(3):
-            resp, securitygroup = self.create_security_group()
+            resp, body = self.create_security_group()
             self.assertEqual(200, resp.status)
+            security_group_list.append(body)
         # Fetch all Security Groups and verify the list
         # has all created Security Groups
         resp, fetched_list = self.client.list_security_groups()
         self.assertEqual(200, resp.status)
         # Now check if all the created Security Groups are in fetched list
         missing_sgs = \
-            [sg for sg in self.security_groups if sg not in fetched_list]
+            [sg for sg in security_group_list if sg not in fetched_list]
         self.assertFalse(missing_sgs,
                          "Failed to find Security Group %s in fetched "
                          "list" % ', '.join(m_group['name']
                                             for m_group in missing_sgs))
         # Delete all security groups
-        for sg in self.security_groups:
+        for sg in security_group_list:
             resp, _ = self.client.delete_security_group(sg['id'])
             self.assertEqual(202, resp.status)
             self.client.wait_for_resource_deletion(sg['id'])
         # Now check if all the created Security Groups are deleted
         resp, fetched_list = self.client.list_security_groups()
         deleted_sgs = \
-            [sg for sg in self.security_groups if sg in fetched_list]
+            [sg for sg in security_group_list if sg in fetched_list]
         self.assertFalse(deleted_sgs,
                          "Failed to delete Security Group %s "
                          "list" % ', '.join(m_group['name']
@@ -78,6 +80,9 @@
         self.assertEqual(securitygroup, fetched_group,
                          "The fetched Security Group is different "
                          "from the created Group")
+        resp, _ = self.client.delete_security_group(securitygroup['id'])
+        self.assertEqual(202, resp.status)
+        self.client.wait_for_resource_deletion(securitygroup['id'])
 
     @test.attr(type='smoke')
     def test_server_security_groups(self):
@@ -120,9 +125,9 @@
         self.servers_client.delete_server(server_id)
         self.servers_client.wait_for_server_termination(server_id)
 
-        self.client.delete_security_group(sg['id'])
+        resp, _ = self.client.delete_security_group(sg['id'])
         self.assertEqual(202, resp.status)
-        self.client.delete_security_group(sg2['id'])
+        resp, _ = self.client.delete_security_group(sg2['id'])
         self.assertEqual(202, resp.status)
 
     @test.attr(type='smoke')
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index f6eed00..297b300 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -127,7 +127,7 @@
         _ifs = self._test_delete_interface(server, ifs)
         self.assertEqual(len(ifs) - 1, len(_ifs))
 
-    @test.attr(type='gate')
+    @test.attr(type='smoke')
     def test_add_remove_fixed_ip(self):
         # Add a fixed IP to the server and then remove it.
         server, ifs = self._create_server_get_interfaces()
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 7b12555..cf9837f 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -17,15 +17,15 @@
 from tempest import test
 
 
-class AZTestJSON(base.BaseV2ComputeTest):
-
+class AZV3Test(base.BaseComputeTest):
     """
     Tests Availability Zone API List
     """
+    _api_version = 3
 
     @classmethod
     def setUpClass(cls):
-        super(AZTestJSON, cls).setUpClass()
+        super(AZV3Test, cls).setUpClass()
         cls.client = cls.availability_zone_client
 
     @test.attr(type='gate')
@@ -36,5 +36,9 @@
         self.assertTrue(len(availability_zone) > 0)
 
 
-class AZTestXML(AZTestJSON):
+class AZV2TestJSON(AZV3Test):
+    _api_version = 2
+
+
+class AZV2TestXML(AZV2TestJSON):
     _interface = 'xml'
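
The hunk above folds the separate v2 availability-zone class into a single class selected by an `_api_version` attribute, so the same test body runs against both compute API versions. A minimal sketch of the pattern, reusing only names that appear in this diff (`base.BaseComputeTest`, `availability_zone_client`, `get_availability_zone_list`); treat it as an illustration rather than additional tempest code:

    from tempest.api.compute import base
    from tempest import test


    class ExampleAZV3Test(base.BaseComputeTest):
        _api_version = 3          # base class wires up the v3 clients

        @classmethod
        def setUpClass(cls):
            super(ExampleAZV3Test, cls).setUpClass()
            cls.client = cls.availability_zone_client

        @test.attr(type='gate')
        def test_get_availability_zone_list(self):
            resp, zones = self.client.get_availability_zone_list()
            self.assertEqual(200, resp.status)
            self.assertTrue(len(zones) > 0)


    class ExampleAZV2TestJSON(ExampleAZV3Test):
        _api_version = 2          # same tests against the v2 API


    class ExampleAZV2TestXML(ExampleAZV2TestJSON):
        _interface = 'xml'        # and once more over the XML interface
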
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 778294e..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -28,11 +28,11 @@
 
 
 class ServersTestJSON(base.BaseV2ComputeTest):
-    run_ssh = CONF.compute.run_ssh
     disk_config = 'AUTO'
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersTestJSON, cls).setUpClass()
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
@@ -54,13 +54,6 @@
         resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @test.attr(type='smoke')
-    def test_create_server_response(self):
-        # Check that the required fields are returned with values
-        self.assertEqual(202, self.resp.status)
-        self.assertTrue(self.server_initial['id'] is not None)
-        self.assertTrue(self.server_initial['adminPass'] is not None)
-
-    @test.attr(type='smoke')
     def test_verify_server_details(self):
         # Verify the specified server attributes are set correctly
         self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -89,7 +82,8 @@
         found = any([i for i in servers if i['id'] == self.server['id']])
         self.assertTrue(found)
 
-    @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+    @testtools.skipUnless(CONF.compute.run_ssh,
+                          'Instance validation tests are disabled.')
     @test.attr(type='gate')
     def test_verify_created_server_vcpus(self):
         # Verify that the number of vcpus reported by the instance matches
@@ -99,7 +93,8 @@
                                                   self.password)
         self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
 
-    @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+    @testtools.skipUnless(CONF.compute.run_ssh,
+                          'Instance validation tests are disabled.')
     @test.attr(type='gate')
     def test_host_name_is_same_as_server_name(self):
         # Verify the instance host name is the same as the server name
@@ -109,38 +104,22 @@
 
 
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
-    run_ssh = CONF.compute.run_ssh
     disk_config = 'AUTO'
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
-        cls.meta = {'hello': 'world'}
-        cls.accessIPv4 = '1.1.1.1'
-        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
-        cls.name = data_utils.rand_name('server')
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
-        cls.client = cls.servers_client
         cls.flavor_client = cls.os_adm.flavors_client
-        cli_resp = cls.create_test_server(name=cls.name,
-                                          meta=cls.meta,
-                                          accessIPv4=cls.accessIPv4,
-                                          accessIPv6=cls.accessIPv6,
-                                          personality=personality,
-                                          disk_config=cls.disk_config)
-        cls.resp, cls.server_initial = cli_resp
-        cls.password = cls.server_initial['adminPass']
-        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
-        resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+        cls.client = cls.servers_client
 
-    @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+    @testtools.skipUnless(CONF.compute.run_ssh,
+                          'Instance validation tests are disabled.')
     @test.attr(type='gate')
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
 
-        def create_flavor_with_extra_specs(self):
+        def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
             ram = 64
@@ -153,12 +132,12 @@
                                           ram, vcpus, disk,
                                           flavor_with_eph_disk_id,
                                           ephemeral=1))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
+            self.addCleanup(flavor_clean_up, flavor['id'])
             self.assertEqual(200, resp.status)
 
             return flavor['id']
 
-        def create_flavor_without_extra_specs(self):
+        def create_flavor_without_extra_specs():
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
@@ -171,18 +150,18 @@
                             create_flavor(flavor_no_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_no_eph_disk_id))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
+            self.addCleanup(flavor_clean_up, flavor['id'])
             self.assertEqual(200, resp.status)
 
             return flavor['id']
 
-        def flavor_clean_up(self, flavor_id):
+        def flavor_clean_up(flavor_id):
             resp, body = self.flavor_client.delete_flavor(flavor_id)
             self.assertEqual(resp.status, 202)
             self.flavor_client.wait_for_resource_deletion(flavor_id)
 
-        flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
-        flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
 
         admin_pass = self.image_ssh_password
 
@@ -195,13 +174,18 @@
                                       adminPass=admin_pass,
                                       flavor=flavor_with_eph_disk_id))
         # Get partition number of server without extra specs.
+        _, server_no_eph_disk = self.client.get_server(
+            server_no_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_no_eph_disk,
-                                                  self.ssh_user, self.password)
-        partition_num = len(linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num = len(linux_client.get_partitions().split('\n'))
 
+        _, server_with_eph_disk = self.client.get_server(
+            server_with_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_with_eph_disk,
-                                                  self.ssh_user, self.password)
-        self.assertEqual(partition_num + 1, linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num_eph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_eph)
 
 
 class ServersTestManualDisk(ServersTestJSON):
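
Several classes in this diff drop class-level flags such as `run_ssh = CONF.compute.run_ssh` paired with `@testtools.skipIf(not run_ssh, ...)` in favour of `@testtools.skipUnless(CONF.compute.run_ssh, ...)` read directly from the config object; the change mainly removes the duplicated class-level flags. A small standalone sketch of the decorator behaviour (the `RUN_SSH` constant merely stands in for the config option):

    import testtools

    RUN_SSH = False  # stand-in for the CONF.compute.run_ssh option


    class ExampleSshTest(testtools.TestCase):

        @testtools.skipUnless(RUN_SSH,
                              'Instance validation tests are disabled.')
        def test_needs_ssh(self):
            # only runs when the ssh validation option is enabled
            self.assertTrue(True)
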
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 7e34213..9e34922 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -23,7 +23,6 @@
 
 
 class DeleteServersTestJSON(base.BaseV2ComputeTest):
-    pause_available = CONF.compute_feature_enabled.pause
 
     # NOTE: Server creations of each test class should be under 10
     # for preventing "Quota exceeded for instances"
@@ -59,7 +58,8 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
-    @testtools.skipIf(not pause_available, 'Pause is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_delete_server_while_in_pause_state(self):
         # Delete a server while it's VM state is Pause
@@ -102,6 +102,26 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @test.attr(type='gate')
+    def test_delete_server_while_in_attached_volume(self):
+        # Delete a server while a volume is attached to it
+        volumes_client = self.volumes_extensions_client
+        device = '/dev/%s' % CONF.compute.volume_device_name
+        resp, server = self.create_test_server(wait_until='ACTIVE')
+
+        resp, volume = volumes_client.create_volume(1)
+        self.addCleanup(volumes_client.delete_volume, volume['id'])
+        volumes_client.wait_for_volume_status(volume['id'], 'available')
+        resp, body = self.client.attach_volume(server['id'],
+                                               volume['id'],
+                                               device=device)
+        volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+        resp, _ = self.client.delete_server(server['id'])
+        self.assertEqual('204', resp['status'])
+        self.client.wait_for_server_termination(server['id'])
+        volumes_client.wait_for_volume_status(volume['id'], 'available')
+
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
     # NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index f0913f1..f66020c 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -28,6 +28,7 @@
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
+        cls.set_network_resources(network=True, subnet=True, dhcp=True)
         super(ListServerFiltersTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
 
@@ -69,8 +70,12 @@
         resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
-
-        cls.fixed_network_name = CONF.compute.fixed_network_name
+        if (CONF.service_available.neutron and
+                CONF.compute.allow_tenant_isolation):
+            network = cls.isolated_creds.get_primary_network()
+            cls.fixed_network_name = network['name']
+        else:
+            cls.fixed_network_name = CONF.compute.fixed_network_name
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
@@ -226,7 +231,6 @@
         self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
         self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
-    @test.skip_because(bug="1170718")
     @test.attr(type='gate')
     def test_list_servers_filtered_by_ip(self):
         # Filter servers by ip
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 768cc11..28d64fb 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import datetime
-
 from six import moves
 
 from tempest.api.compute import base
@@ -37,9 +35,8 @@
         # tearDownClass method of the super-class.
         cls.existing_fixtures = []
         cls.deleted_fixtures = []
-        cls.start_time = datetime.datetime.utcnow()
         for x in moves.xrange(2):
-            resp, srv = cls.create_test_server()
+            resp, srv = cls.create_test_server(wait_until='ACTIVE')
             cls.existing_fixtures.append(srv)
 
         resp, srv = cls.create_test_server()
@@ -127,19 +124,6 @@
         self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                           {'limit': -1})
 
-    @test.attr(type='gate')
-    def test_list_servers_by_changes_since(self):
-        # Servers are listed by specifying changes-since date
-        changes_since = {'changes-since': self.start_time.isoformat()}
-        resp, body = self.client.list_servers(changes_since)
-        self.assertEqual('200', resp['status'])
-        # changes-since returns all instances, including deleted.
-        num_expected = (len(self.existing_fixtures) +
-                        len(self.deleted_fixtures))
-        self.assertEqual(num_expected, len(body['servers']),
-                         "Number of servers %d is wrong in %s" %
-                         (num_expected, body['servers']))
-
     @test.attr(type=['negative', 'gate'])
     def test_list_servers_by_changes_since_invalid_date(self):
         # Return an error when invalid date format is passed
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 72ccc71..d0fd876 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -29,9 +29,6 @@
 
 
 class ServerActionsTestJSON(base.BaseV2ComputeTest):
-    resize_available = CONF.compute_feature_enabled.resize
-    pause_available = CONF.compute_feature_enabled.pause
-    suspend_available = CONF.compute_feature_enabled.suspend
     run_ssh = CONF.compute.run_ssh
 
     def setUp(self):
@@ -45,8 +42,15 @@
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        _, server = self.client.get_server(self.server_id)
+        self.assertEqual(self.image_ref, server['image']['id'])
+        self.server_check_teardown()
+        super(ServerActionsTestJSON, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServerActionsTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
         cls.server_id = cls.rebuild_server(None)
@@ -128,7 +132,6 @@
                                                    metadata=meta,
                                                    personality=personality,
                                                    adminPass=password)
-        self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -148,6 +151,8 @@
             linux_client = remote_client.RemoteClient(server, self.ssh_user,
                                                       password)
             linux_client.validate_authentication()
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, self.image_ref)
 
     @test.attr(type='gate')
     def test_rebuild_server_in_stop_state(self):
@@ -160,11 +165,7 @@
         resp, server = self.client.stop(self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
-        self.addCleanup(self.client.start, self.server_id)
         resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
-        self.addCleanup(self.client.wait_for_server_status, self.server_id,
-                        'SHUTOFF')
-        self.addCleanup(self.client.rebuild, self.server_id, old_image)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -178,6 +179,12 @@
         rebuilt_image_id = server['image']['id']
         self.assertEqual(new_image, rebuilt_image_id)
 
+        # Restore the original image (the tearDown verifies it again)
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, old_image)
+            self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+        self.client.start(self.server_id)
+
     def _detect_server_image_flavor(self, server_id):
         # Detects the current server image flavor ref.
         resp, server = self.client.get_server(server_id)
@@ -186,26 +193,48 @@
             if current_flavor == self.flavor_ref else self.flavor_ref
         return current_flavor, new_flavor_ref
 
-    @testtools.skipIf(not resize_available, 'Resize not available.')
-    @test.attr(type='smoke')
-    def test_resize_server_confirm(self):
+    def _test_resize_server_confirm(self, stop=False):
         # The server's RAM and disk space should be modified to that of
         # the provided flavor
 
         previous_flavor_ref, new_flavor_ref = \
             self._detect_server_image_flavor(self.server_id)
 
+        if stop:
+            resp = self.servers_client.stop(self.server_id)[0]
+            self.assertEqual(202, resp.status)
+            self.servers_client.wait_for_server_status(self.server_id,
+                                                       'SHUTOFF')
+
         resp, server = self.client.resize(self.server_id, new_flavor_ref)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
 
         self.client.confirm_resize(self.server_id)
-        self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+        expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+        self.client.wait_for_server_status(self.server_id, expected_status)
 
         resp, server = self.client.get_server(self.server_id)
         self.assertEqual(new_flavor_ref, server['flavor']['id'])
 
-    @testtools.skipIf(not resize_available, 'Resize not available.')
+        if stop:
+            # NOTE(mriedem): tearDown requires the server to be started.
+            self.client.start(self.server_id)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm(self):
+        self._test_resize_server_confirm(stop=False)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm_from_stopped(self):
+        self._test_resize_server_confirm(stop=True)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
     @test.attr(type='gate')
     def test_resize_server_revert(self):
         # The server's RAM and disk space should return to its original
@@ -342,7 +371,8 @@
 
         self.wait_for(self._get_output)
 
-    @testtools.skipIf(not pause_available, 'Pause is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_pause_unpause_server(self):
         resp, server = self.client.pause_server(self.server_id)
@@ -352,7 +382,8 @@
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'ACTIVE')
 
-    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type='gate')
     def test_suspend_resume_server(self):
         resp, server = self.client.suspend_server(self.server_id)
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
new file mode 100644
index 0000000..0cd23fd
--- /dev/null
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -0,0 +1,112 @@
+# Copyright 2014 NEC Technologies India Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class ServerGroupTestJSON(base.BaseV2ComputeTest):
+    """
+    These tests check the server-group APIs.
+    They create/delete server-groups with different policies
+    (affinity and anti-affinity) and also cover listing server-groups
+    and getting server-group details.
+    """
+    @classmethod
+    @test.safe_setup
+    def setUpClass(cls):
+        super(ServerGroupTestJSON, cls).setUpClass()
+        if not test.is_extension_enabled('os-server-groups', 'compute'):
+            msg = "os-server-groups extension is not enabled."
+            raise cls.skipException(msg)
+        cls.client = cls.servers_client
+        server_group_name = data_utils.rand_name('server-group')
+        cls.policy = ['affinity']
+
+        _, cls.created_server_group = cls.create_test_server_group(
+            server_group_name,
+            cls.policy)
+
+    def _create_server_group(self, name, policy):
+        # create the test server-group with given policy
+        server_group = {'name': name, 'policies': policy}
+        resp, body = self.create_test_server_group(name, policy)
+        self.assertEqual(200, resp.status)
+        for key in ['name', 'policies']:
+            self.assertEqual(server_group[key], body[key])
+        return body
+
+    def _delete_server_group(self, server_group):
+        # delete the test server-group
+        resp, _ = self.client.delete_server_group(server_group['id'])
+        self.assertEqual(204, resp.status)
+        # validation of server-group deletion
+        resp, server_group_list = self.client.list_server_groups()
+        self.assertEqual(200, resp.status)
+        self.assertNotIn(server_group, server_group_list)
+
+    def _create_delete_server_group(self, policy):
+        # Create and Delete the server-group with given policy
+        name = data_utils.rand_name('server-group')
+        server_group = self._create_server_group(name, policy)
+        self._delete_server_group(server_group)
+
+    @test.attr(type='gate')
+    def test_create_delete_server_group_with_affinity_policy(self):
+        # Create and Delete the server-group with affinity policy
+        self._create_delete_server_group(self.policy)
+
+    @test.attr(type='gate')
+    def test_create_delete_server_group_with_anti_affinity_policy(self):
+        # Create and Delete the server-group with anti-affinity policy
+        policy = ['anti-affinity']
+        self._create_delete_server_group(policy)
+
+    @test.attr(type='gate')
+    def test_create_delete_server_group_with_multiple_policies(self):
+        # Create and Delete the server-group with multiple policies
+        policies = ['affinity', 'affinity']
+        self._create_delete_server_group(policies)
+
+    @test.attr(type='gate')
+    def test_create_delete_multiple_server_groups_with_same_name_policy(self):
+        # Create and Delete the server-groups with same name and same policy
+        server_groups = []
+        server_group_name = data_utils.rand_name('server-group')
+        for i in range(0, 2):
+            server_groups.append(self._create_server_group(server_group_name,
+                                                           self.policy))
+        for key in ['name', 'policies']:
+            self.assertEqual(server_groups[0][key], server_groups[1][key])
+        self.assertNotEqual(server_groups[0]['id'], server_groups[1]['id'])
+
+        for i in range(0, 2):
+            self._delete_server_group(server_groups[i])
+
+    @test.attr(type='gate')
+    def test_get_server_group(self):
+        # Get the server-group
+        resp, body = self.client.get_server_group(
+            self.created_server_group['id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(self.created_server_group, body)
+
+    @test.attr(type='gate')
+    def test_list_server_groups(self):
+        # List the server-group
+        resp, body = self.client.list_server_groups()
+        self.assertEqual(200, resp.status)
+        self.assertIn(self.created_server_group, body)
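
For readers skimming the new file, the round trip it exercises condenses to a few client calls. A sketch that reuses only the helpers visible above (`create_test_server_group` on the base class, `get_server_group` and `delete_server_group` on the servers client); it illustrates the flow and is not an additional test being proposed:

    from tempest.api.compute import base
    from tempest.common.utils import data_utils
    from tempest import test


    class ServerGroupRoundTripSketch(base.BaseV2ComputeTest):

        @test.attr(type='gate')
        def test_round_trip(self):
            name = data_utils.rand_name('server-group')
            # base-class helper used by the tests above
            resp, group = self.create_test_server_group(name,
                                                        ['anti-affinity'])
            self.assertEqual(200, resp.status)

            resp, fetched = self.servers_client.get_server_group(group['id'])
            self.assertEqual(200, resp.status)
            self.assertEqual(group, fetched)

            resp, _ = self.servers_client.delete_server_group(group['id'])
            self.assertEqual(204, resp.status)
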
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 8b69c78..b55833c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -26,10 +26,7 @@
         super(ServerMetadataNegativeTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
         cls.quotas = cls.quotas_client
-        cls.admin_client = cls._get_identity_admin_client()
-        resp, tenants = cls.admin_client.list_tenants()
-        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                         cls.client.tenant_name][0]
+        cls.tenant_id = cls.client.tenant_id
         resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
 
         cls.server_id = server['id']
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ddfc1d5..b7e4e38 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -60,25 +60,6 @@
         resp, server = self.create_test_server(personality=person)
         self.assertEqual('202', resp['status'])
 
-    @test.attr(type='gate')
-    def test_create_server_with_existent_personality_file(self):
-        # Any existing file that match specified file will be renamed to
-        # include the bak extension appended with a time stamp
-
-        # TODO(zhikunliu): will add validations when ssh instance validation
-        # re-factor is ready
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
-        resp, server = self.create_test_server(personality=personality,
-                                               wait_until="ACTIVE")
-        resp, image = self.create_image_from_server(server['id'],
-                                                    wait_until="ACTIVE")
-        resp, server = self.create_test_server(image_id=image['id'],
-                                               personality=personality,
-                                               wait_until="ACTIVE")
-        self.assertEqual('202', resp['status'])
-
 
 class ServerPersonalityTestXML(ServerPersonalityTestJSON):
     _interface = "xml"
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index ef45585..dae4709 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -12,12 +12,16 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import testtools
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
 
@@ -45,6 +49,7 @@
         cls.servers_client.rescue_server(
             cls.rescue_id, adminPass=rescue_password)
         cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')
+        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
 
     @classmethod
     def tearDownClass(cls):
@@ -66,6 +71,8 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_rescue_paused_instance(self):
         # Rescue a paused server
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 40b97d7..936b871 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -70,20 +70,34 @@
         resp, server = self.client.get_server(server['id'])
         self.assertEqual(key_name, server['key_name'])
 
+    def _update_server_name(self, server_id, status):
+        # The server name should be changed to the provided value
+        new_name = data_utils.rand_name('server')
+        # Update the server with a new name
+        resp, server = self.client.update_server(server_id,
+                                                 name=new_name)
+        self.client.wait_for_server_status(server_id, status)
+
+        # Verify the name of the server has changed
+        resp, server = self.client.get_server(server_id)
+        self.assertEqual(new_name, server['name'])
+        return server
+
     @test.attr(type='gate')
     def test_update_server_name(self):
         # The server name should be changed to the provided value
         resp, server = self.create_test_server(wait_until='ACTIVE')
 
-        # Update the server with a new name
-        resp, server = self.client.update_server(server['id'],
-                                                 name='newname')
-        self.assertEqual(200, resp.status)
-        self.client.wait_for_server_status(server['id'], 'ACTIVE')
+        self._update_server_name(server['id'], 'ACTIVE')
 
-        # Verify the name of the server has changed
-        resp, server = self.client.get_server(server['id'])
-        self.assertEqual('newname', server['name'])
+    @test.attr(type='gate')
+    def test_update_server_name_in_stop_state(self):
+        # The server name should be changed to the provided value
+        resp, server = self.create_test_server(wait_until='ACTIVE')
+        self.client.stop(server['id'])
+        self.client.wait_for_server_status(server['id'], 'SHUTOFF')
+        updated_server = self._update_server_name(server['id'], 'SHUTOFF')
+        self.assertNotIn('progress', updated_server)
 
     @test.attr(type='gate')
     def test_update_access_server_address(self):
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index cbfec5c..6343ead 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -29,8 +29,6 @@
 
 
 class ServersNegativeTestJSON(base.BaseV2ComputeTest):
-    pause_available = CONF.compute_feature_enabled.pause
-    suspend_available = CONF.compute_feature_enabled.suspend
 
     def setUp(self):
         super(ServersNegativeTestJSON, self).setUp()
@@ -39,11 +37,18 @@
         except Exception:
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        self.server_check_teardown()
+        super(ServersNegativeTestJSON, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
         super(ServersNegativeTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
-        cls.alt_os = clients.AltManager()
+        if CONF.compute.allow_tenant_isolation:
+            cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
+        else:
+            cls.alt_os = clients.AltManager()
         cls.alt_client = cls.alt_os.servers_client
         resp, server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
@@ -129,17 +134,17 @@
         self.assertRaises(exceptions.NotFound, self.client.reboot,
                           nonexistent_server, 'SOFT')
 
-    @testtools.skipIf(not pause_available, 'Pause is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_paused_server(self):
         # Pause a paused server.
         self.client.pause_server(self.server_id)
-        self.addCleanup(self.client.unpause_server,
-                        self.server_id)
         self.client.wait_for_server_status(self.server_id, 'PAUSED')
         self.assertRaises(exceptions.Conflict,
                           self.client.pause_server,
                           self.server_id)
+        self.client.unpause_server(self.server_id)
 
     @test.attr(type=['negative', 'gate'])
     def test_rebuild_reboot_deleted_server(self):
@@ -309,7 +314,8 @@
         self.assertRaises(exceptions.NotFound, self.servers_client.stop,
                           nonexistent_server)
 
-    @testtools.skipIf(not pause_available, 'Pause is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_non_existent_server(self):
         # pause a non existent server
@@ -317,7 +323,8 @@
         self.assertRaises(exceptions.NotFound, self.client.pause_server,
                           nonexistent_server)
 
-    @testtools.skipIf(not pause_available, 'Pause is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_unpause_non_existent_server(self):
         # unpause a non existent server
@@ -325,7 +332,8 @@
         self.assertRaises(exceptions.NotFound, self.client.unpause_server,
                           nonexistent_server)
 
-    @testtools.skipIf(not pause_available, 'Pause is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_unpause_server_invalid_state(self):
         # unpause an active server.
@@ -333,7 +341,8 @@
                           self.client.unpause_server,
                           self.server_id)
 
-    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_suspend_non_existent_server(self):
         # suspend a non existent server
@@ -341,20 +350,21 @@
         self.assertRaises(exceptions.NotFound, self.client.suspend_server,
                           nonexistent_server)
 
-    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_suspend_server_invalid_state(self):
         # suspend a suspended server.
         resp, _ = self.client.suspend_server(self.server_id)
-        self.addCleanup(self.client.resume_server,
-                        self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
         self.assertRaises(exceptions.Conflict,
                           self.client.suspend_server,
                           self.server_id)
+        self.client.resume_server(self.server_id)
 
-    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resume_non_existent_server(self):
         # resume a non existent server
@@ -362,7 +372,8 @@
         self.assertRaises(exceptions.NotFound, self.client.resume_server,
                           nonexistent_server)
 
-    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resume_server_invalid_state(self):
         # resume an active server.
@@ -420,7 +431,6 @@
         # shelve a shelved server.
         resp, server = self.client.shelve_server(self.server_id)
         self.assertEqual(202, resp.status)
-        self.addCleanup(self.client.unshelve_server, self.server_id)
 
         offload_time = CONF.compute.shelved_offload_time
         if offload_time >= 0:
@@ -442,6 +452,8 @@
                           self.client.shelve_server,
                           self.server_id)
 
+        self.client.unshelve_server(self.server_id)
+
     @test.attr(type=['negative', 'gate'])
     def test_unshelve_non_existent_server(self):
         # unshelve a non existent server
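
setUpClass here (and the analogous change in test_authorization.py below) now builds the alternate-tenant client manager from isolated credentials whenever tenant isolation is allowed, instead of always instantiating `clients.AltManager()`. A small sketch of that selection logic using only calls shown in this diff; the helper function name is made up for illustration:

    from tempest import clients
    from tempest import config

    CONF = config.CONF


    def pick_alt_manager(isolated_creds):
        # prefer dynamically created alt credentials when tenant isolation
        # is allowed; otherwise fall back to the static alt_* config entries
        if CONF.compute.allow_tenant_isolation:
            creds = isolated_creds.get_alt_creds()
            return clients.Manager(credentials=creds)
        return clients.AltManager()
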
diff --git a/tempest/api/compute/servers/test_servers_negative_new.py b/tempest/api/compute/servers/test_servers_negative_new.py
index f860ff9..43ddb3a 100644
--- a/tempest/api/compute/servers/test_servers_negative_new.py
+++ b/tempest/api/compute/servers/test_servers_negative_new.py
@@ -21,6 +21,7 @@
 load_tests = test.NegativeAutoTest.load_tests
 
 
+@test.SimpleNegativeAutoTest
 class GetConsoleOutputNegativeTestJSON(base.BaseV2ComputeTest,
                                        test.NegativeAutoTest):
     _service = 'compute'
@@ -31,7 +32,3 @@
         super(GetConsoleOutputNegativeTestJSON, cls).setUpClass()
         _resp, server = cls.create_test_server()
         cls.set_resource("server", server['id'])
-
-    @test.attr(type=['negative', 'gate'])
-    def test_get_console_output(self):
-        self.execute(self._schema_file)
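
With the `@test.SimpleNegativeAutoTest` decorator, the boilerplate test method that just called `self.execute(self._schema_file)` is no longer needed; the decorator supplies it. A minimal sketch of a decorated negative test class using only names from this diff (the schema path is a hypothetical placeholder):

    from tempest.api.compute import base
    from tempest import test

    load_tests = test.NegativeAutoTest.load_tests


    @test.SimpleNegativeAutoTest
    class ExampleActionNegativeTestJSON(base.BaseV2ComputeTest,
                                        test.NegativeAutoTest):
        _service = 'compute'
        _schema_file = 'compute/servers/example_action.json'  # hypothetical
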
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 7f909d7..375ddf8 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -43,10 +43,7 @@
 
         if CONF.compute.allow_tenant_isolation:
             creds = cls.isolated_creds.get_alt_creds()
-            username, tenant_name, password = creds
-            cls.alt_manager = clients.Manager(username=username,
-                                              password=password,
-                                              tenant_name=tenant_name)
+            cls.alt_manager = clients.Manager(credentials=creds)
         else:
             # Use the alt_XXX credentials in the config file
             cls.alt_manager = clients.AltManager()
@@ -60,7 +57,7 @@
         resp, cls.server = cls.client.get_server(server['id'])
 
         name = data_utils.rand_name('image')
-        resp, body = cls.client.create_image(server['id'], name)
+        resp, body = cls.images_client.create_image(server['id'], name)
         image_id = data_utils.parse_image_id(resp['location'])
         cls.images_client.wait_for_image_status(image_id, 'ACTIVE')
         resp, cls.image = cls.images_client.get_image(image_id)
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 4db8c56..dc85e76 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -23,13 +23,8 @@
     def setUpClass(cls):
         super(QuotasTestJSON, cls).setUpClass()
         cls.client = cls.quotas_client
-        cls.admin_client = cls._get_identity_admin_client()
-        resp, tenants = cls.admin_client.list_tenants()
-        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                         cls.client.tenant_name][0]
-        resp, users = cls.admin_client.list_users_for_tenant(cls.tenant_id)
-        cls.user_id = [user['id'] for user in users if user['name'] ==
-                       cls.client.user][0]
+        cls.tenant_id = cls.client.tenant_id
+        cls.user_id = cls.client.user_id
         cls.default_quota_set = set(('injected_file_content_bytes',
                                      'metadata_items', 'injected_files',
                                      'ram', 'floating_ips',
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api/compute/v2/__init__.py
similarity index 100%
rename from tempest/api/compute/v3/certificates/__init__.py
rename to tempest/api/compute/v2/__init__.py
diff --git a/tempest/api/compute/v3/admin/test_availability_zone.py b/tempest/api/compute/v3/admin/test_availability_zone.py
deleted file mode 100644
index 9ca8953..0000000
--- a/tempest/api/compute/v3/admin/test_availability_zone.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest.test import attr
-
-
-class AZAdminV3Test(base.BaseV3ComputeAdminTest):
-
-    """
-    Tests Availability Zone API List
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        super(AZAdminV3Test, cls).setUpClass()
-        cls.client = cls.availability_zone_admin_client
-
-    @attr(type='gate')
-    def test_get_availability_zone_list(self):
-        # List of availability zone
-        resp, availability_zone = self.client.get_availability_zone_list()
-        self.assertEqual(200, resp.status)
-        self.assertTrue(len(availability_zone) > 0)
-
-    @attr(type='gate')
-    def test_get_availability_zone_list_detail(self):
-        # List of availability zones and available services
-        resp, availability_zone = \
-            self.client.get_availability_zone_list_detail()
-        self.assertEqual(200, resp.status)
-        self.assertTrue(len(availability_zone) > 0)
diff --git a/tempest/api/compute/v3/admin/test_availability_zone_negative.py b/tempest/api/compute/v3/admin/test_availability_zone_negative.py
index f3af6df..b012e65 100644
--- a/tempest/api/compute/v3/admin/test_availability_zone_negative.py
+++ b/tempest/api/compute/v3/admin/test_availability_zone_negative.py
@@ -15,7 +15,7 @@
 
 from tempest.api.compute import base
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class AZAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -30,7 +30,7 @@
         cls.client = cls.availability_zone_admin_client
         cls.non_adm_client = cls.availability_zone_client
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_availability_zone_list_detail_with_non_admin_user(self):
         # List of availability zones and available services with
         # non-administrator user
diff --git a/tempest/api/compute/v3/admin/test_flavors.py b/tempest/api/compute/v3/admin/test_flavors.py
index 401eb85..2a4fc02 100644
--- a/tempest/api/compute/v3/admin/test_flavors.py
+++ b/tempest/api/compute/v3/admin/test_flavors.py
@@ -169,7 +169,6 @@
                 flag = True
         self.assertTrue(flag)
 
-    @test.skip_because(bug="1209101")
     @test.attr(type='gate')
     def test_list_non_public_flavor(self):
         # Create a flavor with os-flavor-access:is_public false should
diff --git a/tempest/api/compute/v3/admin/test_flavors_access.py b/tempest/api/compute/v3/admin/test_flavors_access.py
index 03305ff..c641bf6 100644
--- a/tempest/api/compute/v3/admin/test_flavors_access.py
+++ b/tempest/api/compute/v3/admin/test_flavors_access.py
@@ -31,12 +31,8 @@
 
         cls.client = cls.flavors_admin_client
         admin_client = cls._get_identity_admin_client()
-        cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
-                                                     tenant_name)
-        cls.tenant_id = cls.tenant['id']
-        cls.adm_tenant = admin_client.get_tenant_by_name(
-            cls.flavors_admin_client.tenant_name)
-        cls.adm_tenant_id = cls.adm_tenant['id']
+        cls.tenant_id = cls.client.tenant_id
+        cls.adm_tenant_id = admin_client.tenant_id
         cls.flavor_name_prefix = 'test_flavor_access_'
         cls.ram = 512
         cls.vcpus = 1
diff --git a/tempest/api/compute/v3/admin/test_flavors_access_negative.py b/tempest/api/compute/v3/admin/test_flavors_access_negative.py
index 334d124..02ecb24 100644
--- a/tempest/api/compute/v3/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/v3/admin/test_flavors_access_negative.py
@@ -33,13 +33,7 @@
         super(FlavorsAccessNegativeV3Test, cls).setUpClass()
 
         cls.client = cls.flavors_admin_client
-        admin_client = cls._get_identity_admin_client()
-        cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
-                                                     tenant_name)
-        cls.tenant_id = cls.tenant['id']
-        cls.adm_tenant = admin_client.get_tenant_by_name(
-            cls.flavors_admin_client.tenant_name)
-        cls.adm_tenant_id = cls.adm_tenant['id']
+        cls.tenant_id = cls.client.tenant_id
         cls.flavor_name_prefix = 'test_flavor_access_'
         cls.ram = 512
         cls.vcpus = 1
diff --git a/tempest/api/compute/v3/admin/test_hypervisor.py b/tempest/api/compute/v3/admin/test_hypervisor.py
index 93d4441..f3397a8 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor.py
@@ -14,7 +14,7 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.test import attr
+from tempest import test
 
 
 class HypervisorAdminV3Test(base.BaseV3ComputeAdminTest):
@@ -34,20 +34,20 @@
         self.assertEqual(200, resp.status)
         return hypers
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_hypervisor_list(self):
         # List of hypervisor and available hypervisors hostname
         hypers = self._list_hypervisors()
         self.assertTrue(len(hypers) > 0)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_hypervisor_list_details(self):
         # Display the details of all hypervisors
         resp, hypers = self.client.get_hypervisor_list_details()
         self.assertEqual(200, resp.status)
         self.assertTrue(len(hypers) > 0)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_hypervisor_show_details(self):
         # Display the details of the specified hypervisor
         hypers = self._list_hypervisors()
@@ -60,7 +60,7 @@
         self.assertEqual(details['hypervisor_hostname'],
                          hypers[0]['hypervisor_hostname'])
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_hypervisor_show_servers(self):
         # Show instances about the specific hypervisors
         hypers = self._list_hypervisors()
@@ -71,14 +71,14 @@
         self.assertEqual(200, resp.status)
         self.assertTrue(len(hypervisors) > 0)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_hypervisor_stats(self):
         # Verify the stats of all hypervisors
         resp, stats = self.client.get_hypervisor_stats()
         self.assertEqual(200, resp.status)
         self.assertTrue(len(stats) > 0)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_hypervisor_uptime(self):
         # Verify that GET shows the specified hypervisor uptime
         hypers = self._list_hypervisors()
@@ -87,7 +87,7 @@
         self.assertEqual(200, resp.status)
         self.assertTrue(len(uptime) > 0)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_search_hypervisor(self):
         hypers = self._list_hypervisors()
         self.assertTrue(len(hypers) > 0)
diff --git a/tempest/api/compute/v3/admin/test_hypervisor_negative.py b/tempest/api/compute/v3/admin/test_hypervisor_negative.py
index 45642b7..ae4df15 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor_negative.py
@@ -18,7 +18,7 @@
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class HypervisorAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -39,7 +39,7 @@
         self.assertEqual(200, resp.status)
         return hypers
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_show_nonexistent_hypervisor(self):
         nonexistent_hypervisor_id = str(uuid.uuid4())
 
@@ -48,7 +48,7 @@
             self.client.get_hypervisor_show_details,
             nonexistent_hypervisor_id)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_show_hypervisor_with_non_admin_user(self):
         hypers = self._list_hypervisors()
         self.assertTrue(len(hypers) > 0)
@@ -58,7 +58,7 @@
             self.non_adm_client.get_hypervisor_show_details,
             hypers[0]['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_show_servers_with_non_admin_user(self):
         hypers = self._list_hypervisors()
         self.assertTrue(len(hypers) > 0)
@@ -68,7 +68,7 @@
             self.non_adm_client.get_hypervisor_servers,
             hypers[0]['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_show_servers_with_nonexistent_hypervisor(self):
         nonexistent_hypervisor_id = str(uuid.uuid4())
 
@@ -77,13 +77,13 @@
             self.client.get_hypervisor_servers,
             nonexistent_hypervisor_id)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_hypervisor_stats_with_non_admin_user(self):
         self.assertRaises(
             exceptions.Unauthorized,
             self.non_adm_client.get_hypervisor_stats)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_nonexistent_hypervisor_uptime(self):
         nonexistent_hypervisor_id = str(uuid.uuid4())
 
@@ -92,7 +92,7 @@
             self.client.get_hypervisor_uptime,
             nonexistent_hypervisor_id)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_hypervisor_uptime_with_non_admin_user(self):
         hypers = self._list_hypervisors()
         self.assertTrue(len(hypers) > 0)
@@ -102,21 +102,21 @@
             self.non_adm_client.get_hypervisor_uptime,
             hypers[0]['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_hypervisor_list_with_non_admin_user(self):
         # List of hypervisor and available services with non admin user
         self.assertRaises(
             exceptions.Unauthorized,
             self.non_adm_client.get_hypervisor_list)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_hypervisor_list_details_with_non_admin_user(self):
         # List of hypervisor details and available services with non admin user
         self.assertRaises(
             exceptions.Unauthorized,
             self.non_adm_client.get_hypervisor_list_details)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_search_nonexistent_hypervisor(self):
         nonexistent_hypervisor_name = data_utils.rand_name('test_hypervisor')
 
@@ -125,7 +125,7 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(0, len(hypers))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_search_hypervisor_with_non_admin_user(self):
         hypers = self._list_hypervisors()
         self.assertTrue(len(hypers) > 0)
diff --git a/tempest/api/compute/v3/admin/test_migrations.py b/tempest/api/compute/v3/admin/test_migrations.py
new file mode 100644
index 0000000..e8bd473
--- /dev/null
+++ b/tempest/api/compute/v3/admin/test_migrations.py
@@ -0,0 +1,50 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminV3Test(base.BaseV3ComputeAdminTest):
+
+    @test.attr(type='gate')
+    def test_list_migrations(self):
+        # Admin can get the migrations list
+        resp, _ = self.migrations_admin_client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='gate')
+    def test_list_migrations_in_flavor_resize_situation(self):
+        # Admin can get the migrations list which contains the resized server
+        resp, server = self.create_test_server(wait_until="ACTIVE")
+        server_id = server['id']
+
+        resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize(server_id)
+        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+        resp, body = self.migrations_admin_client.list_migrations()
+        self.assertEqual(200, resp.status)
+
+        instance_uuids = [x['instance_uuid'] for x in body]
+        self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/v3/admin/test_quotas.py b/tempest/api/compute/v3/admin/test_quotas.py
index 917c115..19c31fe 100644
--- a/tempest/api/compute/v3/admin/test_quotas.py
+++ b/tempest/api/compute/v3/admin/test_quotas.py
@@ -32,8 +32,7 @@
 
         # NOTE(afazekas): these test cases should always create and use a new
         # tenant most of them should be skipped if we can't do that
-        cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
-            'tenantId')
+        cls.demo_tenant_id = cls.client.tenant_id
 
         cls.default_quota_set = set(('metadata_items',
                                      'ram', 'floating_ips',
@@ -94,21 +93,38 @@
     # TODO(afazekas): merge these test cases
     @test.attr(type='gate')
     def test_get_updated_quotas(self):
-        # Verify that GET shows the updated quota set
+        # Verify that GET shows the updated quota set of the tenant
         tenant_name = data_utils.rand_name('cpu_quota_tenant_')
         tenant_desc = tenant_name + '-desc'
         identity_client = self.os_adm.identity_client
         _, tenant = identity_client.create_tenant(name=tenant_name,
                                                   description=tenant_desc)
         tenant_id = tenant['id']
-        self.addCleanup(identity_client.delete_tenant,
-                        tenant_id)
+        self.addCleanup(identity_client.delete_tenant, tenant_id)
 
-        self.adm_client.update_quota_set(tenant_id,
-                                         ram='5120')
+        self.adm_client.update_quota_set(tenant_id, ram='5120')
         resp, quota_set = self.adm_client.get_quota_set(tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(quota_set['ram'], 5120)
+        self.assertEqual(5120, quota_set['ram'])
+
+        # Verify that GET shows the updated quota set of the user
+        user_name = data_utils.rand_name('cpu_quota_user_')
+        password = data_utils.rand_name('password-')
+        email = user_name + '@testmail.tm'
+        _, user = identity_client.create_user(name=user_name,
+                                              password=password,
+                                              tenant_id=tenant_id,
+                                              email=email)
+        user_id = user['id']
+        self.addCleanup(identity_client.delete_user, user_id)
+
+        self.adm_client.update_quota_set(tenant_id,
+                                         user_id=user_id,
+                                         ram='2048')
+        resp, quota_set = self.adm_client.get_quota_set(tenant_id,
+                                                        user_id=user_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(2048, quota_set['ram'])
 
     @test.attr(type='gate')
     def test_delete_quota(self):
diff --git a/tempest/api/compute/v3/admin/test_quotas_negative.py b/tempest/api/compute/v3/admin/test_quotas_negative.py
index d138e80..307462f 100644
--- a/tempest/api/compute/v3/admin/test_quotas_negative.py
+++ b/tempest/api/compute/v3/admin/test_quotas_negative.py
@@ -30,8 +30,7 @@
 
         # NOTE(afazekas): these test cases should always create and use a new
         # tenant; most of them should be skipped if we can't do that
-        cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
-            'tenantId')
+        cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
 
     # TODO(afazekas): Add a dedicated tenant to the skipped quota tests
     # it can be moved into the setUpClass as well
diff --git a/tempest/api/compute/v3/admin/test_servers.py b/tempest/api/compute/v3/admin/test_servers.py
index 579a535..366cfc6 100644
--- a/tempest/api/compute/v3/admin/test_servers.py
+++ b/tempest/api/compute/v3/admin/test_servers.py
@@ -14,7 +14,6 @@
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
-from tempest import exceptions
 from tempest import test
 
 
@@ -44,16 +43,6 @@
                                               wait_until='ACTIVE')
         cls.s2_id = server['id']
 
-    def _get_unused_flavor_id(self):
-        flavor_id = data_utils.rand_int_id(start=1000)
-        while True:
-            try:
-                resp, body = self.flavors_client.get_flavor_details(flavor_id)
-            except exceptions.NotFound:
-                break
-            flavor_id = data_utils.rand_int_id(start=1000)
-        return flavor_id
-
     @test.attr(type='gate')
     def test_list_servers_by_admin(self):
         # Listing servers by admin user returns empty list by default
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index cc1be4e..a971463 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -14,10 +14,15 @@
 
 import uuid
 
+import testtools
+
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
+
+CONF = config.CONF
 
 
 class ServersAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -32,10 +37,7 @@
         cls.client = cls.servers_admin_client
         cls.non_adm_client = cls.servers_client
         cls.flavors_client = cls.flavors_admin_client
-        cls.identity_client = cls._get_identity_admin_client()
-        tenant = cls.identity_client.get_tenant_by_name(
-            cls.client.tenant_name)
-        cls.tenant_id = tenant['id']
+        cls.tenant_id = cls.client.tenant_id
 
         cls.s1_name = data_utils.rand_name('server')
         resp, server = cls.create_test_server(name=cls.s1_name,
@@ -52,7 +54,7 @@
             flavor_id = data_utils.rand_int_id(start=1000)
         return flavor_id
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_ram(self):
         flavor_name = data_utils.rand_name("flavor-")
         flavor_id = self._get_unused_flavor_id()
@@ -70,7 +72,7 @@
                           self.servers[0]['id'],
                           flavor_ref['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_resize_server_using_overlimit_vcpus(self):
         flavor_name = data_utils.rand_name("flavor-")
         flavor_id = self._get_unused_flavor_id()
@@ -88,38 +90,40 @@
                           self.servers[0]['id'],
                           flavor_ref['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_reset_state_server_invalid_state(self):
         self.assertRaises(exceptions.BadRequest,
                           self.client.reset_state, self.s1_id,
                           state='invalid')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_reset_state_server_invalid_type(self):
         self.assertRaises(exceptions.BadRequest,
                           self.client.reset_state, self.s1_id,
                           state=1)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_reset_state_server_nonexistent_server(self):
         self.assertRaises(exceptions.NotFound,
                           self.client.reset_state, '999')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_server_diagnostics_by_non_admin(self):
         # Non-admin user cannot view server diagnostics according to policy
         self.assertRaises(exceptions.Unauthorized,
                           self.non_adm_client.get_server_diagnostics,
                           self.s1_id)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_migrate_non_existent_server(self):
         # Migrate a non-existent server
         self.assertRaises(exceptions.NotFound,
                           self.client.migrate_server,
                           str(uuid.uuid4()))
 
-    @attr(type=['negative', 'gate'])
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
+    @test.attr(type=['negative', 'gate'])
     def test_migrate_server_invalid_state(self):
         # create server.
         resp, server = self.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/v3/admin/test_services.py b/tempest/api/compute/v3/admin/test_services.py
index b367dad..e6efb70 100644
--- a/tempest/api/compute/v3/admin/test_services.py
+++ b/tempest/api/compute/v3/admin/test_services.py
@@ -15,7 +15,7 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.test import attr
+from tempest import test
 
 
 class ServicesAdminV3Test(base.BaseV3ComputeAdminTest):
@@ -29,13 +29,13 @@
         super(ServicesAdminV3Test, cls).setUpClass()
         cls.client = cls.services_admin_client
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_list_services(self):
         resp, services = self.client.list_services()
         self.assertEqual(200, resp.status)
         self.assertNotEqual(0, len(services))
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_service_by_service_binary_name(self):
         binary_name = 'nova-compute'
         params = {'binary': binary_name}
@@ -45,7 +45,7 @@
         for service in services:
             self.assertEqual(binary_name, service['binary'])
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_service_by_host_name(self):
         resp, services = self.client.list_services()
         self.assertEqual(200, resp.status)
@@ -65,7 +65,7 @@
         # on order.
         self.assertEqual(sorted(s1), sorted(s2))
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_service_by_service_and_host_name(self):
         resp, services = self.client.list_services()
         host_name = services[0]['host']
diff --git a/tempest/api/compute/v3/admin/test_services_negative.py b/tempest/api/compute/v3/admin/test_services_negative.py
index 3168af2..6ac78d4 100644
--- a/tempest/api/compute/v3/admin/test_services_negative.py
+++ b/tempest/api/compute/v3/admin/test_services_negative.py
@@ -16,7 +16,7 @@
 
 from tempest.api.compute import base
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class ServicesAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -31,12 +31,12 @@
         cls.client = cls.services_admin_client
         cls.non_admin_client = cls.services_client
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_services_with_non_admin_user(self):
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.list_services)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_service_by_invalid_params(self):
         # All services are returned for a request with an invalid parameter
         resp, services = self.client.list_services()
@@ -45,7 +45,7 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(len(services), len(services_xxx))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_service_by_invalid_service_and_valid_host(self):
         resp, services = self.client.list_services()
         host_name = services[0]['host']
@@ -54,7 +54,7 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(0, len(services))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_service_with_valid_service_and_invalid_host(self):
         resp, services = self.client.list_services()
         binary_name = services[0]['binary']
diff --git a/tempest/api/compute/v3/certificates/test_certificates.py b/tempest/api/compute/v3/certificates/test_certificates.py
deleted file mode 100644
index ce025fc..0000000
--- a/tempest/api/compute/v3/certificates/test_certificates.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest.test import attr
-
-
-class CertificatesV3Test(base.BaseV3ComputeTest):
-
-    @attr(type='gate')
-    def test_create_and_get_root_certificate(self):
-        # create certificates
-        resp, create_body = self.certificates_client.create_certificate()
-        self.assertEqual(201, resp.status)
-        self.assertIn('data', create_body)
-        self.assertIn('private_key', create_body)
-        # get the root certificate
-        resp, body = self.certificates_client.get_certificate('root')
-        self.assertEqual(200, resp.status)
-        self.assertIn('data', body)
-        self.assertIn('private_key', body)
diff --git a/tempest/api/compute/v3/flavors/test_flavors.py b/tempest/api/compute/v3/flavors/test_flavors.py
deleted file mode 100644
index a0bbba6..0000000
--- a/tempest/api/compute/v3/flavors/test_flavors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest import test
-
-
-class FlavorsV3Test(base.BaseV3ComputeTest):
-
-    @classmethod
-    def setUpClass(cls):
-        super(FlavorsV3Test, cls).setUpClass()
-        cls.client = cls.flavors_client
-
-    @test.attr(type='smoke')
-    def test_list_flavors(self):
-        # List of all flavors should contain the expected flavor
-        resp, flavors = self.client.list_flavors()
-        resp, flavor = self.client.get_flavor_details(self.flavor_ref)
-        flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
-                             'name': flavor['name']}
-        self.assertIn(flavor_min_detail, flavors)
-
-    @test.attr(type='smoke')
-    def test_list_flavors_with_detail(self):
-        # Detailed list of all flavors should contain the expected flavor
-        resp, flavors = self.client.list_flavors_with_detail()
-        resp, flavor = self.client.get_flavor_details(self.flavor_ref)
-        self.assertIn(flavor, flavors)
-
-    @test.attr(type='smoke')
-    def test_get_flavor(self):
-        # The expected flavor details should be returned
-        resp, flavor = self.client.get_flavor_details(self.flavor_ref)
-        self.assertEqual(self.flavor_ref, flavor['id'])
-
-    @test.attr(type='gate')
-    def test_list_flavors_limit_results(self):
-        # Only the expected number of flavors should be returned
-        params = {'limit': 1}
-        resp, flavors = self.client.list_flavors(params)
-        self.assertEqual(1, len(flavors))
-
-    @test.attr(type='gate')
-    def test_list_flavors_detailed_limit_results(self):
-        # Only the expected number of flavors (detailed) should be returned
-        params = {'limit': 1}
-        resp, flavors = self.client.list_flavors_with_detail(params)
-        self.assertEqual(1, len(flavors))
-
-    @test.attr(type='gate')
-    def test_list_flavors_using_marker(self):
-        # The list of flavors should start from the provided marker
-        resp, flavors = self.client.list_flavors()
-        flavor_id = flavors[0]['id']
-
-        params = {'marker': flavor_id}
-        resp, flavors = self.client.list_flavors(params)
-        self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
-                         'The list of flavors did not start after the marker.')
-
-    @test.attr(type='gate')
-    def test_list_flavors_detailed_using_marker(self):
-        # The list of flavors should start from the provided marker
-        resp, flavors = self.client.list_flavors_with_detail()
-        flavor_id = flavors[0]['id']
-
-        params = {'marker': flavor_id}
-        resp, flavors = self.client.list_flavors_with_detail(params)
-        self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
-                         'The list of flavors did not start after the marker.')
-
-    @test.attr(type='gate')
-    def test_list_flavors_detailed_filter_by_min_disk(self):
-        # The detailed list of flavors should be filtered by disk space
-        resp, flavors = self.client.list_flavors_with_detail()
-        flavors = sorted(flavors, key=lambda k: k['disk'])
-        flavor_id = flavors[0]['id']
-
-        params = {'min_disk': flavors[0]['disk'] + 1}
-        resp, flavors = self.client.list_flavors_with_detail(params)
-        self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
-    @test.attr(type='gate')
-    def test_list_flavors_detailed_filter_by_min_ram(self):
-        # The detailed list of flavors should be filtered by RAM
-        resp, flavors = self.client.list_flavors_with_detail()
-        flavors = sorted(flavors, key=lambda k: k['ram'])
-        flavor_id = flavors[0]['id']
-
-        params = {'min_ram': flavors[0]['ram'] + 1}
-        resp, flavors = self.client.list_flavors_with_detail(params)
-        self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
-    @test.attr(type='gate')
-    def test_list_flavors_filter_by_min_disk(self):
-        # The list of flavors should be filtered by disk space
-        resp, flavors = self.client.list_flavors_with_detail()
-        flavors = sorted(flavors, key=lambda k: k['disk'])
-        flavor_id = flavors[0]['id']
-
-        params = {'min_disk': flavors[0]['disk'] + 1}
-        resp, flavors = self.client.list_flavors(params)
-        self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
-    @test.attr(type='gate')
-    def test_list_flavors_filter_by_min_ram(self):
-        # The list of flavors should be filtered by RAM
-        resp, flavors = self.client.list_flavors_with_detail()
-        flavors = sorted(flavors, key=lambda k: k['ram'])
-        flavor_id = flavors[0]['id']
-
-        params = {'min_ram': flavors[0]['ram'] + 1}
-        resp, flavors = self.client.list_flavors(params)
-        self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
diff --git a/tempest/api/compute/v3/flavors/test_flavors_negative.py b/tempest/api/compute/v3/flavors/test_flavors_negative.py
index 1c0e4fb..657e2cd 100644
--- a/tempest/api/compute/v3/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/v3/flavors/test_flavors_negative.py
@@ -20,16 +20,14 @@
 load_tests = test.NegativeAutoTest.load_tests
 
 
+@test.SimpleNegativeAutoTest
 class FlavorsListNegativeV3Test(base.BaseV3ComputeTest,
                                 test.NegativeAutoTest):
     _service = 'computev3'
     _schema_file = 'compute/flavors/flavors_list_v3.json'
 
-    @test.attr(type=['negative', 'gate'])
-    def test_list_flavors_with_detail(self):
-        self.execute(self._schema_file)
 
-
+@test.SimpleNegativeAutoTest
 class FlavorDetailsNegativeV3Test(base.BaseV3ComputeTest,
                                   test.NegativeAutoTest):
     _service = 'computev3'
@@ -39,8 +37,3 @@
     def setUpClass(cls):
         super(FlavorDetailsNegativeV3Test, cls).setUpClass()
         cls.set_resource("flavor", cls.flavor_ref)
-
-    @test.attr(type=['negative', 'gate'])
-    def test_get_flavor_details(self):
-        # flavor details are not returned for non-existent flavors
-        self.execute(self._schema_file)
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 3aab1e1..795437b 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -41,6 +41,11 @@
             # Usually it means the server had a serious accident
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        """Terminate test instances created after a test is executed."""
+        self.server_check_teardown()
+        super(ImagesOneServerV3Test, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
         super(ImagesOneServerV3Test, cls).setUpClass()
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index 7679eee..eed81c6 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
         for image_id in self.image_ids:
             self.client.delete_image(image_id)
             self.image_ids.remove(image_id)
+        self.server_check_teardown()
         super(ImagesOneServerNegativeV3Test, self).tearDown()
 
     def setUp(self):
diff --git a/tempest/api/compute/v3/keypairs/test_keypairs.py b/tempest/api/compute/v3/keypairs/test_keypairs.py
deleted file mode 100644
index 668a295..0000000
--- a/tempest/api/compute/v3/keypairs/test_keypairs.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class KeyPairsV3Test(base.BaseV3ComputeTest):
-
-    @classmethod
-    def setUpClass(cls):
-        super(KeyPairsV3Test, cls).setUpClass()
-        cls.client = cls.keypairs_client
-
-    def _delete_keypair(self, keypair_name):
-        resp, _ = self.client.delete_keypair(keypair_name)
-        self.assertEqual(204, resp.status)
-
-    def _create_keypair(self, keypair_name, pub_key=None):
-        resp, body = self.client.create_keypair(keypair_name, pub_key)
-        self.addCleanup(self._delete_keypair, keypair_name)
-        return resp, body
-
-    @test.attr(type='gate')
-    def test_keypairs_create_list_delete(self):
-        # Keypairs created should be available in the response list
-        # Create 3 keypairs
-        key_list = list()
-        for i in range(3):
-            k_name = data_utils.rand_name('keypair-')
-            resp, keypair = self._create_keypair(k_name)
-            # Need to pop these keys so that our compare doesn't fail later,
-            # as the keypair dicts from list API doesn't have them.
-            keypair.pop('private_key')
-            keypair.pop('user_id')
-            self.assertEqual(201, resp.status)
-            key_list.append(keypair)
-        # Fetch all keypairs and verify the list
-        # has all created keypairs
-        resp, fetched_list = self.client.list_keypairs()
-        self.assertEqual(200, resp.status)
-        # We need to remove the extra 'keypair' element in the
-        # returned dict. See comment in keypairs_client.list_keypairs()
-        new_list = list()
-        for keypair in fetched_list:
-            new_list.append(keypair['keypair'])
-        fetched_list = new_list
-        # Now check if all the created keypairs are in the fetched list
-        missing_kps = [kp for kp in key_list if kp not in fetched_list]
-        self.assertFalse(missing_kps,
-                         "Failed to find keypairs %s in fetched list"
-                         % ', '.join(m_key['name'] for m_key in missing_kps))
-
-    @test.attr(type='gate')
-    def test_keypair_create_delete(self):
-        # Keypair should be created, verified and deleted
-        k_name = data_utils.rand_name('keypair-')
-        resp, keypair = self._create_keypair(k_name)
-        self.assertEqual(201, resp.status)
-        private_key = keypair['private_key']
-        key_name = keypair['name']
-        self.assertEqual(key_name, k_name,
-                         "The created keypair name is not equal "
-                         "to the requested name")
-        self.assertTrue(private_key is not None,
-                        "Field private_key is empty or not found.")
-
-    @test.attr(type='gate')
-    def test_get_keypair_detail(self):
-        # Keypair should be created, Got details by name and deleted
-        k_name = data_utils.rand_name('keypair-')
-        resp, keypair = self._create_keypair(k_name)
-        resp, keypair_detail = self.client.get_keypair(k_name)
-        self.assertEqual(200, resp.status)
-        self.assertIn('name', keypair_detail)
-        self.assertIn('public_key', keypair_detail)
-        self.assertEqual(keypair_detail['name'], k_name,
-                         "The created keypair name is not equal "
-                         "to requested name")
-        public_key = keypair_detail['public_key']
-        self.assertTrue(public_key is not None,
-                        "Field public_key is empty or not found.")
-
-    @test.attr(type='gate')
-    def test_keypair_create_with_pub_key(self):
-        # Keypair should be created with a given public key
-        k_name = data_utils.rand_name('keypair-')
-        pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
-                   "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
-                   "aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
-                   "KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
-                   "I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
-                   "TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
-                   "LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
-                   "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
-                   "snSA8wzBx3A/8y9Pp1B nova@ubuntu")
-        resp, keypair = self._create_keypair(k_name, pub_key)
-        self.assertEqual(201, resp.status)
-        self.assertFalse('private_key' in keypair,
-                         "Field private_key is not empty!")
-        key_name = keypair['name']
-        self.assertEqual(key_name, k_name,
-                         "The created keypair name is not equal "
-                         "to the requested name!")
diff --git a/tempest/api/compute/v3/servers/test_attach_interfaces.py b/tempest/api/compute/v3/servers/test_attach_interfaces.py
index e1c69d9..43440c1 100644
--- a/tempest/api/compute/v3/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/v3/servers/test_attach_interfaces.py
@@ -16,7 +16,7 @@
 from tempest.api.compute import base
 from tempest import config
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 import time
 
@@ -106,7 +106,7 @@
 
         self.assertEqual(sorted(list1), sorted(list2))
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_create_list_show_delete_interfaces(self):
         server, ifs = self._create_server_get_interfaces()
         interface_count = len(ifs)
@@ -127,7 +127,7 @@
         _ifs = self._test_delete_interface(server, ifs)
         self.assertEqual(len(ifs) - 1, len(_ifs))
 
-    @attr(type='gate')
+    @test.attr(type='smoke')
     def test_add_remove_fixed_ip(self):
         # Add a fixed IP to the server and then remove it.
         server, ifs = self._create_server_get_interfaces()
diff --git a/tempest/api/compute/v3/servers/test_attach_volume.py b/tempest/api/compute/v3/servers/test_attach_volume.py
index 8577aab..e994c7f 100644
--- a/tempest/api/compute/v3/servers/test_attach_volume.py
+++ b/tempest/api/compute/v3/servers/test_attach_volume.py
@@ -24,7 +24,6 @@
 
 
 class AttachVolumeV3Test(base.BaseV3ComputeTest):
-    run_ssh = CONF.compute.run_ssh
 
     def __init__(self, *args, **kwargs):
         super(AttachVolumeV3Test, self).__init__(*args, **kwargs)
@@ -34,6 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(AttachVolumeV3Test, cls).setUpClass()
         cls.device = CONF.compute.volume_device_name
         if not CONF.service_available.cinder:
@@ -76,7 +76,7 @@
         self.attached = True
         self.addCleanup(self._detach, server['id'], volume['id'])
 
-    @testtools.skipIf(not run_ssh, 'SSH required for this test')
+    @testtools.skipUnless(CONF.compute.run_ssh, 'SSH required for this test')
     @test.attr(type='gate')
     def test_attach_detach_volume(self):
         # Stop and Start a server with an attached volume, ensuring that
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index 14a4338..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -28,11 +28,11 @@
 
 
 class ServersV3Test(base.BaseV3ComputeTest):
-    run_ssh = CONF.compute.run_ssh
     disk_config = 'AUTO'
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersV3Test, cls).setUpClass()
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
@@ -54,13 +54,6 @@
         resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @test.attr(type='smoke')
-    def test_create_server_response(self):
-        # Check that the required fields are returned with values
-        self.assertEqual(202, self.resp.status)
-        self.assertTrue(self.server_initial['id'] is not None)
-        self.assertTrue(self.server_initial['admin_password'] is not None)
-
-    @test.attr(type='smoke')
     def test_verify_server_details(self):
         # Verify the specified server attributes are set correctly
         self.assertEqual(self.accessIPv4,
@@ -90,7 +83,8 @@
         found = any([i for i in servers if i['id'] == self.server['id']])
         self.assertTrue(found)
 
-    @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+    @testtools.skipUnless(CONF.compute.run_ssh,
+                          'Instance validation tests are disabled.')
     @test.attr(type='gate')
     def test_verify_created_server_vcpus(self):
         # Verify that the number of vcpus reported by the instance matches
@@ -100,7 +94,8 @@
                                                   self.ssh_user, self.password)
         self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
 
-    @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+    @testtools.skipUnless(CONF.compute.run_ssh,
+                          'Instance validation tests are disabled.')
     @test.attr(type='gate')
     def test_host_name_is_same_as_server_name(self):
         # Verify the instance host name is the same as the server name
@@ -110,38 +105,22 @@
 
 
 class ServersWithSpecificFlavorV3Test(base.BaseV3ComputeAdminTest):
-    run_ssh = CONF.compute.run_ssh
     disk_config = 'AUTO'
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
-        cls.meta = {'hello': 'world'}
-        cls.accessIPv4 = '1.1.1.1'
-        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
-        cls.name = data_utils.rand_name('server')
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
         cls.client = cls.servers_client
         cls.flavor_client = cls.flavors_admin_client
-        cli_resp = cls.create_test_server(name=cls.name,
-                                          meta=cls.meta,
-                                          access_ip_v4=cls.accessIPv4,
-                                          access_ip_v6=cls.accessIPv6,
-                                          personality=personality,
-                                          disk_config=cls.disk_config)
-        cls.resp, cls.server_initial = cli_resp
-        cls.password = cls.server_initial['admin_password']
-        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
-        resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
-    @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+    @testtools.skipUnless(CONF.compute.run_ssh,
+                          'Instance validation tests are disabled.')
     @test.attr(type='gate')
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
 
-        def create_flavor_with_extra_specs(self):
+        def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
             ram = 512
@@ -153,13 +132,13 @@
                             create_flavor(flavor_with_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_with_eph_disk_id,
-                                          ephemeral=1, swap=1024, rxtx=1))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
-            self.assertEqual(200, resp.status)
+                                          ephemeral=1, rxtx=1))
+            self.addCleanup(flavor_clean_up, flavor['id'])
+            self.assertEqual(201, resp.status)
 
             return flavor['id']
 
-        def create_flavor_without_extra_specs(self):
+        def create_flavor_without_extra_specs():
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
@@ -172,18 +151,18 @@
                             create_flavor(flavor_no_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_no_eph_disk_id))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
-            self.assertEqual(200, resp.status)
+            self.addCleanup(flavor_clean_up, flavor['id'])
+            self.assertEqual(201, resp.status)
 
             return flavor['id']
 
-        def flavor_clean_up(self, flavor_id):
+        def flavor_clean_up(flavor_id):
             resp, body = self.flavor_client.delete_flavor(flavor_id)
-            self.assertEqual(resp.status, 202)
+            self.assertEqual(resp.status, 204)
             self.flavor_client.wait_for_resource_deletion(flavor_id)
 
-        flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
-        flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
 
         admin_pass = self.image_ssh_password
 
@@ -196,13 +175,17 @@
                                       adminPass=admin_pass,
                                       flavor=flavor_with_eph_disk_id))
         # Get partition number of server without extra specs.
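+        # Refresh the server details from the API before connecting over SSH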
+        _, server_no_eph_disk = self.client.get_server(
+            server_no_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_no_eph_disk,
-                                                  self.ssh_user, self.password)
-        partition_num = len(linux_client.get_partitions())
-
+                                                  self.ssh_user, admin_pass)
+        partition_num = len(linux_client.get_partitions().split('\n'))
+        _, server_with_eph_disk = self.client.get_server(
+            server_with_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_with_eph_disk,
-                                                  self.ssh_user, self.password)
-        self.assertEqual(partition_num + 1, linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num_eph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_eph)
 
 
 class ServersV3TestManualDisk(ServersV3Test):
diff --git a/tempest/api/compute/v3/servers/test_delete_server.py b/tempest/api/compute/v3/servers/test_delete_server.py
index d694a33..add69ab 100644
--- a/tempest/api/compute/v3/servers/test_delete_server.py
+++ b/tempest/api/compute/v3/servers/test_delete_server.py
@@ -56,6 +56,8 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_delete_server_while_in_pause_state(self):
         # Delete a server while it's VM state is Pause
@@ -99,6 +101,25 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @test.attr(type='gate')
+    def test_delete_server_while_in_attached_volume(self):
+        # Delete a server while a volume is attached to it
+        device = '/dev/%s' % CONF.compute.volume_device_name
+        resp, server = self.create_test_server(wait_until='ACTIVE')
+
+        resp, volume = self.volumes_client.create_volume(1)
+        self.addCleanup(self.volumes_client.delete_volume, volume['id'])
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        resp, body = self.client.attach_volume(server['id'],
+                                               volume['id'],
+                                               device=device)
+        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
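+        # Deleting the server should detach the volume and make it available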
+        resp, _ = self.client.delete_server(server['id'])
+        self.assertEqual('204', resp['status'])
+        self.client.wait_for_server_termination(server['id'])
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+
 
 class DeleteServersAdminV3Test(base.BaseV3ComputeAdminTest):
     # NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/v3/servers/test_instance_actions.py b/tempest/api/compute/v3/servers/test_instance_actions.py
index 7d25100..399541b 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions.py
@@ -27,25 +27,27 @@
         cls.resp = resp
         cls.server_id = server['id']
 
+    @test.skip_because(bug="1206032")
     @test.attr(type='gate')
-    def test_list_instance_actions(self):
+    def test_list_server_actions(self):
         # List actions of the provided server
         resp, body = self.client.reboot(self.server_id, 'HARD')
         self.client.wait_for_server_status(self.server_id, 'ACTIVE')
 
-        resp, body = self.client.list_instance_actions(self.server_id)
+        resp, body = self.client.list_server_actions(self.server_id)
         self.assertEqual(200, resp.status)
         self.assertTrue(len(body) == 2, str(body))
         self.assertTrue(any([i for i in body if i['action'] == 'create']))
         self.assertTrue(any([i for i in body if i['action'] == 'reboot']))
 
+    @test.skip_because(bug="1206032")
     @test.attr(type='gate')
     @test.skip_because(bug="1281915")
-    def test_get_instance_action(self):
+    def test_get_server_action(self):
         # Get the action details of the provided server
         request_id = self.resp['x-compute-request-id']
-        resp, body = self.client.get_instance_action(self.server_id,
-                                                     request_id)
+        resp, body = self.client.get_server_action(self.server_id,
+                                                   request_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(self.server_id, body['instance_uuid'])
+        self.assertEqual(self.server_id, body['server_uuid'])
         self.assertEqual('create', body['action'])
diff --git a/tempest/api/compute/v3/servers/test_instance_actions_negative.py b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
index b0a7050..0b2c6f9 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
@@ -29,15 +29,15 @@
         cls.server_id = server['id']
 
     @test.attr(type=['negative', 'gate'])
-    def test_list_instance_actions_invalid_server(self):
+    def test_list_server_actions_invalid_server(self):
         # List actions of an invalid server id
         invalid_server_id = data_utils.rand_uuid()
         self.assertRaises(exceptions.NotFound,
-                          self.client.list_instance_actions, invalid_server_id)
+                          self.client.list_server_actions, invalid_server_id)
 
     @test.attr(type=['negative', 'gate'])
-    def test_get_instance_action_invalid_request(self):
+    def test_get_server_action_invalid_request(self):
         # Get the action details of the provided server with invalid request
         invalid_request_id = 'req-' + data_utils.rand_uuid()
-        self.assertRaises(exceptions.NotFound, self.client.get_instance_action,
+        self.assertRaises(exceptions.NotFound, self.client.get_server_action,
                           self.server_id, invalid_request_id)
diff --git a/tempest/api/compute/v3/servers/test_list_server_filters.py b/tempest/api/compute/v3/servers/test_list_server_filters.py
index 2cb176c..778b033 100644
--- a/tempest/api/compute/v3/servers/test_list_server_filters.py
+++ b/tempest/api/compute/v3/servers/test_list_server_filters.py
@@ -28,6 +28,7 @@
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
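+        # Ask for a network, subnet and DHCP so servers receive fixed IPs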
+        cls.set_network_resources(network=True, subnet=True, dhcp=True)
         super(ListServerFiltersV3Test, cls).setUpClass()
         cls.client = cls.servers_client
 
@@ -70,7 +71,12 @@
                                               flavor=cls.flavor_ref_alt,
                                               wait_until='ACTIVE')
 
-        cls.fixed_network_name = CONF.compute.fixed_network_name
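+        # With Neutron and tenant isolation the servers land on the isolated
+        # tenant network, so filter on that network's name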
+        if (CONF.service_available.neutron and
+                CONF.compute.allow_tenant_isolation):
+            network = cls.isolated_creds.get_primary_network()
+            cls.fixed_network_name = network['name']
+        else:
+            cls.fixed_network_name = CONF.compute.fixed_network_name
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @test.attr(type='gate')
@@ -226,7 +232,6 @@
         self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
         self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
-    @test.skip_because(bug="1170718")
     @test.attr(type='gate')
     def test_list_servers_filtered_by_ip(self):
         # Filter servers by ip
diff --git a/tempest/api/compute/v3/servers/test_list_servers_negative.py b/tempest/api/compute/v3/servers/test_list_servers_negative.py
index 9cbc4e0..18e5c67 100644
--- a/tempest/api/compute/v3/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_list_servers_negative.py
@@ -39,7 +39,7 @@
         cls.deleted_fixtures = []
         cls.start_time = datetime.datetime.utcnow()
         for x in moves.xrange(2):
-            resp, srv = cls.create_test_server()
+            resp, srv = cls.create_test_server(wait_until='ACTIVE')
             cls.existing_fixtures.append(srv)
 
         resp, srv = cls.create_test_server()
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index 2582fa8..e098311 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 import testtools
+import urlparse
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
@@ -26,7 +27,6 @@
 
 
 class ServerActionsV3Test(base.BaseV3ComputeTest):
-    resize_available = CONF.compute_feature_enabled.resize
     run_ssh = CONF.compute.run_ssh
 
     def setUp(self):
@@ -40,8 +40,15 @@
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        _, server = self.client.get_server(self.server_id)
+        self.assertEqual(self.image_ref, server['image']['id'])
+        self.server_check_teardown()
+        super(ServerActionsV3Test, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServerActionsV3Test, cls).setUpClass()
         cls.client = cls.servers_client
         cls.server_id = cls.rebuild_server(None)
@@ -117,7 +124,6 @@
                                                    name=new_name,
                                                    metadata=meta,
                                                    admin_password=password)
-        self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -138,6 +144,9 @@
                                                       password)
             linux_client.validate_authentication()
 
+        if self.image_ref_alt != self.image_ref:
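+            # Restore the original image so the tearDown image check passes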
+            self.client.rebuild(self.server_id, self.image_ref)
+
     @test.attr(type='gate')
     def test_rebuild_server_in_stop_state(self):
         # The server in stop state should be rebuilt using the provided
@@ -149,11 +158,7 @@
         resp, server = self.client.stop(self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
-        self.addCleanup(self.client.start, self.server_id)
         resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
-        self.addCleanup(self.client.wait_for_server_status, self.server_id,
-                        'SHUTOFF')
-        self.addCleanup(self.client.rebuild, self.server_id, old_image)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -167,6 +172,12 @@
         rebuilt_image_id = server['image']['id']
         self.assertEqual(new_image, rebuilt_image_id)
 
+        # Restore to the original image (The tearDown will test it again)
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, old_image)
+            self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+        self.client.start(self.server_id)
+
     def _detect_server_image_flavor(self, server_id):
         # Detects the current server image flavor ref.
         resp, server = self.client.get_server(server_id)
@@ -175,26 +186,48 @@
             if current_flavor == self.flavor_ref else self.flavor_ref
         return current_flavor, new_flavor_ref
 
-    @testtools.skipIf(not resize_available, 'Resize not available.')
-    @test.attr(type='smoke')
-    def test_resize_server_confirm(self):
+    def _test_resize_server_confirm(self, stop=False):
         # The server's RAM and disk space should be modified to that of
         # the provided flavor
 
         previous_flavor_ref, new_flavor_ref = \
             self._detect_server_image_flavor(self.server_id)
 
+        if stop:
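+            # Stop the server first so the resize is done from SHUTOFF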
+            resp = self.servers_client.stop(self.server_id)[0]
+            self.assertEqual(202, resp.status)
+            self.servers_client.wait_for_server_status(self.server_id,
+                                                       'SHUTOFF')
+
         resp, server = self.client.resize(self.server_id, new_flavor_ref)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
 
         self.client.confirm_resize(self.server_id)
-        self.client.wait_for_server_status(self.server_id, 'ACTIVE')
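+        # A server resized while stopped stays SHUTOFF after the confirm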
+        expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+        self.client.wait_for_server_status(self.server_id, expected_status)
 
         resp, server = self.client.get_server(self.server_id)
         self.assertEqual(new_flavor_ref, server['flavor']['id'])
 
-    @testtools.skipIf(not resize_available, 'Resize not available.')
+        if stop:
+            # NOTE(mriedem): tearDown requires the server to be started.
+            self.client.start(self.server_id)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm(self):
+        self._test_resize_server_confirm(stop=False)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm_from_stopped(self):
+        self._test_resize_server_confirm(stop=True)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
     @test.attr(type='gate')
     def test_resize_server_revert(self):
         # The server's RAM and disk space should return to its original
@@ -328,6 +361,8 @@
 
         self.wait_for(self._get_output)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_pause_unpause_server(self):
         resp, server = self.client.pause_server(self.server_id)
@@ -337,6 +372,8 @@
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type='gate')
     def test_suspend_resume_server(self):
         resp, server = self.client.suspend_server(self.server_id)
@@ -404,6 +441,12 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
 
+    def _validate_url(self, url):
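+        # A valid console URL must have a hostname and an http(s) scheme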
+        valid_scheme = ['http', 'https']
+        parsed_url = urlparse.urlparse(url)
+        self.assertIsNotNone(parsed_url.hostname)
+        self.assertIn(parsed_url.scheme, valid_scheme)
+
     @testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
                           'VNC Console feature is disabled')
     @test.attr(type='gate')
@@ -413,6 +456,35 @@
         for console_type in console_types:
             resp, body = self.servers_client.get_vnc_console(self.server_id,
                                                              console_type)
-            self.assertEqual(200, resp.status)
+            self.assertEqual(
+                200, resp.status,
+                "Failed to get Console Type: %s" % (console_type))
             self.assertEqual(console_type, body['type'])
             self.assertNotEqual('', body['url'])
+            self._validate_url(body['url'])
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.spice_console,
+                          'Spice Console feature is disabled.')
+    @test.attr(type='gate')
+    def test_get_spice_console(self):
+        # Get the Spice console of type "spice-html5"
+        console_type = 'spice-html5'
+        resp, body = self.servers_client.get_spice_console(self.server_id,
+                                                           console_type)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(console_type, body['type'])
+        self.assertNotEqual('', body['url'])
+        self._validate_url(body['url'])
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.rdp_console,
+                          'RDP Console feature is disabled.')
+    @test.attr(type='gate')
+    def test_get_rdp_console(self):
+        # Get the RDP console of type "rdp-html5"
+        console_type = 'rdp-html5'
+        resp, body = self.servers_client.get_rdp_console(self.server_id,
+                                                         console_type)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(console_type, body['type'])
+        self.assertNotEqual('', body['url'])
+        self._validate_url(body['url'])
diff --git a/tempest/api/compute/v3/servers/test_server_metadata.py b/tempest/api/compute/v3/servers/test_server_metadata.py
index 0e4ef07..298cd3c 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata.py
@@ -24,10 +24,7 @@
         super(ServerMetadataV3Test, cls).setUpClass()
         cls.client = cls.servers_client
         cls.quotas = cls.quotas_client
-        cls.admin_client = cls._get_identity_admin_client()
-        resp, tenants = cls.admin_client.list_tenants()
-        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                         cls.client.tenant_name][0]
+        cls.tenant_id = cls.client.tenant_id
         resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
 
         cls.server_id = server['id']
diff --git a/tempest/api/compute/v3/servers/test_server_metadata_negative.py b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
index ec2bc8c..f746be3 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
@@ -25,10 +25,7 @@
         super(ServerMetadataV3NegativeTest, cls).setUpClass()
         cls.client = cls.servers_client
         cls.quotas = cls.quotas_client
-        cls.admin_client = cls._get_identity_admin_client()
-        resp, tenants = cls.admin_client.list_tenants()
-        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                         cls.client.tenant_name][0]
+        cls.tenant_id = cls.client.tenant_id
         resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
 
         cls.server_id = server['id']
diff --git a/tempest/api/compute/v3/servers/test_server_rescue_negative.py b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
index 6bb441c..eb6bcdd 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
@@ -12,12 +12,16 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import testtools
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServerRescueNegativeV3Test(base.BaseV3ComputeTest):
 
@@ -44,6 +48,7 @@
         cls.servers_client.rescue_server(
             cls.rescue_id, admin_password=cls.rescue_password)
         cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')
+        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
 
     @classmethod
     def tearDownClass(cls):
@@ -65,6 +70,8 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_rescue_paused_instance(self):
         # Rescue a paused server
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index cb5e93d..90deaa9 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -16,6 +16,8 @@
 import base64
 import sys
 
+import testtools
+
 from tempest.api.compute import base
 from tempest import clients
 from tempest.common.utils import data_utils
@@ -35,11 +37,18 @@
         except Exception:
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        self.server_check_teardown()
+        super(ServersNegativeV3Test, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
         super(ServersNegativeV3Test, cls).setUpClass()
         cls.client = cls.servers_client
-        cls.alt_os = clients.AltManager()
+        if CONF.compute.allow_tenant_isolation:
+            cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
+        else:
+            cls.alt_os = clients.AltManager()
         cls.alt_client = cls.alt_os.servers_v3_client
         resp, server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
@@ -113,16 +122,17 @@
         self.assertRaises(exceptions.NotFound, self.client.reboot,
                           nonexistent_server, 'SOFT')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_paused_server(self):
         # Pause a paused server.
         self.client.pause_server(self.server_id)
-        self.addCleanup(self.client.unpause_server,
-                        self.server_id)
         self.client.wait_for_server_status(self.server_id, 'PAUSED')
         self.assertRaises(exceptions.Conflict,
                           self.client.pause_server,
                           self.server_id)
+        self.client.unpause_server(self.server_id)
 
     @test.attr(type=['negative', 'gate'])
     def test_rebuild_reboot_deleted_server(self):
@@ -311,6 +321,8 @@
                           self.client.unpause_server,
                           self.server_id)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_suspend_non_existent_server(self):
         # suspend a non existent server
@@ -318,17 +330,18 @@
         self.assertRaises(exceptions.NotFound, self.client.suspend_server,
                           nonexistent_server)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_suspend_server_invalid_state(self):
         # suspend a suspended server.
         resp, _ = self.client.suspend_server(self.server_id)
-        self.addCleanup(self.client.resume_server,
-                        self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
         self.assertRaises(exceptions.Conflict,
                           self.client.suspend_server,
                           self.server_id)
+        self.client.resume_server(self.server_id)
 
     @test.attr(type=['negative', 'gate'])
     def test_resume_non_existent_server(self):
@@ -337,6 +350,8 @@
         self.assertRaises(exceptions.NotFound, self.client.resume_server,
                           nonexistent_server)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resume_server_invalid_state(self):
         # resume an active server.
@@ -394,7 +409,6 @@
         # shelve a shelved server.
         resp, server = self.client.shelve_server(self.server_id)
         self.assertEqual(202, resp.status)
-        self.addCleanup(self.client.unshelve_server, self.server_id)
 
         offload_time = CONF.compute.shelved_offload_time
         if offload_time >= 0:
@@ -415,6 +429,8 @@
                           self.client.shelve_server,
                           self.server_id)
 
+        self.client.unshelve_server(self.server_id)
+
     @test.attr(type=['negative', 'gate'])
     def test_unshelve_non_existent_server(self):
         # unshelve a non existent server
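
Editor's note: the negative server tests above now guard pause/suspend scenarios with feature flags and put the server back into its original state inside the test body instead of via addCleanup. A minimal sketch of the pattern, assuming the base class, client and server_id from the hunks; the test name here is illustrative only:

    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                          'Pause is not available.')
    @test.attr(type=['negative', 'gate'])
    def test_pause_already_paused_server(self):
        # Pausing a paused server must raise Conflict; the server is
        # unpaused explicitly at the end rather than through addCleanup,
        # matching the change made in the hunk above.
        self.client.pause_server(self.server_id)
        self.client.wait_for_server_status(self.server_id, 'PAUSED')
        self.assertRaises(exceptions.Conflict,
                          self.client.pause_server, self.server_id)
        self.client.unpause_server(self.server_id)
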
diff --git a/tempest/api/compute/v3/test_live_block_migration.py b/tempest/api/compute/v3/test_live_block_migration.py
index 33d2bd9..6ca37e6 100644
--- a/tempest/api/compute/v3/test_live_block_migration.py
+++ b/tempest/api/compute/v3/test_live_block_migration.py
@@ -17,7 +17,7 @@
 
 from tempest.api.compute import base
 from tempest import config
-from tempest.test import attr
+from tempest import test
 
 CONF = config.CONF
 
@@ -85,7 +85,7 @@
 
     @testtools.skipIf(not CONF.compute_feature_enabled.live_migration,
                       'Live migration not available')
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_live_block_migration(self):
         # Live block migrate an instance to another host
         if len(self._get_compute_hostnames()) < 2:
@@ -105,7 +105,7 @@
     @testtools.skipIf(not CONF.compute_feature_enabled.
                       block_migrate_cinder_iscsi,
                       'Block Live migration not configured for iSCSI')
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_iscsi_volume(self):
         # Live block migrate an instance to another host
         if len(self._get_compute_hostnames()) < 2:
diff --git a/tempest/api/compute/v3/test_quotas.py b/tempest/api/compute/v3/test_quotas.py
index 3fe62e9..62a7556 100644
--- a/tempest/api/compute/v3/test_quotas.py
+++ b/tempest/api/compute/v3/test_quotas.py
@@ -23,13 +23,8 @@
     def setUpClass(cls):
         super(QuotasV3Test, cls).setUpClass()
         cls.client = cls.quotas_client
-        cls.admin_client = cls._get_identity_admin_client()
-        resp, tenants = cls.admin_client.list_tenants()
-        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
-                         cls.client.tenant_name][0]
-        resp, users = cls.admin_client.list_users_for_tenant(cls.tenant_id)
-        cls.user_id = [user['id'] for user in users if user['name'] ==
-                       cls.client.user][0]
+        cls.tenant_id = cls.client.tenant_id
+        cls.user_id = cls.client.user_id
         cls.default_quota_set = set(('metadata_items',
                                      'ram', 'floating_ips',
                                      'fixed_ips', 'key_pairs',
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 3c5feed..5a64544 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -24,7 +24,6 @@
 
 
 class AttachVolumeTestJSON(base.BaseV2ComputeTest):
-    run_ssh = CONF.compute.run_ssh
 
     def __init__(self, *args, **kwargs):
         super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)
@@ -34,6 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(AttachVolumeTestJSON, cls).setUpClass()
         cls.device = CONF.compute.volume_device_name
         if not CONF.service_available.cinder:
@@ -53,60 +53,62 @@
     def _create_and_attach(self):
         # Start a server and wait for it to become ready
         admin_pass = self.image_ssh_password
-        resp, server = self.create_test_server(wait_until='ACTIVE',
-                                               adminPass=admin_pass)
-        self.server = server
+        _, self.server = self.create_test_server(wait_until='ACTIVE',
+                                                 adminPass=admin_pass)
 
         # Record addresses so that we can ssh later
-        resp, server['addresses'] = \
-            self.servers_client.list_addresses(server['id'])
+        _, self.server['addresses'] = \
+            self.servers_client.list_addresses(self.server['id'])
 
         # Create a volume and wait for it to become ready
-        resp, volume = self.volumes_client.create_volume(1,
-                                                         display_name='test')
-        self.volume = volume
+        _, self.volume = self.volumes_client.create_volume(
+            1, display_name='test')
         self.addCleanup(self._delete_volume)
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        self.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                   'available')
 
         # Attach the volume to the server
-        self.servers_client.attach_volume(server['id'], volume['id'],
+        self.servers_client.attach_volume(self.server['id'],
+                                          self.volume['id'],
                                           device='/dev/%s' % self.device)
-        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+        self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
 
         self.attached = True
-        self.addCleanup(self._detach, server['id'], volume['id'])
+        self.addCleanup(self._detach, self.server['id'], self.volume['id'])
 
-    @testtools.skipIf(not run_ssh, 'SSH required for this test')
+    @testtools.skipUnless(CONF.compute.run_ssh, 'SSH required for this test')
     @test.attr(type='gate')
     def test_attach_detach_volume(self):
         # Stop and Start a server with an attached volume, ensuring that
         # the volume remains attached.
         self._create_and_attach()
-        server = self.server
-        volume = self.volume
 
-        self.servers_client.stop(server['id'])
-        self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
+        self.servers_client.stop(self.server['id'])
+        self.servers_client.wait_for_server_status(self.server['id'],
+                                                   'SHUTOFF')
 
-        self.servers_client.start(server['id'])
-        self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+        self.servers_client.start(self.server['id'])
+        self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
 
-        linux_client = remote_client.RemoteClient(server, self.image_ssh_user,
-                                                  server['adminPass'])
+        linux_client = remote_client.RemoteClient(self.server,
+                                                  self.image_ssh_user,
+                                                  self.server['adminPass'])
         partitions = linux_client.get_partitions()
         self.assertIn(self.device, partitions)
 
-        self._detach(server['id'], volume['id'])
+        self._detach(self.server['id'], self.volume['id'])
         self.attached = False
 
-        self.servers_client.stop(server['id'])
-        self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
+        self.servers_client.stop(self.server['id'])
+        self.servers_client.wait_for_server_status(self.server['id'],
+                                                   'SHUTOFF')
 
-        self.servers_client.start(server['id'])
-        self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+        self.servers_client.start(self.server['id'])
+        self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
 
-        linux_client = remote_client.RemoteClient(server, self.image_ssh_user,
-                                                  server['adminPass'])
+        linux_client = remote_client.RemoteClient(self.server,
+                                                  self.image_ssh_user,
+                                                  self.server['adminPass'])
         partitions = linux_client.get_partitions()
         self.assertNotIn(self.device, partitions)
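
Editor's note: with the class-level run_ssh attribute removed, the ssh-dependent test reads the option straight from CONF in its skip decorator. A short sketch of the guard as it now stands (decorator and option names taken from the hunk above, body elided):

    @testtools.skipUnless(CONF.compute.run_ssh, 'SSH required for this test')
    @test.attr(type='gate')
    def test_attach_detach_volume(self):
        # create the server and volume, attach, then verify over ssh
        self._create_and_attach()
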
 
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 9867c64..25a8547 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -16,7 +16,7 @@
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest import config
-from tempest.test import attr
+from tempest import test
 
 CONF = config.CONF
 
@@ -75,7 +75,7 @@
             cls.delete_volume(volume['id'])
         super(VolumesTestJSON, cls).tearDownClass()
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_volume_list(self):
         # Should return the list of Volumes
         # Fetch all Volumes
@@ -91,7 +91,7 @@
                          ', '.join(m_vol['displayName']
                                    for m_vol in missing_volumes))
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_volume_list_with_details(self):
         # Should return the list of Volumes with details
         # Fetch all Volumes
@@ -107,7 +107,7 @@
                          ', '.join(m_vol['displayName']
                                    for m_vol in missing_volumes))
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_volume_list_param_limit(self):
         # Return the list of volumes based on limit set
         params = {'limit': 2}
@@ -117,7 +117,7 @@
         self.assertEqual(len(fetched_vol_list), params['limit'],
                          "Failed to list volumes by limit set")
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_volume_list_with_detail_param_limit(self):
         # Return the list of volumes with details based on limit set.
         params = {'limit': 2}
@@ -128,7 +128,7 @@
         self.assertEqual(len(fetched_vol_list), params['limit'],
                          "Failed to list volume details by limit set")
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_volume_list_param_offset_and_limit(self):
         # Return the list of volumes based on offset and limit set.
         # get all volumes list
@@ -146,7 +146,7 @@
                              all_vol_list[index + params['offset']]['id'],
                              "Failed to list volumes by offset and limit")
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_volume_list_with_detail_param_offset_and_limit(self):
         # Return the list of volumes details based on offset and limit set.
         # get all volumes list
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index cecaf62..5dfbad7 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -19,7 +19,7 @@
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 CONF = config.CONF
 
@@ -34,7 +34,7 @@
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_volume_get_nonexistent_volume_id(self):
         # Negative: Should not be able to get details of nonexistent volume
         # Creating a nonexistent volume id
@@ -42,7 +42,7 @@
         self.assertRaises(exceptions.NotFound, self.client.get_volume,
                           str(uuid.uuid4()))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_volume_delete_nonexistent_volume_id(self):
         # Negative: Should not be able to delete nonexistent Volume
         # Creating nonexistent volume id
@@ -50,7 +50,7 @@
         self.assertRaises(exceptions.NotFound, self.client.delete_volume,
                           str(uuid.uuid4()))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_volume_with_invalid_size(self):
         # Negative: Should not be able to create volume with invalid size
         # in request
@@ -59,7 +59,7 @@
         self.assertRaises(exceptions.BadRequest, self.client.create_volume,
                           size='#$%', display_name=v_name, metadata=metadata)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_volume_with_out_passing_size(self):
         # Negative: Should not be able to create volume without passing size
         # in request
@@ -68,7 +68,7 @@
         self.assertRaises(exceptions.BadRequest, self.client.create_volume,
                           size='', display_name=v_name, metadata=metadata)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_volume_with_size_zero(self):
         # Negative: Should not be able to create volume with size zero
         v_name = data_utils.rand_name('Volume-')
@@ -76,25 +76,25 @@
         self.assertRaises(exceptions.BadRequest, self.client.create_volume,
                           size='0', display_name=v_name, metadata=metadata)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_invalid_volume_id(self):
         # Negative: Should not be able to get volume with invalid id
         self.assertRaises(exceptions.NotFound,
                           self.client.get_volume, '#$%%&^&^')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_volume_without_passing_volume_id(self):
         # Negative: Should not be able to get volume when empty ID is passed
         self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_invalid_volume_id(self):
         # Negative: Should not be able to delete volume when invalid ID is
         # passed
         self.assertRaises(exceptions.NotFound,
                           self.client.delete_volume, '!@#$%^&*()')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_volume_without_passing_volume_id(self):
         # Negative: Should not be able to delete volume when empty ID is passed
         self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 73ad22b..0d6773c 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -1,19 +1,19 @@
-# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2014 Mirantis Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
 from tempest import config
+from tempest import exceptions
 import tempest.test
 
 
@@ -26,46 +26,45 @@
     @classmethod
     def setUpClass(cls):
         super(BaseDataProcessingTest, cls).setUpClass()
-        os = cls.get_client_manager()
         if not CONF.service_available.sahara:
-            raise cls.skipException("Sahara support is required")
+            raise cls.skipException('Sahara support is required')
+
+        os = cls.get_client_manager()
         cls.client = os.data_processing_client
 
-        # set some constants
         cls.flavor_ref = CONF.compute.flavor_ref
-        cls.simple_node_group_template = {
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'node_processes': [
-                "datanode",
-                "tasktracker"
-            ],
-            'flavor_id': cls.flavor_ref,
-            'node_configs': {
-                'HDFS': {
-                    'Data Node Heap Size': 1024
-                },
-                'MapReduce': {
-                    'Task Tracker Heap Size': 1024
-                }
-            }
-        }
 
         # add lists for watched resources
         cls._node_group_templates = []
+        cls._cluster_templates = []
+        cls._data_sources = []
+        cls._job_binary_internals = []
+        cls._job_binaries = []
 
     @classmethod
     def tearDownClass(cls):
-        # cleanup node group templates
-        for ngt_id in cls._node_group_templates:
-            try:
-                cls.client.delete_node_group_template(ngt_id)
-            except Exception:
-                # ignore errors while auto removing created resource
-                pass
+        cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
+                              cls.client.delete_cluster_template)
+        cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
+                              cls.client.delete_node_group_template)
+        cls.cleanup_resources(getattr(cls, '_data_sources', []),
+                              cls.client.delete_data_source)
+        cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
+                              cls.client.delete_job_binary_internal)
+        cls.cleanup_resources(getattr(cls, '_job_binaries', []),
+                              cls.client.delete_job_binary)
         cls.clear_isolated_creds()
         super(BaseDataProcessingTest, cls).tearDownClass()
 
+    @staticmethod
+    def cleanup_resources(resource_id_list, method):
+        for resource_id in resource_id_list:
+            try:
+                method(resource_id)
+            except exceptions.NotFound:
+                # ignore errors while auto removing created resource
+                pass
+
     @classmethod
     def create_node_group_template(cls, name, plugin_name, hadoop_version,
                                    node_processes, flavor_id,
@@ -76,16 +75,72 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-
         resp, body = cls.client.create_node_group_template(name, plugin_name,
                                                            hadoop_version,
                                                            node_processes,
                                                            flavor_id,
                                                            node_configs,
                                                            **kwargs)
-
         # store id of created node group template
-        template_id = body['id']
-        cls._node_group_templates.append(template_id)
+        cls._node_group_templates.append(body['id'])
 
-        return resp, body, template_id
+        return resp, body
+
+    @classmethod
+    def create_cluster_template(cls, name, plugin_name, hadoop_version,
+                                node_groups, cluster_configs=None, **kwargs):
+        """Creates watched cluster template with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object. All resources created in this method will be automatically
+        removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_cluster_template(name, plugin_name,
+                                                        hadoop_version,
+                                                        node_groups,
+                                                        cluster_configs,
+                                                        **kwargs)
+        # store id of created cluster template
+        cls._cluster_templates.append(body['id'])
+
+        return resp, body
+
+    @classmethod
+    def create_data_source(cls, name, type, url, **kwargs):
+        """Creates watched data source with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object. All resources created in this method will be automatically
+        removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+        # store id of created data source
+        cls._data_sources.append(body['id'])
+
+        return resp, body
+
+    @classmethod
+    def create_job_binary_internal(cls, name, data):
+        """Creates watched job binary internal with specified params.
+
+        It returns created object. All resources created in this method will
+        be automatically removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_job_binary_internal(name, data)
+        # store id of created job binary internal
+        cls._job_binary_internals.append(body['id'])
+
+        return resp, body
+
+    @classmethod
+    def create_job_binary(cls, name, url, extra=None, **kwargs):
+        """Creates watched job binary with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object. All resources created in this method will be automatically
+        removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_job_binary(name, url, extra, **kwargs)
+        # store id of created job binary
+        cls._job_binaries.append(body['id'])
+
+        return resp, body
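
Editor's note: the base class above now records every created resource id in a per-type class list and tears everything down through the shared cleanup_resources() helper, which swallows NotFound so already-deleted resources do not fail the run. A hedged sketch of how a further resource type would plug into the same scheme; the "job" resource and its client methods are hypothetical and not part of this change:

    @classmethod
    def create_job(cls, name, **kwargs):
        # hypothetical factory following the pattern of the classmethods above
        resp, body = cls.client.create_job(name, **kwargs)
        cls._jobs.append(body['id'])  # tracked for tearDownClass
        return resp, body

    # and in tearDownClass:
    #     cls.cleanup_resources(getattr(cls, '_jobs', []), cls.client.delete_job)
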
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
new file mode 100644
index 0000000..c08d6ba
--- /dev/null
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class ClusterTemplateTest(dp_base.BaseDataProcessingTest):
+    """Link to the API documentation is http://docs.openstack.org/developer/
+    sahara/restapi/rest_api_v1.0.html#cluster-templates
+    """
+    @classmethod
+    def setUpClass(cls):
+        super(ClusterTemplateTest, cls).setUpClass()
+        # create node group template
+        node_group_template = {
+            'name': data_utils.rand_name('sahara-ng-template'),
+            'description': 'Test node group template',
+            'plugin_name': 'vanilla',
+            'hadoop_version': '1.2.1',
+            'node_processes': ['datanode'],
+            'flavor_id': cls.flavor_ref,
+            'node_configs': {
+                'HDFS': {
+                    'Data Node Heap Size': 1024
+                }
+            }
+        }
+        resp_body = cls.create_node_group_template(**node_group_template)[1]
+
+        cls.full_cluster_template = {
+            'description': 'Test cluster template',
+            'plugin_name': 'vanilla',
+            'hadoop_version': '1.2.1',
+            'cluster_configs': {
+                'HDFS': {
+                    'dfs.replication': 2
+                },
+                'MapReduce': {
+                    'mapred.map.tasks.speculative.execution': False,
+                    'mapred.child.java.opts': '-Xmx500m'
+                },
+                'general': {
+                    'Enable Swift': False
+                }
+            },
+            'node_groups': [
+                {
+                    'name': 'master-node',
+                    'flavor_id': cls.flavor_ref,
+                    'node_processes': ['namenode'],
+                    'count': 1
+                },
+                {
+                    'name': 'worker-node',
+                    'node_group_template_id': resp_body['id'],
+                    'count': 3
+                }
+            ]
+        }
+        # create cls.cluster_template variable to use for comparison to cluster
+        # template response body. The 'node_groups' field in the response body
+        # has some extra info that post body does not have. The 'node_groups'
+        # field in the response body is something like this
+        #
+        #   'node_groups': [
+        #       {
+        #           'count': 3,
+        #           'name': 'worker-node',
+        #           'volume_mount_prefix': '/volumes/disk',
+        #           'created_at': '2014-05-21 14:31:37',
+        #           'updated_at': None,
+        #           'floating_ip_pool': None,
+        #           ...
+        #       },
+        #       ...
+        #   ]
+        cls.cluster_template = cls.full_cluster_template.copy()
+        del cls.cluster_template['node_groups']
+
+    def _create_cluster_template(self, template_name=None):
+        """Creates Cluster Template with optional name specified.
+
+        It creates template and ensures response status, template name and
+        response body. Returns id and name of created template.
+        """
+        if not template_name:
+            # generate random name if it's not specified
+            template_name = data_utils.rand_name('sahara-cluster-template')
+
+        # create cluster template
+        resp, body = self.create_cluster_template(template_name,
+                                                  **self.full_cluster_template)
+
+        # ensure that template created successfully
+        self.assertEqual(202, resp.status)
+        self.assertEqual(template_name, body['name'])
+        self.assertDictContainsSubset(self.cluster_template, body)
+
+        return body['id'], template_name
+
+    @test.attr(type='smoke')
+    def test_cluster_template_create(self):
+        self._create_cluster_template()
+
+    @test.attr(type='smoke')
+    def test_cluster_template_list(self):
+        template_info = self._create_cluster_template()
+
+        # check for cluster template in list
+        resp, templates = self.client.list_cluster_templates()
+        self.assertEqual(200, resp.status)
+        templates_info = [(template['id'], template['name'])
+                          for template in templates]
+        self.assertIn(template_info, templates_info)
+
+    @test.attr(type='smoke')
+    def test_cluster_template_get(self):
+        template_id, template_name = self._create_cluster_template()
+
+        # check cluster template fetch by id
+        resp, template = self.client.get_cluster_template(template_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(template_name, template['name'])
+        self.assertDictContainsSubset(self.cluster_template, template)
+
+    @test.attr(type='smoke')
+    def test_cluster_template_delete(self):
+        template_id = self._create_cluster_template()[0]
+
+        # delete the cluster template by id
+        resp = self.client.delete_cluster_template(template_id)[0]
+        self.assertEqual(204, resp.status)
+        # TODO(ylobankov): check that cluster template is really deleted
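
Editor's note: the cluster-template checks above rely on assertDictContainsSubset: every key/value of the expected template must appear in the response body, while extra server-generated keys are ignored. The test compares against a copy with 'node_groups' removed because the response enriches each node group, so that value would no longer match exactly. A minimal illustration of the subset semantics with made-up values:

    expected = {'plugin_name': 'vanilla', 'hadoop_version': '1.2.1'}
    actual = dict(expected, id='42', name='sahara-cluster-template')
    self.assertDictContainsSubset(expected, actual)  # passes; extra keys ignored
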
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
new file mode 100644
index 0000000..c72e828
--- /dev/null
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class DataSourceTest(dp_base.BaseDataProcessingTest):
+    @classmethod
+    def setUpClass(cls):
+        super(DataSourceTest, cls).setUpClass()
+        cls.swift_data_source_with_creds = {
+            'url': 'swift://sahara-container.sahara/input-source',
+            'description': 'Test data source',
+            'credentials': {
+                'user': CONF.identity.username,
+                'password': CONF.identity.password
+            },
+            'type': 'swift'
+        }
+        cls.swift_data_source = cls.swift_data_source_with_creds.copy()
+        del cls.swift_data_source['credentials']
+
+        cls.local_hdfs_data_source = {
+            'url': 'input-source',
+            'description': 'Test data source',
+            'type': 'hdfs'
+        }
+
+        cls.external_hdfs_data_source = {
+            'url': 'hdfs://172.18.168.2:8020/usr/hadoop/input-source',
+            'description': 'Test data source',
+            'type': 'hdfs'
+        }
+
+    def _create_data_source(self, source_body, source_name=None):
+        """Creates Data Source with optional name specified.
+
+        It creates a link to an input-source file (it may not exist) and
+        ensures response status and source name. Returns source id and name.
+        """
+        if not source_name:
+            # generate random name if it's not specified
+            source_name = data_utils.rand_name('sahara-data-source')
+
+        # create data source
+        resp, body = self.create_data_source(source_name, **source_body)
+
+        # ensure that source created successfully
+        self.assertEqual(202, resp.status)
+        self.assertEqual(source_name, body['name'])
+        if source_body['type'] == 'swift':
+            source_body = self.swift_data_source
+        self.assertDictContainsSubset(source_body, body)
+
+        return body['id'], source_name
+
+    def _list_data_sources(self, source_info):
+        # check for data source in list
+        resp, sources = self.client.list_data_sources()
+        self.assertEqual(200, resp.status)
+        sources_info = [(source['id'], source['name']) for source in sources]
+        self.assertIn(source_info, sources_info)
+
+    def _get_data_source(self, source_id, source_name, source_body):
+        # check data source fetch by id
+        resp, source = self.client.get_data_source(source_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(source_name, source['name'])
+        self.assertDictContainsSubset(source_body, source)
+
+    def _delete_data_source(self, source_id):
+        # delete the data source by id
+        resp = self.client.delete_data_source(source_id)[0]
+        self.assertEqual(204, resp.status)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_create(self):
+        self._create_data_source(self.swift_data_source_with_creds)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_list(self):
+        source_info = self._create_data_source(
+            self.swift_data_source_with_creds)
+        self._list_data_sources(source_info)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_get(self):
+        source_id, source_name = self._create_data_source(
+            self.swift_data_source_with_creds)
+        self._get_data_source(source_id, source_name, self.swift_data_source)
+
+    @test.attr(type='smoke')
+    def test_swift_data_source_delete(self):
+        source_id = self._create_data_source(
+            self.swift_data_source_with_creds)[0]
+        self._delete_data_source(source_id)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_create(self):
+        self._create_data_source(self.local_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_list(self):
+        source_info = self._create_data_source(self.local_hdfs_data_source)
+        self._list_data_sources(source_info)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_get(self):
+        source_id, source_name = self._create_data_source(
+            self.local_hdfs_data_source)
+        self._get_data_source(
+            source_id, source_name, self.local_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_local_hdfs_data_source_delete(self):
+        source_id = self._create_data_source(self.local_hdfs_data_source)[0]
+        self._delete_data_source(source_id)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_create(self):
+        self._create_data_source(self.external_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_list(self):
+        source_info = self._create_data_source(self.external_hdfs_data_source)
+        self._list_data_sources(source_info)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_get(self):
+        source_id, source_name = self._create_data_source(
+            self.external_hdfs_data_source)
+        self._get_data_source(
+            source_id, source_name, self.external_hdfs_data_source)
+
+    @test.attr(type='smoke')
+    def test_external_hdfs_data_source_delete(self):
+        source_id = self._create_data_source(self.external_hdfs_data_source)[0]
+        self._delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
new file mode 100644
index 0000000..6d59177
--- /dev/null
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class JobBinaryInternalTest(dp_base.BaseDataProcessingTest):
+    """Link to the API documentation is http://docs.openstack.org/developer/
+    sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
+    """
+    @classmethod
+    def setUpClass(cls):
+        super(JobBinaryInternalTest, cls).setUpClass()
+        cls.job_binary_internal_data = 'Some script may be data'
+
+    def _create_job_binary_internal(self, binary_name=None):
+        """Creates Job Binary Internal with optional name specified.
+
+        It puts data into Sahara database and ensures response status and
+        job binary internal name. Returns id and name of created job binary
+        internal.
+        """
+        if not binary_name:
+            # generate random name if it's not specified
+            binary_name = data_utils.rand_name('sahara-job-binary-internal')
+
+        # create job binary internal
+        resp, body = self.create_job_binary_internal(
+            binary_name, self.job_binary_internal_data)
+
+        # ensure that job binary internal created successfully
+        self.assertEqual(202, resp.status)
+        self.assertEqual(binary_name, body['name'])
+
+        return body['id'], binary_name
+
+    @test.attr(type='smoke')
+    def test_job_binary_internal_create(self):
+        self._create_job_binary_internal()
+
+    @test.attr(type='smoke')
+    def test_job_binary_internal_list(self):
+        binary_info = self._create_job_binary_internal()
+
+        # check for job binary internal in list
+        resp, binaries = self.client.list_job_binary_internals()
+        self.assertEqual(200, resp.status)
+        binaries_info = [(binary['id'], binary['name']) for binary in binaries]
+        self.assertIn(binary_info, binaries_info)
+
+    @test.attr(type='smoke')
+    def test_job_binary_internal_get(self):
+        binary_id, binary_name = self._create_job_binary_internal()
+
+        # check job binary internal fetch by id
+        resp, binary = self.client.get_job_binary_internal(binary_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(binary_name, binary['name'])
+
+    @test.attr(type='smoke')
+    def test_job_binary_internal_delete(self):
+        binary_id = self._create_job_binary_internal()[0]
+
+        # delete the job binary internal by id
+        resp = self.client.delete_job_binary_internal(binary_id)[0]
+        self.assertEqual(204, resp.status)
+
+    @test.attr(type='smoke')
+    def test_job_binary_internal_get_data(self):
+        binary_id = self._create_job_binary_internal()[0]
+
+        # get data of job binary internal by id
+        resp, data = self.client.get_job_binary_internal_data(binary_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index a64c345..04f98b4 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -1,83 +1,95 @@
-# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2014 Mirantis Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
 from tempest.api.data_processing import base as dp_base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
-    def _create_simple_node_group_template(self, template_name=None):
-        """Creates simple Node Group Template with optional name specified.
+    @classmethod
+    def setUpClass(cls):
+        super(NodeGroupTemplateTest, cls).setUpClass()
+        cls.node_group_template = {
+            'description': 'Test node group template',
+            'plugin_name': 'vanilla',
+            'hadoop_version': '1.2.1',
+            'node_processes': [
+                'datanode',
+                'tasktracker'
+            ],
+            'flavor_id': cls.flavor_ref,
+            'node_configs': {
+                'HDFS': {
+                    'Data Node Heap Size': 1024
+                },
+                'MapReduce': {
+                    'Task Tracker Heap Size': 1024
+                }
+            }
+        }
+
+    def _create_node_group_template(self, template_name=None):
+        """Creates Node Group Template with optional name specified.
 
         It creates template and ensures response status and template name.
         Returns id and name of created template.
         """
-
-        if template_name is None:
+        if not template_name:
             # generate random name if it's not specified
-            template_name = data_utils.rand_name('sahara')
+            template_name = data_utils.rand_name('sahara-ng-template')
 
-        # create simple node group template
-        resp, body, template_id = self.create_node_group_template(
-            template_name, **self.simple_node_group_template)
+        # create node group template
+        resp, body = self.create_node_group_template(
+            template_name, **self.node_group_template)
 
         # ensure that template created successfully
         self.assertEqual(202, resp.status)
         self.assertEqual(template_name, body['name'])
+        self.assertDictContainsSubset(self.node_group_template, body)
 
-        return template_id, template_name
+        return body['id'], template_name
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_node_group_template_create(self):
-        # just create and ensure template
-        self._create_simple_node_group_template()
+        self._create_node_group_template()
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_node_group_template_list(self):
-        template_info = self._create_simple_node_group_template()
+        template_info = self._create_node_group_template()
 
         # check for node group template in list
         resp, templates = self.client.list_node_group_templates()
-
         self.assertEqual(200, resp.status)
-        templates_info = list([(template['id'], template['name'])
-                               for template in templates])
+        templates_info = [(template['id'], template['name'])
+                          for template in templates]
         self.assertIn(template_info, templates_info)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_node_group_template_get(self):
-        template_id, template_name = self._create_simple_node_group_template()
+        template_id, template_name = self._create_node_group_template()
 
         # check node group template fetch by id
         resp, template = self.client.get_node_group_template(template_id)
-
         self.assertEqual(200, resp.status)
         self.assertEqual(template_name, template['name'])
-        self.assertEqual(self.simple_node_group_template['plugin_name'],
-                         template['plugin_name'])
-        self.assertEqual(self.simple_node_group_template['node_processes'],
-                         template['node_processes'])
-        self.assertEqual(self.simple_node_group_template['flavor_id'],
-                         template['flavor_id'])
+        self.assertDictContainsSubset(self.node_group_template, template)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_node_group_template_delete(self):
-        template_id, template_name = self._create_simple_node_group_template()
+        template_id = self._create_node_group_template()[0]
 
         # delete the node group template by id
-        resp = self.client.delete_node_group_template(template_id)
-
-        self.assertEqual('204', resp[0]['status'])
+        resp = self.client.delete_node_group_template(template_id)[0]
+        self.assertEqual(204, resp.status)
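
Editor's note: because create_node_group_template in the base class now returns only (resp, body), callers read the template id from the body instead of a third return value. A one-line sketch of the adjusted call (names from the hunks above):

    resp, body = self.create_node_group_template(template_name,
                                                 **self.node_group_template)
    template_id = body['id']
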
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index 3b941d8..d643f23 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -1,20 +1,19 @@
-# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2014 Mirantis Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
 from tempest.api.data_processing import base as dp_base
-from tempest.test import attr
+from tempest import test
 
 
 class PluginsTest(dp_base.BaseDataProcessingTest):
@@ -24,31 +23,27 @@
         It ensures response status and main plugins availability.
         """
         resp, plugins = self.client.list_plugins()
-
         self.assertEqual(200, resp.status)
-
-        plugins_names = list([plugin['name'] for plugin in plugins])
+        plugins_names = [plugin['name'] for plugin in plugins]
         self.assertIn('vanilla', plugins_names)
         self.assertIn('hdp', plugins_names)
 
         return plugins_names
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_plugin_list(self):
         self._list_all_plugin_names()
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_plugin_get(self):
         for plugin_name in self._list_all_plugin_names():
             resp, plugin = self.client.get_plugin(plugin_name)
-
             self.assertEqual(200, resp.status)
             self.assertEqual(plugin_name, plugin['name'])
 
             for plugin_version in plugin['versions']:
                 resp, detailed_plugin = self.client.get_plugin(plugin_name,
                                                                plugin_version)
-
                 self.assertEqual(200, resp.status)
                 self.assertEqual(plugin_name, detailed_plugin['name'])
 
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index 8add9ba..cf70d11 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -36,7 +36,9 @@
 
         cls.catalog_type = CONF.database.catalog_type
         cls.db_flavor_ref = CONF.database.db_flavor_ref
+        cls.db_current_version = CONF.database.db_current_version
 
         os = cls.get_client_manager()
         cls.os = os
         cls.database_flavors_client = cls.os.database_flavors_client
+        cls.database_versions_client = cls.os.database_versions_client
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api/database/versions/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/api/database/versions/__init__.py
diff --git a/tempest/api/database/versions/test_versions.py b/tempest/api/database/versions/test_versions.py
new file mode 100644
index 0000000..6101f47
--- /dev/null
+++ b/tempest/api/database/versions/test_versions.py
@@ -0,0 +1,40 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.database import base
+from tempest import test
+
+
+class DatabaseVersionsTest(base.BaseDatabaseTest):
+    _interface = 'json'
+
+    @classmethod
+    def setUpClass(cls):
+        super(DatabaseVersionsTest, cls).setUpClass()
+        cls.client = cls.database_versions_client
+
+    @test.attr(type='smoke')
+    def test_list_db_versions(self):
+        resp, versions = self.client.list_db_versions()
+        self.assertEqual(200, resp.status)
+        self.assertTrue(len(versions) > 0, "No database versions found")
+        # List of all versions should contain the current version, and there
+        # should only be one 'current' version
+        current_versions = list()
+        for version in versions:
+            if 'CURRENT' == version['status']:
+                current_versions.append(version['id'])
+        self.assertEqual(1, len(current_versions))
+        self.assertIn(self.db_current_version, current_versions)
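
Editor's note: the version test above asserts that exactly one entry is flagged CURRENT and that it matches the configured db_current_version. The same check could equally be written as a comprehension; a small equivalent sketch:

    current_versions = [v['id'] for v in versions if v['status'] == 'CURRENT']
    self.assertEqual(1, len(current_versions))
    self.assertIn(self.db_current_version, current_versions)
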
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index 81bc5de..a29f27e 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -60,8 +60,7 @@
         # Role should be created, verified, and deleted
         role_name = data_utils.rand_name(name='role-test-')
         resp, body = self.client.create_role(role_name)
-        self.assertIn('status', resp)
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertEqual(role_name, body['name'])
 
         resp, body = self.client.list_roles()
@@ -69,14 +68,25 @@
         self.assertTrue(any(found))
 
         resp, body = self.client.delete_role(found[0]['id'])
-        self.assertIn('status', resp)
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(204, resp.status)
 
         resp, body = self.client.list_roles()
         found = [role for role in body if role['name'] == role_name]
         self.assertFalse(any(found))
 
     @test.attr(type='gate')
+    def test_get_role_by_id(self):
+        # Get a role by its id
+        self.data.setup_test_role()
+        role_id = self.data.role['id']
+        role_name = self.data.role['name']
+        resp, body = self.client.get_role(role_id)
+        self.assertIn('status', resp)
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(role_id, body['id'])
+        self.assertEqual(role_name, body['name'])
+
+    @test.attr(type='gate')
     def test_assign_user_role(self):
         # Assign a role to a user on a tenant
         (user, tenant, role) = self._get_role_params()
@@ -92,7 +102,7 @@
                                                        user['id'], role['id'])
         resp, body = self.client.remove_user_role(tenant['id'], user['id'],
                                                   user_role['id'])
-        self.assertEqual(resp['status'], '204')
+        self.assertEqual(204, resp.status)
 
     @test.attr(type='gate')
     def test_list_user_roles(self):
diff --git a/tempest/api/identity/admin/test_roles_negative.py b/tempest/api/identity/admin/test_roles_negative.py
index 7a0bdea..6f8f9b5 100644
--- a/tempest/api/identity/admin/test_roles_negative.py
+++ b/tempest/api/identity/admin/test_roles_negative.py
@@ -18,7 +18,7 @@
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class RolesNegativeTestJSON(base.BaseIdentityV2AdminTest):
@@ -32,13 +32,13 @@
         role = self.get_role_by_name(self.data.test_role)
         return (user, tenant, role)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_roles_by_unauthorized_user(self):
         # Non-administrator user should not be able to list roles
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.list_roles)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_roles_request_without_token(self):
         # Request to list roles without a valid token should fail
         token = self.client.auth_provider.get_token()
@@ -46,19 +46,19 @@
         self.assertRaises(exceptions.Unauthorized, self.client.list_roles)
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_role_create_blank_name(self):
         # Should not be able to create a role with a blank name
         self.assertRaises(exceptions.BadRequest, self.client.create_role, '')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_role_by_unauthorized_user(self):
         # Non-administrator user should not be able to create role
         role_name = data_utils.rand_name(name='role-')
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.create_role, role_name)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_role_request_without_token(self):
         # Request to create role without a valid token should fail
         token = self.client.auth_provider.get_token()
@@ -68,19 +68,18 @@
                           self.client.create_role, role_name)
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_role_create_duplicate(self):
         # Role names should be unique
         role_name = data_utils.rand_name(name='role-dup-')
         resp, body = self.client.create_role(role_name)
         role1_id = body.get('id')
-        self.assertIn('status', resp)
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(200, resp.status)
         self.addCleanup(self.client.delete_role, role1_id)
         self.assertRaises(exceptions.Conflict, self.client.create_role,
                           role_name)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_role_by_unauthorized_user(self):
         # Non-administrator user should not be able to delete role
         role_name = data_utils.rand_name(name='role-')
@@ -91,7 +90,7 @@
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.delete_role, role_id)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_role_request_without_token(self):
         # Request to delete role without a valid token should fail
         role_name = data_utils.rand_name(name='role-')
@@ -106,14 +105,14 @@
                           role_id)
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_role_non_existent(self):
         # Attempt to delete a non existent role should fail
         non_existent_role = str(uuid.uuid4().hex)
         self.assertRaises(exceptions.NotFound, self.client.delete_role,
                           non_existent_role)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_assign_user_role_by_unauthorized_user(self):
         # Non-administrator user should not be authorized to
         # assign a role to user
@@ -122,7 +121,7 @@
                           self.non_admin_client.assign_user_role,
                           tenant['id'], user['id'], role['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_assign_user_role_request_without_token(self):
         # Request to assign a role to a user without a valid token
         (user, tenant, role) = self._get_role_params()
@@ -133,7 +132,7 @@
                           user['id'], role['id'])
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_assign_user_role_for_non_existent_role(self):
         # Attempt to assign a non existent role to user should fail
         (user, tenant, role) = self._get_role_params()
@@ -141,7 +140,7 @@
         self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
                           tenant['id'], user['id'], non_existent_role)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_assign_user_role_for_non_existent_tenant(self):
         # Attempt to assign a role on a non existent tenant should fail
         (user, tenant, role) = self._get_role_params()
@@ -149,7 +148,7 @@
         self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
                           non_existent_tenant, user['id'], role['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_assign_duplicate_user_role(self):
         # Duplicate user role should not get assigned
         (user, tenant, role) = self._get_role_params()
@@ -157,7 +156,7 @@
         self.assertRaises(exceptions.Conflict, self.client.assign_user_role,
                           tenant['id'], user['id'], role['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_remove_user_role_by_unauthorized_user(self):
         # Non-administrator user should not be authorized to
         # remove a user's role
@@ -169,7 +168,7 @@
                           self.non_admin_client.remove_user_role,
                           tenant['id'], user['id'], role['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_remove_user_role_request_without_token(self):
         # Request to remove a user's role without a valid token
         (user, tenant, role) = self._get_role_params()
@@ -183,7 +182,7 @@
                           user['id'], role['id'])
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_remove_user_role_non_existent_role(self):
         # Attempt to delete a non existent role from a user should fail
         (user, tenant, role) = self._get_role_params()
@@ -194,7 +193,7 @@
         self.assertRaises(exceptions.NotFound, self.client.remove_user_role,
                           tenant['id'], user['id'], non_existent_role)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_remove_user_role_non_existent_tenant(self):
         # Attempt to remove a role from a non existent tenant should fail
         (user, tenant, role) = self._get_role_params()
@@ -205,7 +204,7 @@
         self.assertRaises(exceptions.NotFound, self.client.remove_user_role,
                           non_existent_tenant, user['id'], role['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_user_roles_by_unauthorized_user(self):
         # Non-administrator user should not be authorized to list
         # a user's roles
@@ -215,7 +214,7 @@
                           self.non_admin_client.list_user_roles, tenant['id'],
                           user['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_user_roles_request_without_token(self):
         # Request to list user's roles without a valid token should fail
         (user, tenant, role) = self._get_role_params()
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index 459c44c..0472e07 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -18,7 +18,7 @@
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class ServicesTestJSON(base.BaseIdentityV2AdminTest):
@@ -27,12 +27,12 @@
     def _del_service(self, service_id):
         # Deleting the service created in this method
         resp, _ = self.client.delete_service(service_id)
-        self.assertEqual(resp['status'], '204')
+        self.assertEqual(204, resp.status)
         # Checking whether service is deleted successfully
         self.assertRaises(exceptions.NotFound, self.client.get_service,
                           service_id)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_create_get_delete_service(self):
         # GET Service
         # Creating a Service
@@ -43,7 +43,7 @@
             name, type, description=description)
         self.assertFalse(service_data['id'] is None)
         self.addCleanup(self._del_service, service_data['id'])
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(200, resp.status)
         # Verifying response body of create service
         self.assertIn('id', service_data)
         self.assertIn('name', service_data)
@@ -54,7 +54,7 @@
         self.assertEqual(description, service_data['description'])
         # Get service
         resp, fetched_service = self.client.get_service(service_data['id'])
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(200, resp.status)
         # verifying the existence of service created
         self.assertIn('id', fetched_service)
         self.assertEqual(fetched_service['id'], service_data['id'])
@@ -66,7 +66,21 @@
         self.assertEqual(fetched_service['description'],
                          service_data['description'])
 
-    @attr(type='smoke')
+    @test.attr(type='gate')
+    def test_create_service_without_description(self):
+        # Create a service only with name and type
+        name = data_utils.rand_name('service-')
+        type = data_utils.rand_name('type--')
+        resp, service = self.client.create_service(name, type)
+        self.assertIn('id', service)
+        self.assertEqual(200, resp.status)
+        self.addCleanup(self._del_service, service['id'])
+        self.assertIn('name', service)
+        self.assertEqual(name, service['name'])
+        self.assertIn('type', service)
+        self.assertEqual(type, service['type'])
+
+    @test.attr(type='smoke')
     def test_list_services(self):
         # Create, List, Verify and Delete Services
         services = []
@@ -86,7 +100,7 @@
         self.addCleanup(delete_services)
         # List and Verify Services
         resp, body = self.client.list_services()
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(200, resp.status)
         found = [service for service in body if service['id'] in service_ids]
         self.assertEqual(len(found), len(services), 'Services not found')
 
diff --git a/tempest/api/identity/admin/test_tenant_negative.py b/tempest/api/identity/admin/test_tenant_negative.py
index 44b54b8..622ad81 100644
--- a/tempest/api/identity/admin/test_tenant_negative.py
+++ b/tempest/api/identity/admin/test_tenant_negative.py
@@ -18,19 +18,19 @@
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class TenantsNegativeTestJSON(base.BaseIdentityV2AdminTest):
     _interface = 'json'
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_tenants_by_unauthorized_user(self):
         # Non-administrator user should not be able to list tenants
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.list_tenants)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_tenant_request_without_token(self):
         # Request to list tenants without a valid token should fail
         token = self.client.auth_provider.get_token()
@@ -38,7 +38,7 @@
         self.assertRaises(exceptions.Unauthorized, self.client.list_tenants)
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_tenant_delete_by_unauthorized_user(self):
         # Non-administrator user should not be able to delete a tenant
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -48,7 +48,7 @@
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.delete_tenant, tenant['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_tenant_delete_request_without_token(self):
         # Request to delete a tenant without a valid token should fail
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -61,13 +61,13 @@
                           tenant['id'])
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_non_existent_tenant(self):
         # Attempt to delete a non existent tenant should fail
         self.assertRaises(exceptions.NotFound, self.client.delete_tenant,
                           str(uuid.uuid4().hex))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_tenant_create_duplicate(self):
         # Tenant names should be unique
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -82,14 +82,14 @@
         self.assertRaises(exceptions.Conflict, self.client.create_tenant,
                           tenant_name)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_tenant_by_unauthorized_user(self):
         # Non-administrator user should not be authorized to create a tenant
         tenant_name = data_utils.rand_name(name='tenant-')
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.create_tenant, tenant_name)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_tenant_request_without_token(self):
         # Create tenant request without a token should not be authorized
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -99,26 +99,26 @@
                           tenant_name)
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_tenant_with_empty_name(self):
         # Tenant name should not be empty
         self.assertRaises(exceptions.BadRequest, self.client.create_tenant,
                           name='')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_tenants_name_length_over_64(self):
         # Tenant name length should not be greater than 64 characters
         tenant_name = 'a' * 65
         self.assertRaises(exceptions.BadRequest, self.client.create_tenant,
                           tenant_name)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_update_non_existent_tenant(self):
         # Attempt to update a non existent tenant should fail
         self.assertRaises(exceptions.NotFound, self.client.update_tenant,
                           str(uuid.uuid4().hex))
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_tenant_update_by_unauthorized_user(self):
         # Non-administrator user should not be able to update a tenant
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -128,7 +128,7 @@
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.update_tenant, tenant['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_tenant_update_request_without_token(self):
         # Request to update a tenant without a valid token should fail
         tenant_name = data_utils.rand_name(name='tenant-')
diff --git a/tempest/api/identity/admin/test_tenants.py b/tempest/api/identity/admin/test_tenants.py
index 257a6d7..b989664 100644
--- a/tempest/api/identity/admin/test_tenants.py
+++ b/tempest/api/identity/admin/test_tenants.py
@@ -17,13 +17,13 @@
 
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class TenantsTestJSON(base.BaseIdentityV2AdminTest):
     _interface = 'json'
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_list_delete(self):
         # Create several tenants and delete them
         tenants = []
@@ -35,20 +35,20 @@
             tenants.append(tenant)
         tenant_ids = map(lambda x: x['id'], tenants)
         resp, body = self.client.list_tenants()
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(200, resp.status)
         found = [tenant for tenant in body if tenant['id'] in tenant_ids]
         self.assertEqual(len(found), len(tenants), 'Tenants not created')
 
         for tenant in tenants:
             resp, body = self.client.delete_tenant(tenant['id'])
-            self.assertTrue(resp['status'].startswith('2'))
+            self.assertEqual(204, resp.status)
             self.data.tenants.remove(tenant)
 
         resp, body = self.client.list_tenants()
         found = [tenant for tenant in body if tenant['id'] in tenant_ids]
         self.assertFalse(any(found), 'Tenants failed to delete')
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_create_with_description(self):
         # Create tenant with a description
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -57,10 +57,9 @@
                                                description=tenant_desc)
         tenant = body
         self.data.tenants.append(tenant)
-        st1 = resp['status']
         tenant_id = body['id']
         desc1 = body['description']
-        self.assertTrue(st1.startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertEqual(desc1, tenant_desc, 'Description should have '
                          'been sent in response for create')
         resp, body = self.client.get_tenant(tenant_id)
@@ -70,7 +69,7 @@
         self.client.delete_tenant(tenant_id)
         self.data.tenants.remove(tenant)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_create_enabled(self):
         # Create a tenant that is enabled
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -78,9 +77,8 @@
         tenant = body
         self.data.tenants.append(tenant)
         tenant_id = body['id']
-        st1 = resp['status']
         en1 = body['enabled']
-        self.assertTrue(st1.startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertTrue(en1, 'Enable should be True in response')
         resp, body = self.client.get_tenant(tenant_id)
         en2 = body['enabled']
@@ -88,7 +86,7 @@
         self.client.delete_tenant(tenant_id)
         self.data.tenants.remove(tenant)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_create_not_enabled(self):
         # Create a tenant that is not enabled
         tenant_name = data_utils.rand_name(name='tenant-')
@@ -96,9 +94,8 @@
         tenant = body
         self.data.tenants.append(tenant)
         tenant_id = body['id']
-        st1 = resp['status']
         en1 = body['enabled']
-        self.assertTrue(st1.startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertEqual('false', str(en1).lower(),
                          'Enable should be False in response')
         resp, body = self.client.get_tenant(tenant_id)
@@ -108,7 +105,7 @@
         self.client.delete_tenant(tenant_id)
         self.data.tenants.remove(tenant)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_update_name(self):
         # Update name attribute of a tenant
         t_name1 = data_utils.rand_name(name='tenant-')
@@ -122,9 +119,8 @@
 
         t_name2 = data_utils.rand_name(name='tenant2-')
         resp, body = self.client.update_tenant(t_id, name=t_name2)
-        st2 = resp['status']
         resp2_name = body['name']
-        self.assertTrue(st2.startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertNotEqual(resp1_name, resp2_name)
 
         resp, body = self.client.get_tenant(t_id)
@@ -137,7 +133,7 @@
         self.client.delete_tenant(t_id)
         self.data.tenants.remove(tenant)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_update_desc(self):
         # Update description attribute of a tenant
         t_name = data_utils.rand_name(name='tenant-')
@@ -152,9 +148,8 @@
 
         t_desc2 = data_utils.rand_name(name='desc2-')
         resp, body = self.client.update_tenant(t_id, description=t_desc2)
-        st2 = resp['status']
         resp2_desc = body['description']
-        self.assertTrue(st2.startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertNotEqual(resp1_desc, resp2_desc)
 
         resp, body = self.client.get_tenant(t_id)
@@ -167,7 +162,7 @@
         self.client.delete_tenant(t_id)
         self.data.tenants.remove(tenant)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_tenant_update_enable(self):
         # Update the enabled attribute of a tenant
         t_name = data_utils.rand_name(name='tenant-')
@@ -182,9 +177,8 @@
 
         t_en2 = True
         resp, body = self.client.update_tenant(t_id, enabled=t_en2)
-        st2 = resp['status']
         resp2_en = body['enabled']
-        self.assertTrue(st2.startswith('2'))
+        self.assertEqual(200, resp.status)
         self.assertNotEqual(resp1_en, resp2_en)
 
         resp, body = self.client.get_tenant(t_id)
diff --git a/tempest/api/identity/admin/test_tokens.py b/tempest/api/identity/admin/test_tokens.py
index c931bcf..08e12f0 100644
--- a/tempest/api/identity/admin/test_tokens.py
+++ b/tempest/api/identity/admin/test_tokens.py
@@ -15,13 +15,13 @@
 
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class TokensTestJSON(base.BaseIdentityV2AdminTest):
     _interface = 'json'
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_create_get_delete_token(self):
         # get a token by username and password
         user_name = data_utils.rand_name(name='user-')
@@ -56,7 +56,7 @@
         resp, body = self.client.delete_token(token_id)
         self.assertEqual(resp['status'], '204')
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_rescope_token(self):
         """An unscoped token can be requested, that token can be used to
            request a scoped token.
@@ -72,11 +72,16 @@
         self.assertEqual(200, resp.status)
         self.data.users.append(user)
 
-        # Create a tenant.
-        tenant_name = data_utils.rand_name(name='tenant-')
-        resp, tenant = self.client.create_tenant(tenant_name)
+        # Create a couple tenants.
+        tenant1_name = data_utils.rand_name(name='tenant-')
+        resp, tenant1 = self.client.create_tenant(tenant1_name)
         self.assertEqual(200, resp.status)
-        self.data.tenants.append(tenant)
+        self.data.tenants.append(tenant1)
+
+        tenant2_name = data_utils.rand_name(name='tenant-')
+        resp, tenant2 = self.client.create_tenant(tenant2_name)
+        self.assertEqual(200, resp.status)
+        self.data.tenants.append(tenant2)
 
         # Create a role
         role_name = data_utils.rand_name(name='role-')
@@ -84,8 +89,12 @@
         self.assertEqual(200, resp.status)
         self.data.roles.append(role)
 
-        # Grant the user the role on the tenant.
-        resp, _ = self.client.assign_user_role(tenant['id'], user['id'],
+        # Grant the user the role on the tenants.
+        resp, _ = self.client.assign_user_role(tenant1['id'], user['id'],
+                                               role['id'])
+        self.assertEqual(200, resp.status)
+
+        resp, _ = self.client.assign_user_role(tenant2['id'], user['id'],
                                                role['id'])
         self.assertEqual(200, resp.status)
 
@@ -95,10 +104,20 @@
 
         token_id = body['token']['id']
 
-        # Use the unscoped token to get a scoped token.
-        rsp, body = self.token_client.auth_token(token_id, tenant=tenant_name)
+        # Use the unscoped token to get a token scoped to tenant1
+        resp, body = self.token_client.auth_token(token_id, tenant=tenant1_name)
         self.assertEqual(200, resp.status)
 
+        scoped_token_id = body['token']['id']
+
+        # Revoke the scoped token
+        resp, body = self.client.delete_token(scoped_token_id)
+        self.assertEqual(204, resp.status)
+
+        # Use the unscoped token to get a token scoped to tenant2
+        resp, body = self.token_client.auth_token(token_id, tenant=tenant2_name)
+        self.assertEqual(200, resp.status)
+
 
 class TokensTestXML(TokensTestJSON):
     _interface = 'xml'
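
The rescope test above goes through tempest's token client. For reference, a
hedged sketch of the raw Keystone v2 exchange it exercises; the endpoint URLs,
credentials, and tenant names are illustrative assumptions, not values taken
from this patch::

    # Assumed Keystone v2 endpoints and credentials; illustrative only.
    import requests

    KEYSTONE_URL = 'http://keystone.example.com:5000/v2.0'
    ADMIN_URL = 'http://keystone.example.com:35357/v2.0'

    # 1. Get an unscoped token with username/password only.
    resp = requests.post(
        KEYSTONE_URL + '/tokens',
        json={'auth': {'passwordCredentials': {'username': 'demo',
                                               'password': 'secret'}}})
    unscoped_token = resp.json()['access']['token']['id']

    # 2. Exchange the unscoped token for a token scoped to tenant1.
    resp = requests.post(
        KEYSTONE_URL + '/tokens',
        json={'auth': {'token': {'id': unscoped_token},
                       'tenantName': 'tenant1'}})
    scoped_token = resp.json()['access']['token']['id']

    # 3. Revoke the scoped token (admin operation, expects HTTP 204).
    requests.delete(ADMIN_URL + '/tokens/' + scoped_token,
                    headers={'X-Auth-Token': '<admin-token>'})

    # 4. The original unscoped token can still be rescoped to tenant2.
    resp = requests.post(
        KEYSTONE_URL + '/tokens',
        json={'auth': {'token': {'id': unscoped_token},
                       'tenantName': 'tenant2'}})
    assert resp.status_code == 200
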
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index a4e6c17..e2c1066 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -206,6 +206,25 @@
                          "Failed to find user %s in fetched list" %
                          ', '.join(m_user for m_user in missing_users))
 
+    @test.attr(type='smoke')
+    def test_update_user_password(self):
+        # Test case to check if updating of user password is successful.
+        self.data.setup_test_user()
+        # Updating the user with new password
+        new_pass = data_utils.rand_name('pass-')
+        resp, update_user = self.client.update_user_password(
+            self.data.user['id'], new_pass)
+        # Assert response body of update user.
+        self.assertEqual(200, resp.status)
+        self.assertEqual(update_user['id'], self.data.user['id'])
+
+        # Validate the updated password
+        # Get a token
+        resp, body = self.token_client.auth(self.data.test_user, new_pass,
+                                            self.data.test_tenant)
+        self.assertEqual('200', resp['status'])
+        self.assertIn('id', body['token'])
+
 
 class UsersTestXML(UsersTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/identity/admin/test_users_negative.py b/tempest/api/identity/admin/test_users_negative.py
index 4e8ebe5..a584a7b 100644
--- a/tempest/api/identity/admin/test_users_negative.py
+++ b/tempest/api/identity/admin/test_users_negative.py
@@ -18,7 +18,7 @@
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class UsersNegativeTestJSON(base.BaseIdentityV2AdminTest):
@@ -31,7 +31,7 @@
         cls.alt_password = data_utils.rand_name('pass_')
         cls.alt_email = cls.alt_user + '@testmail.tm'
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_by_unauthorized_user(self):
         # Non-administrator should not be authorized to create a user
         self.data.setup_test_tenant()
@@ -40,7 +40,7 @@
                           self.alt_password, self.data.tenant['id'],
                           self.alt_email)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_with_empty_name(self):
         # User with an empty name should not be created
         self.data.setup_test_tenant()
@@ -48,7 +48,7 @@
                           self.alt_password, self.data.tenant['id'],
                           self.alt_email)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_with_name_length_over_255(self):
         # Length of user name field should be restricted to 255 characters
         self.data.setup_test_tenant()
@@ -56,7 +56,7 @@
                           'a' * 256, self.alt_password,
                           self.data.tenant['id'], self.alt_email)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_with_duplicate_name(self):
         # Duplicate user should not be created
         self.data.setup_test_user()
@@ -64,14 +64,14 @@
                           self.data.test_user, self.data.test_password,
                           self.data.tenant['id'], self.data.test_email)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_for_non_existent_tenant(self):
         # Attempt to create a user in a non-existent tenant should fail
         self.assertRaises(exceptions.NotFound, self.client.create_user,
                           self.alt_user, self.alt_password, '49ffgg99999',
                           self.alt_email)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_request_without_a_token(self):
         # Request to create a user without a valid token should fail
         self.data.setup_test_tenant()
@@ -86,7 +86,7 @@
         # Unset the token to allow further tests to generate a new token
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_user_with_enabled_non_bool(self):
         # Attempt to create a user with an invalid enabled param should fail
         self.data.setup_test_tenant()
@@ -96,7 +96,7 @@
                           self.data.tenant['id'],
                           self.alt_email, enabled=3)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_update_user_for_non_existent_user(self):
         # Attempt to update a non-existent user should fail
         user_name = data_utils.rand_name('user-')
@@ -104,7 +104,7 @@
         self.assertRaises(exceptions.NotFound, self.client.update_user,
                           non_existent_id, name=user_name)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_update_user_request_without_a_token(self):
         # Request to update a user without a valid token should fail
 
@@ -118,14 +118,14 @@
         # Unset the token to allow further tests to generate a new token
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_update_user_by_unauthorized_user(self):
         # Non-administrator should not be authorized to update user
         self.data.setup_test_tenant()
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.update_user, self.alt_user)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_users_by_unauthorized_user(self):
         # Non-administrator user should not be authorized to delete a user
         self.data.setup_test_user()
@@ -133,13 +133,13 @@
                           self.non_admin_client.delete_user,
                           self.data.user['id'])
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_non_existent_user(self):
         # Attempt to delete a non-existent user should fail
         self.assertRaises(exceptions.NotFound, self.client.delete_user,
                           'junk12345123')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_delete_user_request_without_a_token(self):
         # Request to delete a user without a valid token should fail
 
@@ -153,7 +153,7 @@
         # Unset the token to allow further tests to generate a new token
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_authentication_for_disabled_user(self):
         # Disabled user's token should not get authenticated
         self.data.setup_test_user()
@@ -163,7 +163,7 @@
                           self.data.test_password,
                           self.data.test_tenant)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_authentication_when_tenant_is_disabled(self):
         # User's token for a disabled tenant should not be authenticated
         self.data.setup_test_user()
@@ -173,7 +173,7 @@
                           self.data.test_password,
                           self.data.test_tenant)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_authentication_with_invalid_tenant(self):
         # User's token for an invalid tenant should not be authenticated
         self.data.setup_test_user()
@@ -182,7 +182,7 @@
                           self.data.test_password,
                           'junktenant1234')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_authentication_with_invalid_username(self):
         # Non-existent user's token should not get authenticated
         self.data.setup_test_user()
@@ -190,7 +190,7 @@
                           'junkuser123', self.data.test_password,
                           self.data.test_tenant)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_authentication_with_invalid_password(self):
         # User's token with invalid password should not be authenticated
         self.data.setup_test_user()
@@ -198,14 +198,14 @@
                           self.data.test_user, 'junkpass1234',
                           self.data.test_tenant)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_users_by_unauthorized_user(self):
         # Non-administrator user should not be authorized to get user list
         self.data.setup_test_user()
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.get_users)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_get_users_request_without_token(self):
         # Request to get list of users without a valid token should fail
         token = self.client.auth_provider.get_token()
@@ -213,7 +213,7 @@
         self.assertRaises(exceptions.Unauthorized, self.client.get_users)
         self.client.auth_provider.clear_auth()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_users_with_invalid_tenant(self):
         # Should not be able to return a list of all
         # users for a non-existent tenant
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 086d235..a1e6cde 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -16,7 +16,7 @@
 
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class DomainsTestJSON(base.BaseIdentityV3AdminTest):
@@ -29,7 +29,7 @@
         resp, _ = self.client.delete_domain(domain_id)
         self.assertEqual(204, resp.status)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_domains(self):
         # Test to list domains
         domain_ids = list()
@@ -49,7 +49,7 @@
         missing_doms = [d for d in domain_ids if d not in fetched_ids]
         self.assertEqual(0, len(missing_doms))
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_create_update_delete_domain(self):
         d_name = data_utils.rand_name('domain-')
         d_desc = data_utils.rand_name('domain-desc-')
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 28615a4..5b46f89 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -18,7 +18,7 @@
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class EndpointsNegativeTestJSON(base.BaseIdentityV3AdminTest):
@@ -45,7 +45,7 @@
             cls.service_client.delete_service(s)
         super(EndpointsNegativeTestJSON, cls).tearDownClass()
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_with_enabled_False(self):
         # Enabled should be a boolean, not a string like 'False'
         interface = 'public'
@@ -55,7 +55,7 @@
                           self.service_id, interface, url, region=region,
                           force_enabled='False')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_create_with_enabled_True(self):
         # Enabled should be a boolean, not a string like 'True'
         interface = 'public'
@@ -79,12 +79,12 @@
         self.assertRaises(exceptions.BadRequest, self.client.update_endpoint,
                           endpoint_for_update['id'], force_enabled=enabled)
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_update_with_enabled_False(self):
         # Enabled should be a boolean, not a string like 'False'
         self._assert_update_raises_bad_request('False')
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_update_with_enabled_True(self):
         # Enabled should be a boolean, not a string like 'True'
         self._assert_update_raises_bad_request('True')
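
These negative tests hinge on the difference between a JSON boolean and the
strings 'True'/'False'. A small sketch of what the two payloads look like on
the wire (the 'endpoint' payload key is illustrative only)::

    import json

    # A real boolean serializes to a bare JSON literal...
    print(json.dumps({'endpoint': {'enabled': True}}))
    # -> {"endpoint": {"enabled": true}}

    # ...while the string form stays quoted, which the tests above expect
    # Keystone to reject with 400 Bad Request.
    print(json.dumps({'endpoint': {'enabled': 'True'}}))
    # -> {"endpoint": {"enabled": "True"}}
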
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 3e04b5f..0e79440 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -15,7 +15,7 @@
 
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
@@ -25,7 +25,7 @@
         resp, _ = self.policy_client.delete_policy(policy_id)
         self.assertEqual(204, resp.status)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_policies(self):
         # Test to list policies
         policy_ids = list()
@@ -46,7 +46,7 @@
         missing_pols = [p for p in policy_ids if p not in fetched_ids]
         self.assertEqual(0, len(missing_pols))
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_create_update_delete_policy(self):
         # Test to update policy
         blob = data_utils.rand_name('BlobName-')
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
new file mode 100644
index 0000000..03974e4
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -0,0 +1,102 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest import test
+
+
+class RegionsTestJSON(base.BaseIdentityV3AdminTest):
+    _interface = 'json'
+
+    @classmethod
+    @test.safe_setup
+    def setUpClass(cls):
+        super(RegionsTestJSON, cls).setUpClass()
+        cls.setup_regions = list()
+        cls.client = cls.region_client
+        for i in range(2):
+            r_description = data_utils.rand_name('description-')
+            _, region = cls.client.create_region(r_description)
+            cls.setup_regions.append(region)
+
+    @classmethod
+    def tearDownClass(cls):
+        for r in cls.setup_regions:
+            cls.client.delete_region(r['id'])
+        super(RegionsTestJSON, cls).tearDownClass()
+
+    def _delete_region(self, region_id):
+        resp, _ = self.client.delete_region(region_id)
+        self.assertEqual(204, resp.status)
+        self.assertRaises(exceptions.NotFound,
+                          self.client.get_region, region_id)
+
+    @test.attr(type='gate')
+    def test_create_update_get_delete_region(self):
+        r_description = data_utils.rand_name('description-')
+        resp, region = self.client.create_region(
+            r_description, parent_region_id=self.setup_regions[0]['id'])
+        self.assertEqual(201, resp.status)
+        self.addCleanup(self._delete_region, region['id'])
+        self.assertEqual(r_description, region['description'])
+        self.assertEqual(self.setup_regions[0]['id'],
+                         region['parent_region_id'])
+        # Update region with new description and parent ID
+        r_alt_description = data_utils.rand_name('description-')
+        resp, region = self.client.update_region(
+            region['id'],
+            description=r_alt_description,
+            parent_region_id=self.setup_regions[1]['id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(r_alt_description, region['description'])
+        self.assertEqual(self.setup_regions[1]['id'],
+                         region['parent_region_id'])
+        # Get the details of region
+        resp, region = self.client.get_region(region['id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(r_alt_description, region['description'])
+        self.assertEqual(self.setup_regions[1]['id'],
+                         region['parent_region_id'])
+
+    @test.attr(type='smoke')
+    def test_create_region_with_specific_id(self):
+        # Create a region with a specific id
+        r_region_id = data_utils.rand_uuid()
+        r_description = data_utils.rand_name('description-')
+        resp, region = self.client.create_region(
+            r_description, unique_region_id=r_region_id)
+        self.addCleanup(self._delete_region, region['id'])
+        # Asserting Create Region with specific id response body
+        self.assertEqual(201, resp.status)
+        self.assertEqual(r_region_id, region['id'])
+        self.assertEqual(r_description, region['description'])
+
+    @test.attr(type='gate')
+    def test_list_regions(self):
+        # Get a list of regions
+        resp, fetched_regions = self.client.list_regions()
+        self.assertEqual(200, resp.status)
+        missing_regions = [e for e in self.setup_regions
+                           if e not in fetched_regions]
+        # Asserting List Regions response
+        self.assertEqual(0, len(missing_regions),
+                         "Failed to find region %s in fetched list" %
+                         ', '.join(str(e) for e in missing_regions))
+
+
+class RegionsTestXML(RegionsTestJSON):
+    _interface = 'xml'
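
The new region tests drive a region_client that is wired up in the identity
base class later in this change. For orientation, a hedged sketch of the
Keystone v3 region calls such a client is expected to wrap; the endpoint,
token, and payload values are assumptions for illustration, not part of the
patch::

    # Assumed Keystone v3 endpoint and admin token; illustrative only.
    import requests

    V3_URL = 'http://keystone.example.com:5000/v3'
    HEADERS = {'X-Auth-Token': '<admin-token>'}

    # Create a region and let Keystone generate the id (expects HTTP 201).
    resp = requests.post(V3_URL + '/regions', headers=HEADERS,
                         json={'region': {'description': 'primary DC'}})
    region = resp.json()['region']

    # Create a region with a caller-chosen id (PUT instead of POST).
    requests.put(V3_URL + '/regions/custom-region-id', headers=HEADERS,
                 json={'region': {'description': 'edge site'}})

    # Update description and parent region (expects HTTP 200).
    requests.patch(V3_URL + '/regions/' + region['id'], headers=HEADERS,
                   json={'region': {'description': 'renamed',
                                    'parent_region_id': 'custom-region-id'}})

    # List all regions, then delete the generated one (expects HTTP 204).
    listing = requests.get(V3_URL + '/regions', headers=HEADERS)
    regions = listing.json()['regions']
    requests.delete(V3_URL + '/regions/' + region['id'], headers=HEADERS)
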
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 24c7b83..90dccca 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -73,7 +73,7 @@
         self.assertIn(role_id, fetched_role_ids)
 
     @test.attr(type='smoke')
-    def test_role_create_update_get(self):
+    def test_role_create_update_get_list(self):
         r_name = data_utils.rand_name('Role-')
         resp, role = self.client.create_role(r_name)
         self.addCleanup(self.client.delete_role, role['id'])
@@ -94,6 +94,10 @@
         self.assertEqual(new_name, new_role['name'])
         self.assertEqual(updated_role['id'], new_role['id'])
 
+        resp, roles = self.client.list_roles()
+        self.assertEqual(200, resp.status)
+        self.assertIn(role['id'], [r['id'] for r in roles])
+
     @test.attr(type='smoke')
     def test_grant_list_revoke_role_to_user_on_project(self):
         resp, _ = self.client.assign_user_role_on_project(
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index c5d4ddf..36e5327 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -16,13 +16,13 @@
 
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class ServicesTestJSON(base.BaseIdentityV3AdminTest):
     _interface = 'json'
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_update_service(self):
         # Update description attribute of service
         name = data_utils.rand_name('service-')
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 9629213..fe3eb03 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -16,13 +16,13 @@
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class TokensV3TestJSON(base.BaseIdentityV3AdminTest):
     _interface = 'json'
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_tokens(self):
         # Valid user's token is authenticated
         # Create a User
@@ -33,15 +33,15 @@
         resp, user = self.client.create_user(
             u_name, description=u_desc, password=u_password,
             email=u_email)
-        self.assertTrue(resp['status'].startswith('2'))
+        self.assertEqual(201, resp.status)
         self.addCleanup(self.client.delete_user, user['id'])
         # Perform Authentication
         resp, body = self.token.auth(user['id'], u_password)
-        self.assertEqual(resp['status'], '201')
+        self.assertEqual(201, resp.status)
         subject_token = resp['x-subject-token']
         # Perform GET Token
         resp, token_details = self.client.get_token(subject_token)
-        self.assertEqual(resp['status'], '200')
+        self.assertEqual(200, resp.status)
         self.assertEqual(resp['x-subject-token'], subject_token)
         self.assertEqual(token_details['user']['id'], user['id'])
         self.assertEqual(token_details['user']['name'], u_name)
@@ -50,6 +50,115 @@
         self.assertRaises(exceptions.NotFound, self.client.get_token,
                           subject_token)
 
+    @test.attr(type='gate')
+    def test_rescope_token(self):
+        """Rescope a token.
+
+        An unscoped token can be requested; that token can then be used
+        to request a scoped token. The scoped token can be revoked, and
+        the original token used to get a token in a different project.
+
+        """
+
+        # Create a user.
+        user_name = data_utils.rand_name(name='user-')
+        user_password = data_utils.rand_name(name='pass-')
+        resp, user = self.client.create_user(user_name, password=user_password)
+        self.assertEqual(201, resp.status)
+        self.addCleanup(self.client.delete_user, user['id'])
+
+        # Create a couple projects
+        project1_name = data_utils.rand_name(name='project-')
+        resp, project1 = self.client.create_project(project1_name)
+        self.assertEqual(201, resp.status)
+        self.addCleanup(self.client.delete_project, project1['id'])
+
+        project2_name = data_utils.rand_name(name='project-')
+        resp, project2 = self.client.create_project(project2_name)
+        self.assertEqual(201, resp.status)
+        self.addCleanup(self.client.delete_project, project2['id'])
+
+        # Create a role
+        role_name = data_utils.rand_name(name='role-')
+        resp, role = self.client.create_role(role_name)
+        self.assertEqual(201, resp.status)
+        self.addCleanup(self.client.delete_role, role['id'])
+
+        # Grant the user the role on both projects.
+        resp, _ = self.client.assign_user_role(project1['id'], user['id'],
+                                               role['id'])
+        self.assertEqual(204, resp.status)
+
+        resp, _ = self.client.assign_user_role(project2['id'], user['id'],
+                                               role['id'])
+        self.assertEqual(204, resp.status)
+
+        # Get an unscoped token.
+        resp, token_auth = self.token.auth(user=user['id'],
+                                           password=user_password)
+        self.assertEqual(201, resp.status)
+
+        token_id = resp['x-subject-token']
+        orig_expires_at = token_auth['token']['expires_at']
+        orig_issued_at = token_auth['token']['issued_at']
+        orig_user = token_auth['token']['user']
+
+        self.assertIsInstance(token_auth['token']['expires_at'], unicode)
+        self.assertIsInstance(token_auth['token']['issued_at'], unicode)
+        self.assertEqual(['password'], token_auth['token']['methods'])
+        self.assertEqual(user['id'], token_auth['token']['user']['id'])
+        self.assertEqual(user['name'], token_auth['token']['user']['name'])
+        self.assertEqual('default',
+                         token_auth['token']['user']['domain']['id'])
+        self.assertEqual('Default',
+                         token_auth['token']['user']['domain']['name'])
+        self.assertNotIn('catalog', token_auth['token'])
+        self.assertNotIn('project', token_auth['token'])
+        self.assertNotIn('roles', token_auth['token'])
+
+        # Use the unscoped token to get a scoped token.
+        resp, token_auth = self.token.auth(token=token_id,
+                                           tenant=project1_name,
+                                           domain='Default')
+        token1_id = resp['x-subject-token']
+        self.assertEqual(201, resp.status)
+
+        self.assertEqual(orig_expires_at, token_auth['token']['expires_at'],
+                         'Expiration time should match original token')
+        self.assertIsInstance(token_auth['token']['issued_at'], unicode)
+        self.assertNotEqual(orig_issued_at, token_auth['token']['issued_at'])
+        self.assertEqual(set(['password', 'token']),
+                         set(token_auth['token']['methods']))
+        self.assertEqual(orig_user, token_auth['token']['user'],
+                         'User should match original token')
+        self.assertIsInstance(token_auth['token']['catalog'], list)
+        self.assertEqual(project1['id'],
+                         token_auth['token']['project']['id'])
+        self.assertEqual(project1['name'],
+                         token_auth['token']['project']['name'])
+        self.assertEqual('default',
+                         token_auth['token']['project']['domain']['id'])
+        self.assertEqual('Default',
+                         token_auth['token']['project']['domain']['name'])
+        self.assertEqual(1, len(token_auth['token']['roles']))
+        self.assertEqual(role['id'], token_auth['token']['roles'][0]['id'])
+        self.assertEqual(role['name'], token_auth['token']['roles'][0]['name'])
+
+        # Revoke the unscoped token.
+        resp, _ = self.client.delete_token(token1_id)
+        self.assertEqual(204, resp.status)
+
+        # Now get another scoped token using the unscoped token.
+        resp, token_auth = self.token.auth(token=token_id,
+                                           tenant=project2_name,
+                                           domain='Default')
+        self.assertEqual(201, resp.status)
+
+        self.assertEqual(project2['id'],
+                         token_auth['token']['project']['id'])
+        self.assertEqual(project2['name'],
+                         token_auth['token']['project']['name'])
+
 
 class TokensV3TestXML(TokensV3TestJSON):
     _interface = 'xml'
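
The v3 rescope test asserts on fields of the token body returned by
self.token.auth. A hedged sketch of the underlying Keystone v3 requests,
showing where the X-Subject-Token header and the asserted fields (methods,
project, roles, catalog) come from; the URL and names are illustrative
assumptions, not values from this patch::

    # Assumed Keystone v3 endpoint and names; illustrative only.
    import requests

    V3_URL = 'http://keystone.example.com:5000/v3'

    # Unscoped authentication by password (expects HTTP 201).
    resp = requests.post(V3_URL + '/auth/tokens', json={
        'auth': {'identity': {
            'methods': ['password'],
            'password': {'user': {'name': 'demo',
                                  'domain': {'id': 'default'},
                                  'password': 'secret'}}}}})
    unscoped_id = resp.headers['X-Subject-Token']
    unscoped = resp.json()['token']
    # No scope was requested, so no catalog/project/roles in the body.
    assert 'project' not in unscoped and 'catalog' not in unscoped

    # Rescope: authenticate by token, scoped to a project (expects 201).
    resp = requests.post(V3_URL + '/auth/tokens', json={
        'auth': {'identity': {'methods': ['token'],
                              'token': {'id': unscoped_id}},
                 'scope': {'project': {'name': 'project1',
                                       'domain': {'name': 'Default'}}}}})
    scoped_id = resp.headers['X-Subject-Token']
    scoped = resp.json()['token']
    # Scoped bodies carry the catalog, project and roles the test checks.
    assert scoped['project']['name'] == 'project1'

    # Token revocation is a DELETE with the admin token and the subject
    # token passed as headers (expects HTTP 204).
    requests.delete(V3_URL + '/auth/tokens',
                    headers={'X-Auth-Token': '<admin-token>',
                             'X-Subject-Token': scoped_id})
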
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index cae20ad..8e3a7d1 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -13,6 +13,7 @@
 import datetime
 import re
 from tempest.api.identity import base
+from tempest import auth
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
@@ -88,10 +89,13 @@
         self.assertIsNotNone(self.trustee_user_id)
 
         # Initialize a new client with the trustor credentials
-        os = clients.Manager(username=self.trustor_username,
-                             password=self.trustor_password,
-                             tenant_name=self.trustor_project_name,
-                             interface=self._interface)
+        creds = auth.get_credentials(
+            username=self.trustor_username,
+            password=self.trustor_password,
+            tenant_name=self.trustor_project_name)
+        os = clients.Manager(
+            credentials=creds,
+            interface=self._interface)
         self.trustor_client = os.identity_v3_client
 
     def cleanup_user_and_roles(self):
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index e1d1543..7316c7f 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -15,13 +15,13 @@
 
 from tempest.api.identity import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
     _interface = 'json'
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_user_update(self):
         # Test case to check if updating of user attributes is successful.
         # Creating first user
@@ -66,7 +66,7 @@
         self.assertEqual(u_email2, new_user_get['email'])
         self.assertEqual('false', str(new_user_get['enabled']).lower())
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_list_user_projects(self):
         # List the projects that a user has access upon
         assigned_project_ids = list()
@@ -120,7 +120,7 @@
                          ', '.join(m_project for m_project
                                    in missing_projects))
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_get_user(self):
         # Get a user detail
         self.data.setup_test_v3_user()
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index a5bf248..697057f 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 
+from tempest import auth
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
@@ -95,6 +96,7 @@
         cls.client = cls.os_adm.identity_v3_client
         cls.token = cls.os_adm.token_v3_client
         cls.endpoints_client = cls.os_adm.endpoints_client
+        cls.region_client = cls.os_adm.region_client
         cls.data = DataGenerator(cls.client)
         cls.non_admin_client = cls.os.identity_v3_client
         cls.service_client = cls.os_adm.service_client
@@ -120,6 +122,14 @@
             self.projects = []
             self.v3_roles = []
 
+        @property
+        def test_credentials(self):
+            return auth.get_credentials(username=self.test_user,
+                                        user_id=self.user['id'],
+                                        password=self.test_password,
+                                        tenant_name=self.test_tenant,
+                                        tenant_id=self.tenant['id'])
+
         def setup_test_user(self):
             """Set up a test user."""
             self.setup_test_tenant()
diff --git a/tempest/api/identity/test_extension.py b/tempest/api/identity/test_extension.py
new file mode 100644
index 0000000..67f20f4
--- /dev/null
+++ b/tempest/api/identity/test_extension.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest import test
+
+
+class ExtensionTestJSON(base.BaseIdentityV2AdminTest):
+    _interface = 'json'
+
+    @test.attr(type='gate')
+    def test_list_extensions(self):
+        # List all the extensions
+        resp, body = self.non_admin_client.list_extensions()
+        self.assertEqual(200, resp.status)
+        self.assertNotEmpty(body)
+        keys = ['name', 'updated', 'alias', 'links',
+                'namespace', 'description']
+        for value in body:
+            for key in keys:
+                self.assertIn(key, value)
+
+
+class ExtensionTestXML(ExtensionTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index e439238..9981292 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -42,11 +42,7 @@
             skip_msg = ("%s skipped as glance is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
         if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_primary_creds()
-            username, tenant_name, password = creds
-            cls.os = clients.Manager(username=username,
-                                     password=password,
-                                     tenant_name=tenant_name)
+            cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
         else:
             cls.os = clients.Manager()
 
@@ -96,19 +92,12 @@
     def setUpClass(cls):
         super(BaseV1ImageMembersTest, cls).setUpClass()
         if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_alt_creds()
-            username, tenant_name, password = creds
-            cls.os_alt = clients.Manager(username=username,
-                                         password=password,
-                                         tenant_name=tenant_name)
-            cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
+            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
         else:
             cls.os_alt = clients.AltManager()
-            identity_client = cls._get_identity_admin_client()
-            cls.alt_tenant_id = identity_client.get_tenant_by_name(
-                cls.os_alt.credentials['tenant_name'])['id']
 
         cls.alt_img_cli = cls.os_alt.image_client
+        cls.alt_tenant_id = cls.alt_img_cli.tenant_id
 
     def _create_image(self):
         image_file = StringIO.StringIO('*' * 1024)
@@ -139,20 +128,12 @@
         super(BaseV2MemberImageTest, cls).setUpClass()
         if CONF.compute.allow_tenant_isolation:
             creds = cls.isolated_creds.get_alt_creds()
-            username, tenant_name, password = creds
-            cls.os_alt = clients.Manager(username=username,
-                                         password=password,
-                                         tenant_name=tenant_name,
-                                         interface=cls._interface)
-            cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
+            cls.os_alt = clients.Manager(creds)
         else:
             cls.os_alt = clients.AltManager()
-            alt_tenant_name = cls.os_alt.credentials['tenant_name']
-            identity_client = cls._get_identity_admin_client()
-            cls.alt_tenant_id = identity_client.get_tenant_by_name(
-                alt_tenant_name)['id']
         cls.os_img_client = cls.os.image_client_v2
         cls.alt_img_client = cls.os_alt.image_client_v2
+        cls.alt_tenant_id = cls.alt_img_client.tenant_id
 
     def _list_image_ids_as_alt(self):
         _, image_list = self.alt_img_client.image_list()
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 517123d..2df3f7f 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -33,12 +33,12 @@
         resp, body = self.create_image(name='New Name',
                                        container_format='bare',
                                        disk_format='raw',
-                                       is_public=True,
+                                       is_public=False,
                                        properties=properties)
         self.assertIn('id', body)
         image_id = body.get('id')
         self.assertEqual('New Name', body.get('name'))
-        self.assertTrue(body.get('is_public'))
+        self.assertFalse(body.get('is_public'))
         self.assertEqual('queued', body.get('status'))
         for key, val in properties.items():
             self.assertEqual(val, body.get('properties')[key])
@@ -54,14 +54,14 @@
         # Register a new remote image
         resp, body = self.create_image(name='New Remote Image',
                                        container_format='bare',
-                                       disk_format='raw', is_public=True,
+                                       disk_format='raw', is_public=False,
                                        location='http://example.com'
                                                 '/someimage.iso',
                                        properties={'key1': 'value1',
                                                    'key2': 'value2'})
         self.assertIn('id', body)
         self.assertEqual('New Remote Image', body.get('name'))
-        self.assertTrue(body.get('is_public'))
+        self.assertFalse(body.get('is_public'))
         self.assertEqual('active', body.get('status'))
         properties = body.get('properties')
         self.assertEqual(properties['key1'], 'value1')
@@ -71,12 +71,12 @@
     def test_register_http_image(self):
         resp, body = self.create_image(name='New Http Image',
                                        container_format='bare',
-                                       disk_format='raw', is_public=True,
+                                       disk_format='raw', is_public=False,
                                        copy_from=CONF.image.http_image)
         self.assertIn('id', body)
         image_id = body.get('id')
         self.assertEqual('New Http Image', body.get('name'))
-        self.assertTrue(body.get('is_public'))
+        self.assertFalse(body.get('is_public'))
         self.client.wait_for_image_status(image_id, 'active')
         resp, body = self.client.get_image(image_id)
         self.assertEqual(resp['status'], '200')
@@ -88,12 +88,12 @@
         resp, body = self.create_image(name='New_image_with_min_ram',
                                        container_format='bare',
                                        disk_format='raw',
-                                       is_public=True,
+                                       is_public=False,
                                        min_ram=40,
                                        properties=properties)
         self.assertIn('id', body)
         self.assertEqual('New_image_with_min_ram', body.get('name'))
-        self.assertTrue(body.get('is_public'))
+        self.assertFalse(body.get('is_public'))
         self.assertEqual('queued', body.get('status'))
         self.assertEqual(40, body.get('min_ram'))
         for key, val in properties.items():
@@ -109,6 +109,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ListImagesTest, cls).setUpClass()
         # We add a few images here to test the listing functionality of
@@ -146,7 +147,7 @@
         resp, image = cls.create_image(name=name,
                                        container_format=container_format,
                                        disk_format=disk_format,
-                                       is_public=True,
+                                       is_public=False,
                                        location=location)
         image_id = image['id']
         return image_id
@@ -164,7 +165,7 @@
         resp, image = cls.create_image(name=name,
                                        container_format=container_format,
                                        disk_format=disk_format,
-                                       is_public=True, data=image_file)
+                                       is_public=False, data=image_file)
         image_id = image['id']
         return image_id
 
@@ -244,12 +245,19 @@
 
 class ListSnapshotImagesTest(base.BaseV1ImageTest):
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
-        super(ListSnapshotImagesTest, cls).setUpClass()
+        # This test class only uses the nova v3 api to create snapshots,
+        # as a similar test using the nova v2 api already exists in the
+        # nova v2 compute images api tests.
+        # Since nova v3 doesn't have an images api proxy, this test
+        # class was added to the image api tests.
         if not CONF.compute_feature_enabled.api_v3:
-            cls.servers_client = cls.os.servers_client
-        else:
-            cls.servers_client = cls.os.servers_v3_client
+            skip_msg = ("%s skipped as nova v3 api is not available" %
+                        cls.__name__)
+            raise cls.skipException(skip_msg)
+        super(ListSnapshotImagesTest, cls).setUpClass()
+        cls.servers_client = cls.os.servers_v3_client
         cls.servers = []
         # We add a few images here to test the listing functionality of
         # the images API
@@ -262,13 +270,13 @@
         resp, image = cls.create_image(name="Standard Image",
                                        container_format='ami',
                                        disk_format='ami',
-                                       is_public=True, data=image_file)
+                                       is_public=False, data=image_file)
         cls.image_id = image['id']
         cls.client.wait_for_image_status(image['id'], 'active')
 
     @classmethod
     def tearDownClass(cls):
-        for server in cls.servers:
+        for server in getattr(cls, "servers", []):
             cls.servers_client.delete_server(server['id'])
         super(ListSnapshotImagesTest, cls).tearDownClass()
 
@@ -279,8 +287,7 @@
         cls.servers.append(server)
         cls.servers_client.wait_for_server_status(
             server['id'], 'ACTIVE')
-        resp, image = cls.servers_client.create_image(
-            server['id'], name)
+        resp, _ = cls.servers_client.create_image(server['id'], name)
         image_id = data_utils.parse_image_id(resp['location'])
         cls.created_images.append(image_id)
         cls.client.wait_for_image_status(image_id,
@@ -288,6 +295,7 @@
         return image_id
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_server_id(self):
         # The images should contain images filtered by server id
         resp, images = self.client.image_list_detail(
@@ -297,6 +305,7 @@
         self.assertEqual(self.snapshot_set, result_set)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_type(self):
         # The list of servers should be filtered by image type
         params = {'image_type': 'snapshot'}
@@ -307,6 +316,7 @@
         self.assertIn(self.snapshot, result_set)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_limit(self):
         # Verify only the expected number of results are returned
         resp, images = self.client.image_list_detail(limit=1)
@@ -315,6 +325,7 @@
         self.assertEqual(1, len(images))
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_by_change_since(self):
         # Verify an update image is returned
         # Becoming ACTIVE will modify the updated time
@@ -350,7 +361,7 @@
         resp, image = cls.create_image(name=name,
                                        container_format=container_format,
                                        disk_format=disk_format,
-                                       is_public=True, data=image_file,
+                                       is_public=False, data=image_file,
                                        properties={'key1': 'value1'})
         image_id = image['id']
         return image_id
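
The @test.safe_setup decorator applied to setUpClass above guards class-level setup: if setup fails partway, class-level teardown still runs. A rough sketch of the pattern, simplified and not necessarily tempest's exact implementation::

    import functools

    def safe_setup(f):
        """Run tearDownClass if the wrapped setUpClass raises, then re-raise."""
        @functools.wraps(f)
        def decorator(cls):
            try:
                f(cls)
            except Exception:
                cls.tearDownClass()
                raise
        return decorator

This is also why tearDownClass above now iterates getattr(cls, "servers", []): cleanup has to tolerate a setup that raised before cls.servers was assigned.
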
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index abde8f7..37dc163 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -35,17 +35,19 @@
         upload the image file, get image and get image file api's
         """
 
+        uuid = '00000000-1111-2222-3333-444455556666'
         image_name = data_utils.rand_name('image')
         resp, body = self.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='raw',
-                                       visibility='public')
+                                       visibility='private',
+                                       ramdisk_id=uuid)
         self.assertIn('id', body)
         image_id = body.get('id')
         self.assertIn('name', body)
         self.assertEqual(image_name, body['name'])
         self.assertIn('visibility', body)
-        self.assertEqual('public', body['visibility'])
+        self.assertEqual('private', body['visibility'])
         self.assertIn('status', body)
         self.assertEqual('queued', body['status'])
 
@@ -60,6 +62,7 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(image_id, body['id'])
         self.assertEqual(image_name, body['name'])
+        self.assertEqual(uuid, body['ramdisk_id'])
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
@@ -77,7 +80,7 @@
         resp, body = self.client.create_image(name=image_name,
                                               container_format='bare',
                                               disk_format='raw',
-                                              visibility='public')
+                                              visibility='private')
         self.assertEqual(201, resp.status)
         image_id = body['id']
 
@@ -99,7 +102,7 @@
         resp, body = self.client.create_image(name=image_name,
                                               container_format='bare',
                                               disk_format='iso',
-                                              visibility='public')
+                                              visibility='private')
         self.assertEqual(201, resp.status)
         self.addCleanup(self.client.delete_image, body['id'])
         self.assertEqual('queued', body['status'])
@@ -113,10 +116,8 @@
 
         # Update Image
         new_image_name = data_utils.rand_name('new-image')
-        new_visibility = 'private'
         resp, body = self.client.update_image(image_id, [
-            dict(replace='/name', value=new_image_name),
-            dict(replace='/visibility', value=new_visibility)])
+            dict(replace='/name', value=new_image_name)])
 
         self.assertEqual(200, resp.status)
 
@@ -126,7 +127,6 @@
         self.assertEqual(200, resp.status)
         self.assertEqual(image_id, body['id'])
         self.assertEqual(new_image_name, body['name'])
-        self.assertEqual(new_visibility, body['visibility'])
 
 
 class ListImagesTest(base.BaseV2ImageTest):
@@ -135,6 +135,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ListImagesTest, cls).setUpClass()
         # We add a few images here to test the listing functionality of
@@ -159,7 +160,7 @@
         resp, body = cls.create_image(name=name,
                                       container_format=container_format,
                                       disk_format=disk_format,
-                                      visibility='public')
+                                      visibility='private')
         image_id = body['id']
         resp, body = cls.client.store_image(image_id, data=image_file)
 
@@ -201,8 +202,8 @@
 
     @test.attr(type='gate')
     def test_list_images_param_visibility(self):
-        # Test to get all images with visibility = public
-        params = {"visibility": "public"}
+        # Test to get all images with visibility = private
+        params = {"visibility": "private"}
         self._list_by_param_value_and_assert(params)
 
     @test.attr(type='gate')
diff --git a/tempest/api/image/v2/test_images_tags.py b/tempest/api/image/v2/test_images_tags.py
index 504c0e8..dec3353 100644
--- a/tempest/api/image/v2/test_images_tags.py
+++ b/tempest/api/image/v2/test_images_tags.py
@@ -23,7 +23,7 @@
     def test_update_delete_tags_for_image(self):
         resp, body = self.create_image(container_format='bare',
                                        disk_format='raw',
-                                       visibility='public')
+                                       visibility='private')
         image_id = body['id']
         tag = data_utils.rand_name('tag-')
         self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 3233db7..13cfa0a 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -35,7 +35,7 @@
         # Delete non existing tag.
         resp, body = self.create_image(container_format='bare',
                                        disk_format='raw',
-                                       is_public=True,
+                                       visibility='private'
                                        )
         image_id = body['id']
         tag = data_utils.rand_name('non-exist-tag-')
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 342bc6a..b848994 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -37,8 +37,10 @@
         agents = body['agents']
         # Heartbeats must be excluded from comparison
         self.agent.pop('heartbeat_timestamp', None)
+        self.agent.pop('configurations', None)
         for agent in agents:
             agent.pop('heartbeat_timestamp', None)
+            agent.pop('configurations', None)
         self.assertIn(self.agent, agents)
 
     @test.attr(type=['smoke'])
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 0e601d1..25e1cc0 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -20,6 +20,7 @@
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(DHCPAgentSchedulersTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
@@ -59,17 +60,31 @@
         return network_id in network_ids
 
     @test.attr(type='smoke')
-    def test_remove_network_from_dhcp_agent(self):
+    def test_add_remove_network_from_dhcp_agent(self):
         # The agent is now bound to the network, we can free the port
         self.client.delete_port(self.port['id'])
         self.ports.remove(self.port)
-        resp, body = self.admin_client.list_dhcp_agent_hosting_network(
-            self.network['id'])
+        agent = dict()
+        agent['agent_type'] = None
+        resp, body = self.admin_client.list_agents()
         agents = body['agents']
-        self.assertIsNotNone(agents)
-        # Get an agent.
-        agent = agents[0]
-        network_id = self.network['id']
+        for a in agents:
+            if a['agent_type'] == 'DHCP agent':
+                agent = a
+                break
+        self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
+                         'DHCP agent in agent list though dhcp_agent_scheduler'
+                         ' is enabled.')
+        network = self.create_network()
+        network_id = network['id']
+        if self._check_network_in_dhcp_agent(network_id, agent):
+            self._remove_network_from_dhcp_agent(network_id, agent)
+            self._add_dhcp_agent_to_network(network_id, agent)
+        else:
+            self._add_dhcp_agent_to_network(network_id, agent)
+            self._remove_network_from_dhcp_agent(network_id, agent)
+
+    def _remove_network_from_dhcp_agent(self, network_id, agent):
         resp, body = self.admin_client.remove_network_from_dhcp_agent(
             agent_id=agent['id'],
             network_id=network_id)
@@ -77,6 +92,13 @@
         self.assertFalse(self._check_network_in_dhcp_agent(
             network_id, agent))
 
+    def _add_dhcp_agent_to_network(self, network_id, agent):
+        resp, body = self.admin_client.add_dhcp_agent_to_network(
+            agent['id'], network_id)
+        self.assertEqual(resp['status'], '201')
+        self.assertTrue(self._check_network_in_dhcp_agent(
+            network_id, agent))
+
 
 class DHCPAgentSchedulersTestXML(DHCPAgentSchedulersTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
new file mode 100644
index 0000000..5728432
--- /dev/null
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -0,0 +1,72 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.network import base
+from tempest import clients
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
+    _interface = 'json'
+    force_tenant_isolation = True
+
+    @classmethod
+    def setUpClass(cls):
+        super(FloatingIPAdminTestJSON, cls).setUpClass()
+        cls.ext_net_id = CONF.network.public_network_id
+        cls.floating_ip = cls.create_floatingip(cls.ext_net_id)
+        cls.alt_manager = clients.Manager(cls.isolated_creds.get_alt_creds())
+        cls.alt_client = cls.alt_manager.network_client
+
+    @test.attr(type='smoke')
+    def test_list_floating_ips_from_admin_and_nonadmin(self):
+        # Create floating ip from admin user
+        resp, floating_ip_admin = self.admin_client.create_floatingip(
+            floating_network_id=self.ext_net_id)
+        self.assertEqual('201', resp['status'])
+        self.addCleanup(self.admin_client.delete_floatingip,
+                        floating_ip_admin['floatingip']['id'])
+        # Create floating ip from alt user
+        resp, body = self.alt_client.create_floatingip(
+            floating_network_id=self.ext_net_id)
+        self.assertEqual('201', resp['status'])
+        floating_ip_alt = body['floatingip']
+        self.addCleanup(self.alt_client.delete_floatingip,
+                        floating_ip_alt['id'])
+        # List floating ips from admin
+        resp, body = self.admin_client.list_floatingips()
+        self.assertEqual('200', resp['status'])
+        floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
+        # Check that admin sees all floating ips
+        self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
+        self.assertIn(floating_ip_admin['floatingip']['id'],
+                      floating_ip_ids_admin)
+        self.assertIn(floating_ip_alt['id'], floating_ip_ids_admin)
+        # List floating ips from nonadmin
+        resp, body = self.client.list_floatingips()
+        floating_ip_ids = [f['id'] for f in body['floatingips']]
+        # Check that nonadmin user doesn't see floating ip created from admin
+        # and floating ip that is created in another tenant (alt user)
+        self.assertIn(self.floating_ip['id'], floating_ip_ids)
+        self.assertNotIn(floating_ip_admin['floatingip']['id'],
+                         floating_ip_ids)
+        self.assertNotIn(floating_ip_alt['id'], floating_ip_ids)
+
+
+class FloatingIPAdminTestXML(FloatingIPAdminTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index f4050c5..3b05f42 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -28,8 +28,9 @@
         List L3 agents hosting the given router.
         Add and Remove Router to L3 agent
 
-    v2.0 of the Neutron API is assumed. It is also assumed that the following
-    options are defined in the [network] section of etc/tempest.conf:
+    v2.0 of the Neutron API is assumed.
+
+    The l3_agent_scheduler extension is required for these tests.
     """
 
     @classmethod
diff --git a/tempest/api/network/admin/test_lbaas_agent_scheduler.py b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
index a5ba90f..675c62d 100644
--- a/tempest/api/network/admin/test_lbaas_agent_scheduler.py
+++ b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
@@ -35,6 +35,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(LBaaSAgentSchedulerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
index 34a8e32..fe4fc60 100644
--- a/tempest/api/network/admin/test_load_balancer_admin_actions.py
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -29,6 +29,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(LoadBalancerAdminTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas', 'network'):
@@ -37,11 +38,11 @@
         cls.force_tenant_isolation = True
         manager = cls.get_client_manager()
         cls.client = manager.network_client
-        username, tenant_name, passwd = cls.isolated_creds.get_primary_creds()
-        cls.tenant_id = cls.os_adm.identity_client.get_tenant_by_name(
-            tenant_name)['id']
+        cls.tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
+        cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
+                                   "ROUND_ROBIN", "HTTP", cls.subnet)
 
     @test.attr(type='smoke')
     def test_create_vip_as_admin_for_another_tenant(self):
@@ -89,6 +90,29 @@
         show_health_monitor = body['health_monitor']
         self.assertEqual(health_monitor['id'], show_health_monitor['id'])
 
+    @test.attr(type='smoke')
+    def test_create_pool_from_admin_user_other_tenant(self):
+        resp, body = self.admin_client.create_pool(
+            name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
+            protocol="HTTP", subnet_id=self.subnet['id'],
+            tenant_id=self.tenant_id)
+        self.assertEqual('201', resp['status'])
+        pool = body['pool']
+        self.addCleanup(self.admin_client.delete_pool, pool['id'])
+        self.assertIsNotNone(pool['id'])
+        self.assertEqual(self.tenant_id, pool['tenant_id'])
+
+    @test.attr(type='smoke')
+    def test_create_member_from_admin_user_other_tenant(self):
+        resp, body = self.admin_client.create_member(
+            address="10.0.9.47", protocol_port=80, pool_id=self.pool['id'],
+            tenant_id=self.tenant_id)
+        self.assertEqual('201', resp['status'])
+        member = body['member']
+        self.addCleanup(self.admin_client.delete_member, member['id'])
+        self.assertIsNotNone(member['id'])
+        self.assertEqual(self.tenant_id, member['tenant_id'])
+
 
 class LoadBalancerAdminTestXML(LoadBalancerAdminTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index f92ad68..cc768fd 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -79,9 +79,21 @@
         cls.floating_ips = []
         cls.metering_labels = []
         cls.metering_label_rules = []
+        cls.fw_rules = []
+        cls.fw_policies = []
+        cls.ipsecpolicies = []
 
     @classmethod
     def tearDownClass(cls):
+        # Clean up ipsec policies
+        for ipsecpolicy in cls.ipsecpolicies:
+            cls.client.delete_ipsecpolicy(ipsecpolicy['id'])
+        # Clean up firewall policies
+        for fw_policy in cls.fw_policies:
+            cls.client.delete_firewall_policy(fw_policy['id'])
+        # Clean up firewall rules
+        for fw_rule in cls.fw_rules:
+            cls.client.delete_firewall_rule(fw_rule['id'])
         # Clean up ike policies
         for ikepolicy in cls.ikepolicies:
             cls.client.delete_ikepolicy(ikepolicy['id'])
@@ -93,12 +105,8 @@
             cls.client.delete_floatingip(floating_ip['id'])
         # Clean up routers
         for router in cls.routers:
-            resp, body = cls.client.list_router_interfaces(router['id'])
-            interfaces = body['ports']
-            for i in interfaces:
-                cls.client.remove_router_interface_with_subnet_id(
-                    router['id'], i['fixed_ips'][0]['subnet_id'])
-            cls.client.delete_router(router['id'])
+            cls.delete_router(router)
+
         # Clean up health monitors
         for health_monitor in cls.health_monitors:
             cls.client.delete_health_monitor(health_monitor['id'])
@@ -141,25 +149,31 @@
         return network
 
     @classmethod
-    def create_subnet(cls, network):
+    def create_subnet(cls, network, gateway=None, cidr=None, mask_bits=None):
         """Wrapper utility that returns a test subnet."""
         # The cidr and mask_bits depend on the ip version.
         if cls._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
-            mask_bits = CONF.network.tenant_network_mask_bits
+            cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+            mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
         elif cls._ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
-            mask_bits = CONF.network.tenant_network_v6_mask_bits
+            cidr = (
+                cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
+            mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
         # Find a cidr that is not in use yet and create a subnet with it
         for subnet_cidr in cidr.subnet(mask_bits):
+            if not gateway:
+                gateway = str(netaddr.IPAddress(subnet_cidr) + 1)
             try:
                 resp, body = cls.client.create_subnet(
                     network_id=network['id'],
                     cidr=str(subnet_cidr),
-                    ip_version=cls._ip_version)
+                    ip_version=cls._ip_version,
+                    gateway_ip=gateway)
                 break
             except exceptions.BadRequest as e:
                 is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+                # Unset gateway value if there is an overlapping subnet
+                gateway = None
                 if not is_overlapping_cidr:
                     raise
         else:
@@ -170,14 +184,22 @@
         return subnet
 
     @classmethod
-    def create_port(cls, network):
+    def create_port(cls, network, **kwargs):
         """Wrapper utility that returns a test port."""
-        resp, body = cls.client.create_port(network_id=network['id'])
+        resp, body = cls.client.create_port(network_id=network['id'],
+                                            **kwargs)
         port = body['port']
         cls.ports.append(port)
         return port
 
     @classmethod
+    def update_port(cls, port, **kwargs):
+        """Wrapper utility that updates a test port."""
+        resp, body = cls.client.update_port(port['id'],
+                                            **kwargs)
+        return body['port']
+
+    @classmethod
     def create_router(cls, router_name=None, admin_state_up=False,
                       external_network_id=None, enable_snat=None):
         ext_gw_info = {}
@@ -282,7 +304,7 @@
     def create_vpnservice(cls, subnet_id, router_id):
         """Wrapper utility that returns a test vpn service."""
         resp, body = cls.client.create_vpnservice(
-            subnet_id, router_id, admin_state_up=True,
+            subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
             name=data_utils.rand_name("vpnservice-"))
         vpnservice = body['vpnservice']
         cls.vpnservices.append(vpnservice)
@@ -291,11 +313,48 @@
     @classmethod
     def create_ikepolicy(cls, name):
         """Wrapper utility that returns a test ike policy."""
-        resp, body = cls.client.create_ikepolicy(name)
+        resp, body = cls.client.create_ikepolicy(name=name)
         ikepolicy = body['ikepolicy']
         cls.ikepolicies.append(ikepolicy)
         return ikepolicy
 
+    @classmethod
+    def create_firewall_rule(cls, action, protocol):
+        """Wrapper utility that returns a test firewall rule."""
+        resp, body = cls.client.create_firewall_rule(
+            name=data_utils.rand_name("fw-rule"),
+            action=action,
+            protocol=protocol)
+        fw_rule = body['firewall_rule']
+        cls.fw_rules.append(fw_rule)
+        return fw_rule
+
+    @classmethod
+    def create_firewall_policy(cls):
+        """Wrapper utility that returns a test firewall policy."""
+        resp, body = cls.client.create_firewall_policy(
+            name=data_utils.rand_name("fw-policy"))
+        fw_policy = body['firewall_policy']
+        cls.fw_policies.append(fw_policy)
+        return fw_policy
+
+    @classmethod
+    def delete_router(cls, router):
+        resp, body = cls.client.list_router_interfaces(router['id'])
+        interfaces = body['ports']
+        for i in interfaces:
+            cls.client.remove_router_interface_with_subnet_id(
+                router['id'], i['fixed_ips'][0]['subnet_id'])
+        cls.client.delete_router(router['id'])
+
+    @classmethod
+    def create_ipsecpolicy(cls, name):
+        """Wrapper utility that returns a test ipsec policy."""
+        _, body = cls.client.create_ipsecpolicy(name=name)
+        ipsecpolicy = body['ipsecpolicy']
+        cls.ipsecpolicies.append(ipsecpolicy)
+        return ipsecpolicy
+
 
 class BaseAdminNetworkTest(BaseNetworkTest):
 
@@ -311,11 +370,7 @@
             raise cls.skipException(msg)
         if (CONF.compute.allow_tenant_isolation or
             cls.force_tenant_isolation is True):
-            creds = cls.isolated_creds.get_admin_creds()
-            admin_username, admin_tenant_name, admin_password = creds
-            cls.os_adm = clients.Manager(username=admin_username,
-                                         password=admin_password,
-                                         tenant_name=admin_tenant_name,
+            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
                                          interface=cls._interface)
         else:
             cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
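
create_subnet now takes optional gateway, cidr and mask_bits arguments, defaulting the gateway to the first host address of whichever candidate subnet turns out to be free; a minimal usage sketch (the addresses here are illustrative)::

    import netaddr

    # Sketch only: pin the CIDR and gateway instead of relying on the
    # tenant_network_cidr defaults from tempest.conf.
    subnet = cls.create_subnet(cls.network,
                               gateway='10.100.1.1',
                               cidr=netaddr.IPNetwork('10.100.1.0/24'),
                               mask_bits=26)
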
diff --git a/tempest/api/network/base_routers.py b/tempest/api/network/base_routers.py
index b278002..1303bcf 100644
--- a/tempest/api/network/base_routers.py
+++ b/tempest/api/network/base_routers.py
@@ -37,6 +37,15 @@
             routers_list.append(router['id'])
         self.assertNotIn(router_id, routers_list)
 
+    def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
+        resp, interface = self.client.add_router_interface_with_subnet_id(
+            router_id, subnet_id)
+        self.assertEqual('200', resp['status'])
+        self.addCleanup(self._remove_router_interface_with_subnet_id,
+                        router_id, subnet_id)
+        self.assertEqual(subnet_id, interface['subnet_id'])
+        return interface
+
     def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
         resp, body = self.client.remove_router_interface_with_subnet_id(
             router_id, subnet_id)
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
new file mode 100644
index 0000000..e0e26da
--- /dev/null
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -0,0 +1,83 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.network import base
+from tempest import test
+
+
+class AllowedAddressPairTestJSON(base.BaseNetworkTest):
+    _interface = 'json'
+
+    """
+    Tests the Neutron Allowed Address Pair API extension using the Tempest
+    ReST client. The following API operations are tested with this extension:
+
+        create port
+        list ports
+        update port
+        show port
+
+    v2.0 of the Neutron API is assumed. It is also assumed that the following
+    options are defined in the [network-feature-enabled] section of
+    etc/tempest.conf
+
+        api_extensions
+    """
+
+    @classmethod
+    @test.safe_setup
+    def setUpClass(cls):
+        super(AllowedAddressPairTestJSON, cls).setUpClass()
+        if not test.is_extension_enabled('allowed-address-pairs', 'network'):
+            msg = "Allowed Address Pairs extension not enabled."
+            raise cls.skipException(msg)
+        cls.network = cls.create_network()
+        cls.create_subnet(cls.network)
+        port = cls.create_port(cls.network)
+        cls.ip_address = port['fixed_ips'][0]['ip_address']
+        cls.mac_address = port['mac_address']
+
+    @test.attr(type='smoke')
+    def test_create_list_port_with_address_pair(self):
+        # Create port with allowed address pair attribute
+        allowed_address_pairs = [{'ip_address': self.ip_address,
+                                  'mac_address': self.mac_address}]
+        resp, body = self.client.create_port(
+            network_id=self.network['id'],
+            allowed_address_pairs=allowed_address_pairs)
+        self.assertEqual('201', resp['status'])
+        port_id = body['port']['id']
+        self.addCleanup(self.client.delete_port, port_id)
+
+        # Confirm port was created with allowed address pair attribute
+        resp, body = self.client.list_ports()
+        self.assertEqual('200', resp['status'])
+        ports = body['ports']
+        port = [p for p in ports if p['id'] == port_id]
+        msg = 'Created port not found in list of ports returned by Neutron'
+        self.assertTrue(port, msg)
+        self._confirm_allowed_address_pair(port[0], self.ip_address)
+
+    def _confirm_allowed_address_pair(self, port, ip):
+        msg = 'Port allowed address pairs should not be empty'
+        self.assertTrue(port['allowed_address_pairs'], msg)
+        ip_address = port['allowed_address_pairs'][0]['ip_address']
+        mac_address = port['allowed_address_pairs'][0]['mac_address']
+        self.assertEqual(ip_address, ip)
+        self.assertEqual(mac_address, self.mac_address)
+
+
+class AllowedAddressPairTestXML(AllowedAddressPairTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index ed86d75..371c651 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -36,6 +36,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ExtraDHCPOptionsTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 06871ad..2463654 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import netaddr
+
 from tempest.api.network import base
 from tempest.common.utils import data_utils
 from tempest import config
@@ -44,6 +46,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(FloatingIPTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('router', 'network'):
@@ -77,6 +80,8 @@
         self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
         self.assertEqual(created_floating_ip['floating_network_id'],
                          self.ext_net_id)
+        self.assertIn(created_floating_ip['fixed_ip_address'],
+                      [ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
         # Verifies the details of a floating_ip
         resp, floating_ip = self.client.show_floatingip(
             created_floating_ip['id'])
@@ -171,6 +176,54 @@
                          port_other_router['id'])
         self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])
 
+    @test.attr(type='smoke')
+    def test_create_floating_ip_specifying_a_fixed_ip_address(self):
+        resp, body = self.client.create_floatingip(
+            floating_network_id=self.ext_net_id,
+            port_id=self.ports[1]['id'],
+            fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
+        self.assertEqual('201', resp['status'])
+        created_floating_ip = body['floatingip']
+        self.addCleanup(self.client.delete_floatingip,
+                        created_floating_ip['id'])
+        self.assertIsNotNone(created_floating_ip['id'])
+        self.assertEqual(created_floating_ip['fixed_ip_address'],
+                         self.ports[1]['fixed_ips'][0]['ip_address'])
+        resp, floating_ip = self.client.update_floatingip(
+            created_floating_ip['id'], port_id=None)
+        self.assertEqual('200', resp['status'])
+        self.assertIsNone(floating_ip['floatingip']['port_id'])
+
+    @test.attr(type='smoke')
+    def test_create_update_floatingip_with_port_multiple_ip_address(self):
+        # Find out ips that can be used for tests
+        ips = list(netaddr.IPNetwork(self.subnet['cidr']))
+        list_ips = [str(ip) for ip in ips[-3:-1]]
+        fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
+        # Create port
+        resp, body = self.client.create_port(network_id=self.network['id'],
+                                             fixed_ips=fixed_ips)
+        self.assertEqual('201', resp['status'])
+        port = body['port']
+        self.addCleanup(self.client.delete_port, port['id'])
+        # Create floating ip
+        resp, body = self.client.create_floatingip(
+            floating_network_id=self.ext_net_id, port_id=port['id'],
+            fixed_ip_address=list_ips[0])
+        self.assertEqual('201', resp['status'])
+        floating_ip = body['floatingip']
+        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
+        self.assertIsNotNone(floating_ip['id'])
+        self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
+        # Update floating ip
+        resp, body = self.client.update_floatingip(
+            floating_ip['id'], port_id=port['id'],
+            fixed_ip_address=list_ips[1])
+        self.assertEqual('200', resp['status'])
+        update_floating_ip = body['floatingip']
+        self.assertEqual(update_floating_ip['fixed_ip_address'],
+                         list_ips[1])
+
 
 class FloatingIPTestXML(FloatingIPTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
new file mode 100644
index 0000000..555cbda
--- /dev/null
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -0,0 +1,235 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+CONF = config.CONF
+
+
+class FWaaSExtensionTestJSON(base.BaseNetworkTest):
+    _interface = 'json'
+
+    """
+    Tests the following operations in the Neutron API using the REST client for
+    Neutron:
+
+        List firewall rules
+        Create firewall rule
+        Update firewall rule
+        Delete firewall rule
+        Show firewall rule
+        List firewall policies
+        Create firewall policy
+        Update firewall policy
+        Delete firewall policy
+        Show firewall policy
+        List firewall
+        Create firewall
+        Update firewall
+        Delete firewall
+        Show firewall
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(FWaaSExtensionTestJSON, cls).setUpClass()
+        if not test.is_extension_enabled('fwaas', 'network'):
+            msg = "FWaaS Extension not enabled."
+            raise cls.skipException(msg)
+        cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
+        cls.fw_policy = cls.create_firewall_policy()
+
+    def _try_delete_policy(self, policy_id):
+        # delete policy, if it exists
+        try:
+            self.client.delete_firewall_policy(policy_id)
+        # if policy is not found, this means it was deleted in the test
+        except exceptions.NotFound:
+            pass
+
+    def _try_delete_firewall(self, fw_id):
+        # delete firewall, if it exists
+        try:
+            self.client.delete_firewall(fw_id)
+        # if firewall is not found, this means it was deleted in the test
+        except exceptions.NotFound:
+            pass
+
+        self.client.wait_for_resource_deletion('firewall', fw_id)
+
+    def _wait_for_active(self, fw_id):
+        def _wait():
+            resp, firewall = self.client.show_firewall(fw_id)
+            self.assertEqual('200', resp['status'])
+            firewall = firewall['firewall']
+            return firewall['status'] == 'ACTIVE'
+
+        if not test.call_until_true(_wait, CONF.network.build_timeout,
+                                    CONF.network.build_interval):
+            m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+            raise exceptions.TimeoutException(m)
+
+    @test.attr(type='smoke')
+    def test_list_firewall_rules(self):
+        # List firewall rules
+        resp, fw_rules = self.client.list_firewall_rules()
+        self.assertEqual('200', resp['status'])
+        fw_rules = fw_rules['firewall_rules']
+        self.assertIn((self.fw_rule['id'],
+                       self.fw_rule['name'],
+                       self.fw_rule['action'],
+                       self.fw_rule['protocol'],
+                       self.fw_rule['ip_version'],
+                       self.fw_rule['enabled']),
+                      [(m['id'],
+                        m['name'],
+                        m['action'],
+                        m['protocol'],
+                        m['ip_version'],
+                        m['enabled']) for m in fw_rules])
+
+    @test.attr(type='smoke')
+    def test_create_update_delete_firewall_rule(self):
+        # Create firewall rule
+        resp, body = self.client.create_firewall_rule(
+            name=data_utils.rand_name("fw-rule"),
+            action="allow",
+            protocol="tcp")
+        self.assertEqual('201', resp['status'])
+        fw_rule_id = body['firewall_rule']['id']
+
+        # Update firewall rule
+        resp, body = self.client.update_firewall_rule(fw_rule_id,
+                                                      shared=True)
+        self.assertEqual('200', resp['status'])
+        self.assertTrue(body["firewall_rule"]['shared'])
+
+        # Delete firewall rule
+        resp, _ = self.client.delete_firewall_rule(fw_rule_id)
+        self.assertEqual('204', resp['status'])
+        # Confirm deletion
+        resp, fw_rules = self.client.list_firewall_rules()
+        self.assertNotIn(fw_rule_id,
+                         [m['id'] for m in fw_rules['firewall_rules']])
+
+    @test.attr(type='smoke')
+    def test_show_firewall_rule(self):
+        # show a created firewall rule
+        resp, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
+        self.assertEqual('200', resp['status'])
+        for key, value in fw_rule['firewall_rule'].iteritems():
+            self.assertEqual(self.fw_rule[key], value)
+
+    @test.attr(type='smoke')
+    def test_list_firewall_policies(self):
+        resp, fw_policies = self.client.list_firewall_policies()
+        self.assertEqual('200', resp['status'])
+        fw_policies = fw_policies['firewall_policies']
+        self.assertIn((self.fw_policy['id'],
+                       self.fw_policy['name'],
+                       self.fw_policy['firewall_rules']),
+                      [(m['id'],
+                        m['name'],
+                        m['firewall_rules']) for m in fw_policies])
+
+    @test.attr(type='smoke')
+    def test_create_update_delete_firewall_policy(self):
+        # Create firewall policy
+        resp, body = self.client.create_firewall_policy(
+            name=data_utils.rand_name("fw-policy"))
+        self.assertEqual('201', resp['status'])
+        fw_policy_id = body['firewall_policy']['id']
+        self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+        # Update firewall policy
+        resp, body = self.client.update_firewall_policy(fw_policy_id,
+                                                        shared=True,
+                                                        name="updated_policy")
+        self.assertEqual('200', resp['status'])
+        updated_fw_policy = body["firewall_policy"]
+        self.assertTrue(updated_fw_policy['shared'])
+        self.assertEqual("updated_policy", updated_fw_policy['name'])
+
+        # Delete firewall policy
+        resp, _ = self.client.delete_firewall_policy(fw_policy_id)
+        self.assertEqual('204', resp['status'])
+        # Confirm deletion
+        resp, fw_policies = self.client.list_firewall_policies()
+        fw_policies = fw_policies['firewall_policies']
+        self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
+
+    @test.attr(type='smoke')
+    def test_show_firewall_policy(self):
+        # show a created firewall policy
+        resp, fw_policy = self.client.show_firewall_policy(
+            self.fw_policy['id'])
+        self.assertEqual('200', resp['status'])
+        fw_policy = fw_policy['firewall_policy']
+        for key, value in fw_policy.iteritems():
+            self.assertEqual(self.fw_policy[key], value)
+
+    @test.attr(type='smoke')
+    def test_create_show_delete_firewall(self):
+        # Create tenant network resources required for an ACTIVE firewall
+        network = self.create_network()
+        subnet = self.create_subnet(network)
+        router = self.create_router(
+            data_utils.rand_name('router-'),
+            admin_state_up=True)
+        self.client.add_router_interface_with_subnet_id(
+            router['id'], subnet['id'])
+
+        # Create firewall
+        resp, body = self.client.create_firewall(
+            name=data_utils.rand_name("firewall"),
+            firewall_policy_id=self.fw_policy['id'])
+        self.assertEqual('201', resp['status'])
+        created_firewall = body['firewall']
+        firewall_id = created_firewall['id']
+        self.addCleanup(self._try_delete_firewall, firewall_id)
+
+        self._wait_for_active(firewall_id)
+
+        # show a created firewall
+        resp, firewall = self.client.show_firewall(firewall_id)
+        self.assertEqual('200', resp['status'])
+        firewall = firewall['firewall']
+
+        for key, value in firewall.iteritems():
+            if key == 'status':
+                continue
+            self.assertEqual(created_firewall[key], value)
+
+        # list firewall
+        resp, firewalls = self.client.list_firewalls()
+        self.assertEqual('200', resp['status'])
+        firewalls = firewalls['firewalls']
+        self.assertIn((created_firewall['id'],
+                       created_firewall['name'],
+                       created_firewall['firewall_policy_id']),
+                      [(m['id'],
+                        m['name'],
+                        m['firewall_policy_id']) for m in firewalls])
+
+        # Delete firewall
+        resp, _ = self.client.delete_firewall(firewall_id)
+        self.assertEqual('204', resp['status'])
+
+
+class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
+    _interface = 'xml'
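
_wait_for_active above polls with test.call_until_true until the firewall reports ACTIVE; a rough sketch of the polling contract it assumes, simplified rather than tempest's exact code::

    import time

    def call_until_true(func, duration, sleep_for):
        """Call func every sleep_for seconds until it returns True or
        duration seconds elapse; return whether it ever returned True."""
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False
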
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 792d61d..673fc47 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -38,6 +38,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(LoadBalancerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas', 'network'):
@@ -156,10 +157,14 @@
         # Verification of pool update
         new_name = "New_pool"
         resp, body = self.client.update_pool(pool['id'],
-                                             name=new_name)
+                                             name=new_name,
+                                             description="new_description",
+                                             lb_method='LEAST_CONNECTIONS')
         self.assertEqual('200', resp['status'])
         updated_pool = body['pool']
-        self.assertEqual(updated_pool['name'], new_name)
+        self.assertEqual(new_name, updated_pool['name'])
+        self.assertEqual('new_description', updated_pool['description'])
+        self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
         # Verification of pool delete
         resp, body = self.client.delete_pool(pool['id'])
         self.assertEqual('204', resp['status'])
@@ -377,6 +382,92 @@
         self.assertIn("active_connections", stats)
         self.assertIn("bytes_out", stats)
 
+    @test.attr(type='smoke')
+    def test_update_list_of_health_monitors_associated_with_pool(self):
+        resp, _ = (self.client.associate_health_monitor_with_pool
+                   (self.health_monitor['id'], self.pool['id']))
+        self.assertEqual('201', resp['status'])
+        resp, _ = self.client.update_health_monitor(
+            self.health_monitor['id'], admin_state_up=False)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        health_monitors = body['pool']['health_monitors']
+        for health_monitor_id in health_monitors:
+            resp, body = self.client.show_health_monitor(health_monitor_id)
+            self.assertEqual('200', resp['status'])
+            self.assertFalse(body['health_monitor']['admin_state_up'])
+        resp, _ = (self.client.disassociate_health_monitor_with_pool
+                   (self.health_monitor['id'], self.pool['id']))
+        self.assertEqual('204', resp['status'])
+
+    @test.attr(type='smoke')
+    def test_update_admin_state_up_of_pool(self):
+        resp, _ = self.client.update_pool(self.pool['id'],
+                                          admin_state_up=False)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        pool = body['pool']
+        self.assertFalse(pool['admin_state_up'])
+
+    @test.attr(type='smoke')
+    def test_show_vip_associated_with_pool(self):
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        pool = body['pool']
+        resp, body = self.client.show_vip(pool['vip_id'])
+        self.assertEqual('200', resp['status'])
+        vip = body['vip']
+        self.assertEqual(self.vip['name'], vip['name'])
+        self.assertEqual(self.vip['id'], vip['id'])
+
+    @test.attr(type='smoke')
+    def test_show_members_associated_with_pool(self):
+        resp, body = self.client.show_pool(self.pool['id'])
+        self.assertEqual('200', resp['status'])
+        members = body['pool']['members']
+        for member_id in members:
+            resp, body = self.client.show_member(member_id)
+            self.assertEqual('200', resp['status'])
+            self.assertIsNotNone(body['member']['status'])
+            self.assertEqual(member_id, body['member']['id'])
+            self.assertIsNotNone(body['member']['admin_state_up'])
+
+    @test.attr(type='smoke')
+    def test_update_pool_related_to_member(self):
+        # Create new pool
+        resp, body = self.client.create_pool(
+            name=data_utils.rand_name("pool-"),
+            lb_method='ROUND_ROBIN',
+            protocol='HTTP',
+            subnet_id=self.subnet['id'])
+        self.assertEqual('201', resp['status'])
+        new_pool = body['pool']
+        self.addCleanup(self.client.delete_pool, new_pool['id'])
+        # Update member with new pool's id
+        resp, body = self.client.update_member(self.member['id'],
+                                               pool_id=new_pool['id'])
+        self.assertEqual('200', resp['status'])
+        # Confirm with show that the pool_id changed
+        resp, body = self.client.show_member(self.member['id'])
+        member = body['member']
+        self.assertEqual(member['pool_id'], new_pool['id'])
+        # Restore the member's original pool id; this is needed for cleanup
+        resp, body = self.client.update_member(self.member['id'],
+                                               pool_id=self.pool['id'])
+        self.assertEqual('200', resp['status'])
+
+    @test.attr(type='smoke')
+    def test_update_member_weight(self):
+        resp, _ = self.client.update_member(self.member['id'],
+                                            weight=2)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_member(self.member['id'])
+        self.assertEqual('200', resp['status'])
+        member = body['member']
+        self.assertEqual(2, member['weight'])
+
 
 class LoadBalancerTestXML(LoadBalancerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index b9041ee..ac0fd11 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -19,7 +19,7 @@
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 CONF = config.CONF
 
@@ -37,15 +37,9 @@
         create a subnet for a tenant
         list tenant's subnets
         show a tenant subnet details
-        port create
-        port delete
-        port list
-        port show
-        port update
         network update
         subnet update
         delete a network also deletes its subnets
-        create a port with no IP address associated with it
 
         All subnet tests are run once with ipv4 and once with ipv6.
 
@@ -64,6 +58,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(NetworksTestJSON, cls).setUpClass()
         cls.network = cls.create_network()
@@ -71,7 +66,7 @@
         cls.subnet = cls.create_subnet(cls.network)
         cls.cidr = cls.subnet['cidr']
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_create_update_delete_network_subnet(self):
         # Create a network
         name = data_utils.rand_name('network-')
@@ -102,7 +97,7 @@
         resp, body = self.client.delete_network(net_id)
         self.assertEqual('204', resp['status'])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_network(self):
         # Verify the details of a network
         resp, body = self.client.show_network(self.network['id'])
@@ -111,19 +106,19 @@
         for key in ['id', 'name']:
             self.assertEqual(network[key], self.network[key])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_network_fields(self):
         # Verify specific fields of a network
-        field_list = [('fields', 'id'), ('fields', 'name'), ]
+        fields = ['id', 'name']
         resp, body = self.client.show_network(self.network['id'],
-                                              field_list=field_list)
+                                              fields=fields)
         self.assertEqual('200', resp['status'])
         network = body['network']
-        self.assertEqual(len(network), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(network.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(network[field_name], self.network[field_name])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_networks(self):
         # Verify the network exists in the list of all networks
         resp, body = self.client.list_networks()
@@ -132,18 +127,18 @@
                     if network['id'] == self.network['id']]
         self.assertNotEmpty(networks, "Created network not found in the list")
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_networks_fields(self):
         # Verify specific fields of the networks
-        resp, body = self.client.list_networks(fields='id')
+        fields = ['id', 'name']
+        resp, body = self.client.list_networks(fields=fields)
         self.assertEqual('200', resp['status'])
         networks = body['networks']
         self.assertNotEmpty(networks, "Network list returned is empty")
         for network in networks:
-            self.assertEqual(len(network), 1)
-            self.assertIn('id', network)
+            self.assertEqual(sorted(network.keys()), sorted(fields))
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_subnet(self):
         # Verify the details of a subnet
         resp, body = self.client.show_subnet(self.subnet['id'])
@@ -154,19 +149,19 @@
             self.assertIn(key, subnet)
             self.assertEqual(subnet[key], self.subnet[key])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_show_subnet_fields(self):
         # Verify specific fields of a subnet
-        field_list = [('fields', 'id'), ('fields', 'cidr'), ]
+        fields = ['id', 'network_id']
         resp, body = self.client.show_subnet(self.subnet['id'],
-                                             field_list=field_list)
+                                             fields=fields)
         self.assertEqual('200', resp['status'])
         subnet = body['subnet']
-        self.assertEqual(len(subnet), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(subnet.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(subnet[field_name], self.subnet[field_name])
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_subnets(self):
         # Verify the subnet exists in the list of all subnets
         resp, body = self.client.list_subnets()
@@ -175,16 +170,16 @@
                    if subnet['id'] == self.subnet['id']]
         self.assertNotEmpty(subnets, "Created subnet not found in the list")
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_list_subnets_fields(self):
         # Verify specific fields of subnets
-        resp, body = self.client.list_subnets(fields='id')
+        fields = ['id', 'network_id']
+        resp, body = self.client.list_subnets(fields=fields)
         self.assertEqual('200', resp['status'])
         subnets = body['subnets']
         self.assertNotEmpty(subnets, "Subnet list returned is empty")
         for subnet in subnets:
-            self.assertEqual(len(subnet), 1)
-            self.assertIn('id', subnet)
+            self.assertEqual(sorted(subnet.keys()), sorted(fields))
 
     def _try_delete_network(self, net_id):
         # delete network, if it exists
@@ -194,7 +189,7 @@
         except exceptions.NotFound:
             pass
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_delete_network_with_subnet(self):
         # Creates a network
         name = data_utils.rand_name('network-')
@@ -221,31 +216,38 @@
         # it from the list.
         self.subnets.pop()
 
-    @attr(type='smoke')
-    def test_create_port_with_no_ip(self):
-        # For this test create a new network - do not use any previously
-        # created networks.
-        name = data_utils.rand_name('network-nosubnet-')
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_gw(self):
+        gateway = '10.100.0.13'
+        name = data_utils.rand_name('network-')
         resp, body = self.client.create_network(name=name)
         self.assertEqual('201', resp['status'])
         network = body['network']
         net_id = network['id']
-        self.networks.append(network)
+        subnet = self.create_subnet(network, gateway)
+        # Verifies Subnet GW in IPv4
+        self.assertEqual(subnet['gateway_ip'], gateway)
+        # Delete network and subnet
+        resp, body = self.client.delete_network(net_id)
+        self.assertEqual('204', resp['status'])
+        self.subnets.pop()
 
-        # Now create a port for this network - without creating any
-        # subnets for this network - this ensures no IP for the port
-        resp, body = self.client.create_port(network_id=net_id)
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_without_gw(self):
+        net = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+        gateway_ip = str(netaddr.IPAddress(net.first + 1))
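+        # No gateway is passed to create_subnet below; Neutron is expected
+        # to default the gateway to this first usable address of the CIDR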
+        name = data_utils.rand_name('network-')
+        resp, body = self.client.create_network(name=name)
         self.assertEqual('201', resp['status'])
-        port = body['port']
-        port_id = port['id']
-        self.addCleanup(self.client.delete_port, port_id)
-
-        # Verify that the port does not have any IP address
-        resp, body = self.client.show_port(port_id)
-        self.assertEqual('200', resp['status'])
-        port_resp = body['port']
-        self.assertEqual(port_id, port_resp['id'])
-        self.assertEqual(port_resp['fixed_ips'], [])
+        network = body['network']
+        net_id = network['id']
+        subnet = self.create_subnet(network)
+        # Verifies Subnet GW in IPv4
+        self.assertEqual(subnet['gateway_ip'], gateway_ip)
+        # Delete network and subnet
+        resp, body = self.client.delete_network(net_id)
+        self.assertEqual('204', resp['status'])
+        self.subnets.pop()
 
 
 class NetworksTestXML(NetworksTestJSON):
@@ -275,6 +277,7 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(BulkNetworkOpsTestJSON, cls).setUpClass()
         cls.network1 = cls.create_network()
@@ -310,7 +313,7 @@
         for n in created_ports:
             self.assertNotIn(n['id'], ports_list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_bulk_create_delete_network(self):
         # Creates 2 networks in one request
         network_names = [data_utils.rand_name('network-'),
@@ -326,7 +329,7 @@
             self.assertIsNotNone(n['id'])
             self.assertIn(n['id'], networks_list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_bulk_create_delete_subnet(self):
         # Creates 2 subnets in one request
         cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
@@ -358,7 +361,7 @@
             self.assertIsNotNone(n['id'])
             self.assertIn(n['id'], subnets_list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_bulk_create_delete_port(self):
         # Creates 2 ports in one request
         networks = [self.network1['id'], self.network2['id']]
@@ -394,11 +397,41 @@
 
     @classmethod
     def setUpClass(cls):
-        super(NetworksIpV6TestJSON, cls).setUpClass()
         if not CONF.network_feature_enabled.ipv6:
-            cls.tearDownClass()
             skip_msg = "IPv6 Tests are disabled."
             raise cls.skipException(skip_msg)
+        super(NetworksIpV6TestJSON, cls).setUpClass()
+
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_with_gw(self):
+        gateway = '2003::2'
+        name = data_utils.rand_name('network-')
+        resp, body = self.client.create_network(name=name)
+        self.assertEqual('201', resp['status'])
+        network = body['network']
+        net_id = network['id']
+        subnet = self.create_subnet(network, gateway)
+        # Verifies Subnet GW in IPv6
+        self.assertEqual(subnet['gateway_ip'], gateway)
+        # Delete network and subnet
+        resp, body = self.client.delete_network(net_id)
+        self.assertEqual('204', resp['status'])
+        self.subnets.pop()
+
+    @test.attr(type='smoke')
+    def test_create_delete_subnet_without_gw(self):
+        name = data_utils.rand_name('network-')
+        resp, body = self.client.create_network(name=name)
+        self.assertEqual('201', resp['status'])
+        network = body['network']
+        net_id = network['id']
+        subnet = self.create_subnet(network)
+        # Verifies Subnet GW in IPv6
+        self.assertEqual(subnet['gateway_ip'], '2003::1')
+        # Delete network and subnet
+        resp, body = self.client.delete_network(net_id)
+        self.assertEqual('204', resp['status'])
+        self.subnets.pop()
 
 
 class NetworksIpV6TestXML(NetworksIpV6TestJSON):
diff --git a/tempest/api/network/test_networks_negative.py b/tempest/api/network/test_networks_negative.py
index 89c8a9f..53dfc52 100644
--- a/tempest/api/network/test_networks_negative.py
+++ b/tempest/api/network/test_networks_negative.py
@@ -17,37 +17,37 @@
 from tempest.api.network import base
 from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class NetworksNegativeTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
-    @attr(type=['negative', 'smoke'])
+    @test.attr(type=['negative', 'smoke'])
     def test_show_non_existent_network(self):
         non_exist_id = data_utils.rand_name('network')
         self.assertRaises(exceptions.NotFound, self.client.show_network,
                           non_exist_id)
 
-    @attr(type=['negative', 'smoke'])
+    @test.attr(type=['negative', 'smoke'])
     def test_show_non_existent_subnet(self):
         non_exist_id = data_utils.rand_name('subnet')
         self.assertRaises(exceptions.NotFound, self.client.show_subnet,
                           non_exist_id)
 
-    @attr(type=['negative', 'smoke'])
+    @test.attr(type=['negative', 'smoke'])
     def test_show_non_existent_port(self):
         non_exist_id = data_utils.rand_name('port')
         self.assertRaises(exceptions.NotFound, self.client.show_port,
                           non_exist_id)
 
-    @attr(type=['negative', 'smoke'])
+    @test.attr(type=['negative', 'smoke'])
     def test_update_non_existent_network(self):
         non_exist_id = data_utils.rand_name('network')
         self.assertRaises(exceptions.NotFound, self.client.update_network,
                           non_exist_id, name="new_name")
 
-    @attr(type=['negative', 'smoke'])
+    @test.attr(type=['negative', 'smoke'])
     def test_delete_non_existent_network(self):
         non_exist_id = data_utils.rand_name('network')
         self.assertRaises(exceptions.NotFound, self.client.delete_network,
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index fbb25a8..e6e6ea1 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -26,7 +26,18 @@
 class PortsTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
+    """
+    Test the following operations for ports:
+
+        port create
+        port delete
+        port list
+        port show
+        port update
+    """
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(PortsTestJSON, cls).setUpClass()
         cls.network = cls.create_network()
@@ -46,6 +57,8 @@
         resp, body = self.client.create_port(network_id=self.network['id'])
         self.assertEqual('201', resp['status'])
         port = body['port']
+        # Schedule port deletion with verification upon test completion
+        self.addCleanup(self._delete_port, port['id'])
         self.assertTrue(port['admin_state_up'])
         # Verify port update
         new_name = "New_Port"
@@ -57,9 +70,6 @@
         updated_port = body['port']
         self.assertEqual(updated_port['name'], new_name)
         self.assertFalse(updated_port['admin_state_up'])
-        # Verify port deletion
-        resp, body = self.client.delete_port(port['id'])
-        self.assertEqual('204', resp['status'])
 
     @test.attr(type='smoke')
     def test_show_port(self):
@@ -79,17 +89,18 @@
         self.assertEqual(self.port['network_id'], port['network_id'])
         self.assertEqual(self.port['security_groups'],
                          port['security_groups'])
+        self.assertEqual(port['fixed_ips'], [])
 
     @test.attr(type='smoke')
     def test_show_port_fields(self):
         # Verify specific fields of a port
-        field_list = [('fields', 'id'), ]
+        fields = ['id', 'mac_address']
         resp, body = self.client.show_port(self.port['id'],
-                                           field_list=field_list)
+                                           fields=fields)
         self.assertEqual('200', resp['status'])
         port = body['port']
-        self.assertEqual(len(port), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(port.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(port[field_name], self.port[field_name])
 
     @test.attr(type='smoke')
@@ -125,14 +136,37 @@
     @test.attr(type='smoke')
     def test_list_ports_fields(self):
         # Verify specific fields of ports
-        resp, body = self.client.list_ports(fields='id')
+        fields = ['id', 'mac_address']
+        resp, body = self.client.list_ports(fields=fields)
         self.assertEqual('200', resp['status'])
         ports = body['ports']
         self.assertNotEmpty(ports, "Port list returned is empty")
         # Asserting the fields returned are correct
         for port in ports:
-            self.assertEqual(len(port), 1)
-            self.assertIn('id', port)
+            self.assertEqual(sorted(fields), sorted(port.keys()))
+
+    @test.attr(type='smoke')
+    def test_update_port_with_second_ip(self):
+        # Create a network with two subnets
+        network = self.create_network()
+        subnet_1 = self.create_subnet(network)
+        subnet_2 = self.create_subnet(network)
+        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
+        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
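+        # Each fixed_ips entry names only a subnet, so Neutron is expected
+        # to allocate an address from that subnet automatically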
+
+        # Create a port with a single IP address from first subnet
+        port = self.create_port(network,
+                                fixed_ips=fixed_ip_1)
+        self.assertEqual(1, len(port['fixed_ips']))
+
+        # Update the port with a second IP address from second subnet
+        fixed_ips = fixed_ip_1 + fixed_ip_2
+        port = self.update_port(port, fixed_ips=fixed_ips)
+        self.assertEqual(2, len(port['fixed_ips']))
+
+        # Update the port to return to a single IP address
+        port = self.update_port(port, fixed_ips=fixed_ip_1)
+        self.assertEqual(1, len(port['fixed_ips']))
 
 
 class PortsTestXML(PortsTestJSON):
@@ -143,6 +177,7 @@
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(PortsAdminExtendedAttrsTestJSON, cls).setUpClass()
         cls.identity_client = cls._get_identity_admin_client()
@@ -180,15 +215,31 @@
 
     @test.attr(type='smoke')
     def test_list_ports_binding_ext_attr(self):
-        resp, body = self.admin_client.list_ports(
-            **{'tenant_id': self.tenant['id']})
+        # Create a new port
+        post_body = {"network_id": self.network['id']}
+        resp, body = self.admin_client.create_port(**post_body)
+        self.assertEqual('201', resp['status'])
+        port = body['port']
+        self.addCleanup(self.admin_client.delete_port, port['id'])
+
+        # Update the port's binding attributes so that it is now 'bound'
+        # to a host
+        update_body = {"binding:host_id": self.host_id}
+        resp, _ = self.admin_client.update_port(port['id'], **update_body)
+        self.assertEqual('200', resp['status'])
+
+        # List all ports, ensure new port is part of list and its binding
+        # attributes are set and accurate
+        resp, body = self.admin_client.list_ports()
         self.assertEqual('200', resp['status'])
         ports_list = body['ports']
-        for port in ports_list:
-            vif_type = port['binding:vif_type']
-            self.assertIsNotNone(vif_type)
-            vif_details = port['binding:vif_details']['port_filter']
-            self.assertIsNotNone(vif_details)
+        pids_list = [p['id'] for p in ports_list]
+        self.assertIn(port['id'], pids_list)
+        listed_port = [p for p in ports_list if p['id'] == port['id']]
+        self.assertEqual(1, len(listed_port),
+                         'Multiple ports listed with id %s in ports listing: '
+                         '%s' % (port['id'], ports_list))
+        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
 
     @test.attr(type='smoke')
     def test_show_port_binding_ext_attr(self):
@@ -237,11 +288,10 @@
 
     @classmethod
     def setUpClass(cls):
-        super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
         if not CONF.network_feature_enabled.ipv6:
-            cls.tearDownClass()
             skip_msg = "IPv6 Tests are disabled."
             raise cls.skipException(skip_msg)
+        super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
 
 
 class PortsAdminExtendedAttrsIpV6TestXML(
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 2657031..d38633f 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -36,6 +36,18 @@
         admin_manager = clients.AdminManager()
         cls.identity_admin_client = admin_manager.identity_client
 
+    def _cleanup_router(self, router):
+        self.delete_router(router)
+        self.routers.remove(router)
+
+    def _create_router(self, name, admin_state_up=False,
+                       external_network_id=None, enable_snat=None):
+        # Register a cleanup for each created router to avoid quota limits
+        router = self.create_router(name, admin_state_up,
+                                    external_network_id, enable_snat)
+        self.addCleanup(self._cleanup_router, router)
+        return router
+
     @test.attr(type='smoke')
     def test_create_show_list_update_delete_router(self):
         # Create a router
@@ -102,7 +114,7 @@
     def test_add_remove_router_interface_with_subnet_id(self):
         network = self.create_network()
         subnet = self.create_subnet(network)
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         # Add router interface with subnet id
         resp, interface = self.client.add_router_interface_with_subnet_id(
             router['id'], subnet['id'])
@@ -121,7 +133,7 @@
     def test_add_remove_router_interface_with_port_id(self):
         network = self.create_network()
         self.create_subnet(network)
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         resp, port_body = self.client.create_port(
             network_id=network['id'])
         # add router interface to port created above
@@ -164,7 +176,7 @@
 
     @test.attr(type='smoke')
     def test_update_router_set_gateway(self):
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         self.client.update_router(
             router['id'],
             external_gateway_info={
@@ -180,7 +192,7 @@
     @test.requires_ext(extension='ext-gw-mode', service='network')
     @test.attr(type='smoke')
     def test_update_router_set_gateway_with_snat_explicit(self):
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         self.admin_client.update_router_with_snat_gw_info(
             router['id'],
             external_gateway_info={
@@ -195,7 +207,7 @@
     @test.requires_ext(extension='ext-gw-mode', service='network')
     @test.attr(type='smoke')
     def test_update_router_set_gateway_without_snat(self):
-        router = self.create_router(data_utils.rand_name('router-'))
+        router = self._create_router(data_utils.rand_name('router-'))
         self.admin_client.update_router_with_snat_gw_info(
             router['id'],
             external_gateway_info={
@@ -209,7 +221,7 @@
 
     @test.attr(type='smoke')
     def test_update_router_unset_gateway(self):
-        router = self.create_router(
+        router = self._create_router(
             data_utils.rand_name('router-'),
             external_network_id=CONF.network.public_network_id)
         self.client.update_router(router['id'], external_gateway_info={})
@@ -223,7 +235,7 @@
     @test.requires_ext(extension='ext-gw-mode', service='network')
     @test.attr(type='smoke')
     def test_update_router_reset_gateway_without_snat(self):
-        router = self.create_router(
+        router = self._create_router(
             data_utils.rand_name('router-'),
             external_network_id=CONF.network.public_network_id)
         self.admin_client.update_router_with_snat_gw_info(
@@ -244,22 +256,38 @@
         self.name = self.network['name']
         self.subnet = self.create_subnet(self.network)
         # Add router interface with subnet id
-        self.router = self.create_router(data_utils.rand_name('router-'), True)
+        self.router = self._create_router(
+            data_utils.rand_name('router-'), True)
         self.create_router_interface(self.router['id'], self.subnet['id'])
         self.addCleanup(
             self._delete_extra_routes,
             self.router['id'])
-        # Update router extra route
+        # Update the router's extra route; the second host address of the
+        # subnet (cidr[2]) is used as the next hop
         cidr = netaddr.IPNetwork(self.subnet['cidr'])
+        next_hop = str(cidr[2])
+        destination = str(self.subnet['cidr'])
         resp, extra_route = self.client.update_extra_routes(
-            self.router['id'], str(cidr[0]), str(self.subnet['cidr']))
+            self.router['id'], next_hop, destination)
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(1, len(extra_route['router']['routes']))
+        self.assertEqual(destination,
+                         extra_route['router']['routes'][0]['destination'])
+        self.assertEqual(next_hop,
+                         extra_route['router']['routes'][0]['nexthop'])
+        resp, show_body = self.client.show_router(self.router['id'])
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(destination,
+                         show_body['router']['routes'][0]['destination'])
+        self.assertEqual(next_hop,
+                         show_body['router']['routes'][0]['nexthop'])
 
     def _delete_extra_routes(self, router_id):
         resp, _ = self.client.delete_extra_routes(router_id)
 
     @test.attr(type='smoke')
     def test_update_router_admin_state(self):
-        self.router = self.create_router(data_utils.rand_name('router-'))
+        self.router = self._create_router(data_utils.rand_name('router-'))
         self.assertFalse(self.router['admin_state_up'])
         # Update router admin state
         resp, update_body = self.client.update_router(self.router['id'],
@@ -272,10 +300,14 @@
 
     @test.attr(type='smoke')
     def test_add_multiple_router_interfaces(self):
-        network = self.create_network()
-        subnet01 = self.create_subnet(network)
-        subnet02 = self.create_subnet(network)
-        router = self.create_router(data_utils.rand_name('router-'))
+        network01 = self.create_network(
+            network_name=data_utils.rand_name('router-network01-'))
+        network02 = self.create_network(
+            network_name=data_utils.rand_name('router-network02-'))
+        subnet01 = self.create_subnet(network01)
+        sub02_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr).next()
+        subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
+        router = self._create_router(data_utils.rand_name('router-'))
         interface01 = self._add_router_interface_with_subnet_id(router['id'],
                                                                 subnet01['id'])
         self._verify_router_interface(router['id'], subnet01['id'],
@@ -285,15 +317,6 @@
         self._verify_router_interface(router['id'], subnet02['id'],
                                       interface02['port_id'])
 
-    def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        resp, interface = self.client.add_router_interface_with_subnet_id(
-            router_id, subnet_id)
-        self.assertEqual('200', resp['status'])
-        self.addCleanup(self._remove_router_interface_with_subnet_id,
-                        router_id, subnet_id)
-        self.assertEqual(subnet_id, interface['subnet_id'])
-        return interface
-
     def _verify_router_interface(self, router_id, subnet_id, port_id):
         resp, show_port_body = self.client.show_port(port_id)
         self.assertEqual('200', resp['status'])
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index e6ad4de..feee51b 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -13,16 +13,22 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import netaddr
+
 from tempest.api.network import base_routers as base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class RoutersNegativeTest(base.BaseRouterTest):
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(RoutersNegativeTest, cls).setUpClass()
         if not test.is_extension_enabled('router', 'network'):
@@ -42,12 +48,30 @@
 
     @test.attr(type=['negative', 'smoke'])
     def test_router_add_gateway_net_not_external_returns_400(self):
-        self.create_subnet(self.network)
+        alt_network = self.create_network(
+            network_name=data_utils.rand_name('router-negative-'))
+        sub_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr).next()
+        self.create_subnet(alt_network, cidr=sub_cidr)
         self.assertRaises(exceptions.BadRequest,
                           self.client.update_router,
                           self.router['id'],
                           external_gateway_info={
-                              'network_id': self.network['id']})
+                              'network_id': alt_network['id']})
+
+    @test.attr(type=['negative', 'smoke'])
+    def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
+        network01 = self.create_network(
+            network_name=data_utils.rand_name('router-network01-'))
+        network02 = self.create_network(
+            network_name=data_utils.rand_name('router-network02-'))
+        subnet01 = self.create_subnet(network01)
+        subnet02 = self.create_subnet(network02)
+        self._add_router_interface_with_subnet_id(self.router['id'],
+                                                  subnet01['id'])
+        self.assertRaises(exceptions.BadRequest,
+                          self._add_router_interface_with_subnet_id,
+                          self.router['id'],
+                          subnet02['id'])
 
     @test.attr(type=['negative', 'smoke'])
     def test_router_remove_interface_in_use_returns_409(self):
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 3e26f46..b98cea1 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import six
+
 from tempest.api.network import base_security_groups as base
 from tempest.common.utils import data_utils
 from tempest import test
@@ -84,22 +86,25 @@
                 direction='ingress'
             )
             self.assertEqual('201', resp['status'])
-            self.addCleanup(self._delete_security_group_rule,
-                            rule_create_body['security_group_rule']['id']
-                            )
 
-        # Show details of the created security rule
-        resp, show_rule_body = self.client.show_security_group_rule(
-            rule_create_body['security_group_rule']['id']
-        )
-        self.assertEqual('200', resp['status'])
+            # Show details of the created security rule
+            resp, show_rule_body = self.client.show_security_group_rule(
+                rule_create_body['security_group_rule']['id']
+            )
+            self.assertEqual('200', resp['status'])
+            create_dict = rule_create_body['security_group_rule']
+            for key, value in six.iteritems(create_dict):
+                self.assertEqual(value,
+                                 show_rule_body['security_group_rule'][key],
+                                 "%s does not match." % key)
 
-        # List rules and verify created rule is in response
-        resp, rule_list_body = self.client.list_security_group_rules()
-        self.assertEqual('200', resp['status'])
-        rule_list = [rule['id']
-                     for rule in rule_list_body['security_group_rules']]
-        self.assertIn(rule_create_body['security_group_rule']['id'], rule_list)
+            # List rules and verify created rule is in response
+            resp, rule_list_body = self.client.list_security_group_rules()
+            self.assertEqual('200', resp['status'])
+            rule_list = [rule['id']
+                         for rule in rule_list_body['security_group_rules']]
+            self.assertIn(rule_create_body['security_group_rule']['id'],
+                          rule_list)
 
     @test.attr(type='smoke')
     def test_create_security_group_rule_with_additional_args(self):
@@ -122,9 +127,6 @@
 
         self.assertEqual('201', resp['status'])
         sec_group_rule = rule_create_body['security_group_rule']
-        self.addCleanup(self._delete_security_group_rule,
-                        sec_group_rule['id']
-                        )
 
         self.assertEqual(sec_group_rule['direction'], direction)
         self.assertEqual(sec_group_rule['protocol'], protocol)
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 78bc80a..d1fe15c 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -16,12 +16,13 @@
 from tempest.api.network import base
 from tempest.common.utils import data_utils
 from tempest import config
+from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
 
 
-class VPNaaSJSON(base.BaseNetworkTest):
+class VPNaaSTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
     """
@@ -37,11 +38,12 @@
     """
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         if not test.is_extension_enabled('vpnaas', 'network'):
             msg = "vpnaas extension not enabled."
             raise cls.skipException(msg)
-        super(VPNaaSJSON, cls).setUpClass()
+        super(VPNaaSTestJSON, cls).setUpClass()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
         cls.router = cls.create_router(
@@ -52,6 +54,8 @@
                                                cls.router['id'])
         cls.ikepolicy = cls.create_ikepolicy(
             data_utils.rand_name("ike-policy-"))
+        cls.ipsecpolicy = cls.create_ipsecpolicy(
+            data_utils.rand_name("ipsec-policy-"))
 
     def _delete_ike_policy(self, ike_policy_id):
         # Deletes a ike policy and verifies if it is deleted or not
@@ -69,6 +73,20 @@
                 ike_id_list.append(i['id'])
             self.assertNotIn(ike_policy_id, ike_id_list)
 
+    def _delete_ipsec_policy(self, ipsec_policy_id):
+        # Deletes an ipsec policy if it exists
+        try:
+            self.client.delete_ipsecpolicy(ipsec_policy_id)
+        except exceptions.NotFound:
+            pass
+
+    def _assertExpected(self, expected, actual):
+        # Check that the expected keys/values exist in the actual response body
+        for key, value in expected.iteritems():
+            self.assertIn(key, actual)
+            self.assertEqual(value, actual[key])
+
     @test.attr(type='smoke')
     def test_list_vpn_services(self):
         # Verify the VPN service exists in the list of all VPN services
@@ -81,8 +99,8 @@
     def test_create_update_delete_vpn_service(self):
         # Creates a VPN service
         name = data_utils.rand_name('vpn-service-')
-        resp, body = self.client.create_vpnservice(self.subnet['id'],
-                                                   self.router['id'],
+        resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
+                                                   router_id=self.router['id'],
                                                    name=name,
                                                    admin_state_up=True)
         self.assertEqual('201', resp['status'])
@@ -133,7 +151,7 @@
         # Creates a IKE policy
         name = data_utils.rand_name('ike-policy-')
         resp, body = (self.client.create_ikepolicy(
-                      name,
+                      name=name,
                       ike_version="v1",
                       encryption_algorithm="aes-128",
                       auth_algorithm="sha1"))
@@ -175,3 +193,52 @@
                          ikepolicy['phase1_negotiation_mode'])
         self.assertEqual(self.ikepolicy['ike_version'],
                          ikepolicy['ike_version'])
+
+    @test.attr(type='smoke')
+    def test_list_ipsec_policies(self):
+        # Verify the ipsec policy exists in the list of all ipsec policies
+        resp, body = self.client.list_ipsecpolicies()
+        self.assertEqual('200', resp['status'])
+        ipsecpolicies = body['ipsecpolicies']
+        self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
+
+    @test.attr(type='smoke')
+    def test_create_update_delete_ipsec_policy(self):
+        # Creates an ipsec policy
+        ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
+                             'pfs': 'group5',
+                             'encryption_algorithm': "aes-128",
+                             'auth_algorithm': 'sha1'}
+        resp, resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
+        self.assertEqual('201', resp['status'])
+        ipsecpolicy = resp_body['ipsecpolicy']
+        self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
+        self._assertExpected(ipsec_policy_body, ipsecpolicy)
+        # Verification of ipsec policy update
+        new_ipsec = {'description': 'Updated ipsec policy',
+                     'pfs': 'group2',
+                     'name': data_utils.rand_name("New-IPSec"),
+                     'encryption_algorithm': "aes-256",
+                     'lifetime': {'units': "seconds", 'value': '2000'}}
+        resp, body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
+                                                    **new_ipsec)
+        self.assertEqual('200', resp['status'])
+        updated_ipsec_policy = body['ipsecpolicy']
+        self._assertExpected(new_ipsec, updated_ipsec_policy)
+        # Verification of ipsec policy delete
+        resp, _ = self.client.delete_ipsecpolicy(ipsecpolicy['id'])
+        self.assertEqual('204', resp['status'])
+        self.assertRaises(exceptions.NotFound,
+                          self.client.delete_ipsecpolicy, ipsecpolicy['id'])
+
+    @test.attr(type='smoke')
+    def test_show_ipsec_policy(self):
+        # Verifies the details of an ipsec policy
+        resp, body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
+        self.assertEqual('200', resp['status'])
+        ipsecpolicy = body['ipsecpolicy']
+        self._assertExpected(self.ipsecpolicy, ipsecpolicy)
+
+
+class VPNaaSTestXML(VPNaaSTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 45c895b..6b18182 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -38,23 +38,12 @@
             cls.__name__, network_resources=cls.network_resources)
         if CONF.compute.allow_tenant_isolation:
             # Get isolated creds for normal user
-            creds = cls.isolated_creds.get_primary_creds()
-            username, tenant_name, password = creds
-            cls.os = clients.Manager(username=username,
-                                     password=password,
-                                     tenant_name=tenant_name)
+            cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
             # Get isolated creds for admin user
-            admin_creds = cls.isolated_creds.get_admin_creds()
-            admin_username, admin_tenant_name, admin_password = admin_creds
-            cls.os_admin = clients.Manager(username=admin_username,
-                                           password=admin_password,
-                                           tenant_name=admin_tenant_name)
+            cls.os_admin = clients.Manager(
+                cls.isolated_creds.get_admin_creds())
             # Get isolated creds for alt user
-            alt_creds = cls.isolated_creds.get_alt_creds()
-            alt_username, alt_tenant, alt_password = alt_creds
-            cls.os_alt = clients.Manager(username=alt_username,
-                                         password=alt_password,
-                                         tenant_name=alt_tenant)
+            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
             # Add isolated users to operator role so that they can create a
             # container in swift.
             cls._assign_member_role()
@@ -92,8 +81,8 @@
 
     @classmethod
     def _assign_member_role(cls):
-        primary_user = cls.isolated_creds.get_primary_user()
-        alt_user = cls.isolated_creds.get_alt_user()
+        primary_creds = cls.isolated_creds.get_primary_creds()
+        alt_creds = cls.isolated_creds.get_alt_creds()
         swift_role = CONF.object_storage.operator_role
         try:
             resp, roles = cls.os_admin.identity_client.list_roles()
@@ -101,9 +90,9 @@
         except StopIteration:
             msg = "No role named %s found" % swift_role
             raise exceptions.NotFound(msg)
-        for user in [primary_user, alt_user]:
-            cls.os_admin.identity_client.assign_user_role(user['tenantId'],
-                                                          user['id'],
+        for creds in [primary_creds, alt_creds]:
+            cls.os_admin.identity_client.assign_user_role(creds.tenant_id,
+                                                          creds.user_id,
                                                           role['id'])
 
     @classmethod
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index a3098a5..19e3068 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -27,6 +27,7 @@
 class AccountQuotasTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountQuotasTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -34,10 +35,7 @@
 
         cls.data.setup_test_user()
 
-        cls.os_reselleradmin = clients.Manager(
-            cls.data.test_user,
-            cls.data.test_password,
-            cls.data.test_tenant)
+        cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
 
         # Retrieve the ResellerAdmin role id
         reseller_role_id = None
@@ -49,15 +47,11 @@
             msg = "No ResellerAdmin role found"
             raise exceptions.NotFound(msg)
 
-        # Retrieve the ResellerAdmin tenant id
-        _, users = cls.os_admin.identity_client.get_users()
-        reseller_user_id = next(usr['id'] for usr in users if usr['name']
-                                == cls.data.test_user)
+        # Retrieve the ResellerAdmin user id
+        reseller_user_id = cls.data.test_credentials.user_id
 
         # Retrieve the ResellerAdmin tenant id
-        _, tenants = cls.os_admin.identity_client.list_tenants()
-        reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
-                                  == cls.data.test_tenant)
+        reseller_tenant_id = cls.data.test_credentials.tenant_id
 
         # Assign the newly created user the appropriate ResellerAdmin role
         cls.os_admin.identity_client.assign_user_role(
@@ -82,7 +76,8 @@
         # Set a quota of 20 bytes on the user's account before each test
         headers = {"X-Account-Meta-Quota-Bytes": "20"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
 
     def tearDown(self):
         # Set the reselleradmin auth in headers for next custom_account_client
@@ -94,12 +89,14 @@
         # remove the quota from the container
         headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
         super(AccountQuotasTest, self).tearDown()
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(AccountQuotasTest, cls).tearDownClass()
 
@@ -135,8 +132,9 @@
             )
             headers = {"X-Account-Meta-Quota-Bytes": quota}
 
-            resp, _ = self.os.custom_account_client.request("POST", "",
-                                                            headers, "")
+            resp, _ = self.os.custom_account_client.request("POST", url="",
+                                                            headers=headers,
+                                                            body="")
 
             self.assertEqual(resp["status"], "204")
             self.assertHeaders(resp, 'Account', 'POST')
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 7648ea1..6afd381 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,6 +27,7 @@
 class AccountQuotasNegativeTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountQuotasNegativeTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -34,10 +35,7 @@
 
         cls.data.setup_test_user()
 
-        cls.os_reselleradmin = clients.Manager(
-            cls.data.test_user,
-            cls.data.test_password,
-            cls.data.test_tenant)
+        cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
 
         # Retrieve the ResellerAdmin role id
         reseller_role_id = None
@@ -50,14 +48,10 @@
             raise exceptions.NotFound(msg)
 
-        # Retrieve the ResellerAdmin tenant id
+        # Retrieve the ResellerAdmin user id
-        _, users = cls.os_admin.identity_client.get_users()
-        reseller_user_id = next(usr['id'] for usr in users if usr['name']
-                                == cls.data.test_user)
+        reseller_user_id = cls.data.test_credentials.user_id
 
         # Retrieve the ResellerAdmin tenant id
-        _, tenants = cls.os_admin.identity_client.list_tenants()
-        reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
-                                  == cls.data.test_tenant)
+        reseller_tenant_id = cls.data.test_credentials.tenant_id
 
         # Assign the newly created user the appropriate ResellerAdmin role
         cls.os_admin.identity_client.assign_user_role(
@@ -81,7 +75,8 @@
         # Set a quota of 20 bytes on the user's account before each test
         headers = {"X-Account-Meta-Quota-Bytes": "20"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
 
     def tearDown(self):
         # Set the reselleradmin auth in headers for next custom_account_client
@@ -93,12 +88,14 @@
         # remove the quota from the container
         headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
 
-        self.os.custom_account_client.request("POST", "", headers, "")
+        self.os.custom_account_client.request("POST", url="", headers=headers,
+                                              body="")
         super(AccountQuotasNegativeTest, self).tearDown()
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(AccountQuotasNegativeTest, cls).tearDownClass()
 
@@ -120,6 +117,7 @@
                           {"Quota-Bytes": "100"})
 
     @test.attr(type=["negative", "smoke"])
+    @test.skip_because(bug="1310597")
     @test.requires_ext(extension='account_quotas', service='object')
     def test_upload_large_object(self):
         object_name = data_utils.rand_name(name="TestObject")
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 4b895d8..d615374 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -29,10 +29,13 @@
 
 
 class AccountTest(base.BaseObjectTest):
+
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountTest, cls).setUpClass()
-        cls.containers = []
         for i in moves.xrange(ord('a'), ord('f') + 1):
             name = data_utils.rand_name(name='%s-' % chr(i))
             cls.container_client.create_container(name)
@@ -64,9 +67,7 @@
         self.data.setup_test_user()
 
         os_test_user = clients.Manager(
-            self.data.test_user,
-            self.data.test_password,
-            self.data.test_tenant)
+            self.data.test_credentials)
 
         # Retrieve the id of an operator role of object storage
         test_role_id = None
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index 71eaab5..490672d 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -17,20 +17,18 @@
 from tempest.api.object_storage import base
 from tempest import clients
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class AccountNegativeTest(base.BaseObjectTest):
 
-    @attr(type=['negative', 'gate'])
+    @test.attr(type=['negative', 'gate'])
     def test_list_containers_with_non_authorized_user(self):
         # list containers using non-authorized user
 
         # create user
         self.data.setup_test_user()
-        test_os = clients.Manager(self.data.test_user,
-                                  self.data.test_password,
-                                  self.data.test_tenant)
+        test_os = clients.Manager(self.data.test_credentials)
         test_auth_provider = test_os.auth_provider
         # Get auth for the test user
         test_auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index c865ee1..fc51504 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -24,9 +24,7 @@
     def setUpClass(cls):
         super(ObjectTestACLs, cls).setUpClass()
         cls.data.setup_test_user()
-        test_os = clients.Manager(cls.data.test_user,
-                                  cls.data.test_password,
-                                  cls.data.test_tenant)
+        test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
 
     @classmethod
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 547bf87..ca53876 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -26,9 +26,7 @@
     def setUpClass(cls):
         super(ObjectACLsNegativeTest, cls).setUpClass()
         cls.data.setup_test_user()
-        test_os = clients.Manager(cls.data.test_user,
-                                  cls.data.test_password,
-                                  cls.data.test_tenant)
+        test_os = clients.Manager(cls.data.test_credentials)
         cls.test_auth_data = test_os.auth_provider.auth_data
 
     @classmethod
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 6c71340..581c6d9 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,6 +23,7 @@
 class StaticWebTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(StaticWebTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -45,7 +46,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(StaticWebTest, cls).tearDownClass()
 
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 9bd986f..5f46d01 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -31,8 +31,10 @@
 
 
 class ContainerSyncTest(base.BaseObjectTest):
+    clients = {}
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ContainerSyncTest, cls).setUpClass()
         cls.containers = []
@@ -50,7 +52,6 @@
             int(container_sync_timeout / cls.container_sync_interval)
 
         # define container and object clients
-        cls.clients = {}
         cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
             (cls.container_client, cls.object_client)
         cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
@@ -66,6 +67,7 @@
         super(ContainerSyncTest, cls).tearDownClass()
 
     @test.attr(type='slow')
+    @test.skip_because(bug='1317133')
     def test_container_synchronization(self):
         # container to container synchronization
         # to allow/accept sync requests to/from other accounts
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index 4f399b4..d1541b9 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -29,10 +29,7 @@
         # endpoint and test the healthcheck feature.
         cls.data.setup_test_user()
 
-        cls.os_test_user = clients.Manager(
-            cls.data.test_user,
-            cls.data.test_password,
-            cls.data.test_tenant)
+        cls.os_test_user = clients.Manager(cls.data.test_credentials)
 
         cls.xml_start = '<?xml version="1.0"?>\n' \
                         '<!DOCTYPE cross-domain-policy SYSTEM ' \
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index e0d15ac..dc5585e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -26,7 +26,11 @@
 
 class ObjectFormPostTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectFormPostTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name='TestContainer')
@@ -39,6 +43,18 @@
         cls.metadata = {'Temp-URL-Key': cls.key}
         cls.account_client.create_account_metadata(metadata=cls.metadata)
 
+    def setUp(self):
+        super(ObjectFormPostTest, self).setUp()
+
+        # make sure the metadata has been set
+        account_client_metadata, _ = \
+            self.account_client.list_account_metadata()
+        self.assertIn('x-account-meta-temp-url-key',
+                      account_client_metadata)
+        self.assertEqual(
+            account_client_metadata['x-account-meta-temp-url-key'],
+            self.key)
+
     @classmethod
     def tearDownClass(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
@@ -100,13 +116,9 @@
         headers = {'Content-Type': content_type,
                    'Content-Length': str(len(body))}
 
-        url = "%s/%s/%s" % (self.container_client.base_url,
-                            self.container_name,
-                            self.object_name)
+        url = "%s/%s" % (self.container_name, self.object_name)
 
-        # Use a raw request, otherwise authentication headers are used
-        resp, body = self.object_client.http_obj.request(url, "POST",
-                                                         body, headers=headers)
+        resp, body = self.object_client.post(url, body, headers=headers)
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, "Object", "POST")
 
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index a52c248..878bf6d 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -20,12 +20,17 @@
 
 from tempest.api.object_storage import base
 from tempest.common.utils import data_utils
+from tempest import exceptions
 from tempest import test
 
 
 class ObjectFormPostNegativeTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectFormPostNegativeTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name='TestContainer')
@@ -38,6 +43,18 @@
         cls.metadata = {'Temp-URL-Key': cls.key}
         cls.account_client.create_account_metadata(metadata=cls.metadata)
 
+    def setUp(self):
+        super(ObjectFormPostNegativeTest, self).setUp()
+
+        # make sure the metadata has been set
+        account_client_metadata, _ = \
+            self.account_client.list_account_metadata()
+        self.assertIn('x-account-meta-temp-url-key',
+                      account_client_metadata)
+        self.assertEqual(
+            account_client_metadata['x-account-meta-temp-url-key'],
+            self.key)
+
     @classmethod
     def tearDownClass(cls):
         cls.account_client.delete_account_metadata(metadata=cls.metadata)
@@ -100,12 +117,25 @@
         headers = {'Content-Type': content_type,
                    'Content-Length': str(len(body))}
 
-        url = "%s/%s/%s" % (self.container_client.base_url,
-                            self.container_name,
-                            self.object_name)
+        url = "%s/%s" % (self.container_name, self.object_name)
+        exc = self.assertRaises(
+            exceptions.Unauthorized,
+            self.object_client.post,
+            url, body, headers=headers)
+        self.assertIn('FormPost: Form Expired', str(exc))
 
-        # Use a raw request, otherwise authentication headers are used
-        resp, body = self.object_client.http_obj.request(url, "POST",
-                                                         body, headers=headers)
-        self.assertEqual(int(resp['status']), 401)
-        self.assertIn('FormPost: Form Expired', body)
+    @test.requires_ext(extension='formpost', service='object')
+    @test.attr(type='gate')
+    def test_post_object_using_form_invalid_signature(self):
+        self.key = "Wrong"
+        body, content_type = self.get_multipart_form()
+
+        headers = {'Content-Type': content_type,
+                   'Content-Length': str(len(body))}
+
+        url = "%s/%s" % (self.container_name, self.object_name)
+        exc = self.assertRaises(
+            exceptions.Unauthorized,
+            self.object_client.post,
+            url, body, headers=headers)
+        self.assertIn('FormPost: Invalid Signature', str(exc))
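Editorial note: both negative cases turn on the HMAC-SHA1 signature that get_multipart_form embeds in the form; an expiry in the past yields "FormPost: Form Expired", a wrong Temp-URL-Key yields "FormPost: Invalid Signature". A rough sketch of how such a FormPost signature is computed (names and defaults here are illustrative, not taken from the test base class)::

    import hmac
    import time
    from hashlib import sha1

    def formpost_signature(key, path, redirect='', max_file_size=104857600,
                           max_file_count=10, expires=None):
        # Swift checks an HMAC-SHA1 over these five fields, keyed with the
        # account's Temp-URL-Key; a bad key or a past expiry makes the POST
        # fail with 401 Unauthorized.
        if expires is None:
            expires = int(time.time() + 600)
        hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
                                            max_file_count, expires)
        return hmac.new(key, hmac_body, sha1).hexdigest(), expires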
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 91df292..06e63a4 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -14,7 +14,10 @@
 #    under the License.
 
 import hashlib
+import random
+import re
 from six import moves
+import time
 
 from tempest.api.object_storage import base
 from tempest.common import custom_matchers
@@ -35,6 +38,29 @@
         cls.delete_containers(cls.containers)
         super(ObjectTest, cls).tearDownClass()
 
+    def _create_object(self, metadata=None):
+        # setup object
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        self.object_client.create_object(self.container_name,
+                                         object_name, data, metadata=metadata)
+
+        return object_name, data
+
+    def _upload_segments(self):
+        # create object
+        object_name = data_utils.rand_name(name='LObject')
+        data = data_utils.arbitrary_string()
+        segments = 10
+        data_segments = [data + str(i) for i in moves.xrange(segments)]
+        # uploading segments
+        for i in moves.xrange(segments):
+            resp, _ = self.object_client.create_object_segments(
+                self.container_name, object_name, i, data_segments[i])
+            self.assertEqual(resp['status'], '201')
+
+        return object_name, data_segments
+
     @test.attr(type='smoke')
     def test_create_object(self):
         # create object
@@ -64,42 +90,227 @@
         self.assertHeaders(resp, 'Object', 'DELETE')
 
     @test.attr(type='smoke')
-    def test_object_metadata(self):
-        # add metadata to storage object, test if metadata is retrievable
+    def test_update_object_metadata(self):
+        # update object metadata
+        object_name, data = self._create_object()
 
-        # create Object
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        resp, _ = self.object_client.create_object(self.container_name,
-                                                   object_name, data)
-        # set object metadata
-        meta_key = data_utils.rand_name(name='test-')
-        meta_value = data_utils.rand_name(name='MetaValue-')
-        orig_metadata = {meta_key: meta_value}
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
         resp, _ = self.object_client.update_object_metadata(
-            self.container_name, object_name, orig_metadata)
+            self.container_name,
+            object_name,
+            metadata,
+            metadata_prefix='')
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, 'Object', 'POST')
 
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+
+    def test_update_object_metadata_with_remove_metadata(self):
+        # update object metadata with remove metadata
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertNotIn('x-object-meta-test-meta1', resp)
+
+    @test.attr(type='smoke')
+    def test_update_object_metadata_with_create_and_remove_metadata(self):
+        # creation and deletion of metadata with one request
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
+                           'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertNotIn('x-object-meta-test-meta1', resp)
+        self.assertIn('x-object-meta-test-meta2', resp)
+        self.assertEqual(resp['x-object-meta-test-meta2'], 'Meta2')
+
+    @test.attr(type='smoke')
+    def test_update_object_metadata_with_x_object_manifest(self):
+        # update object metadata with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        data_empty = ''
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data_empty,
+                                         metadata=None)
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        update_metadata = {'X-Object-Manifest': object_prefix}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn('x-object-manifest', resp)
+        self.assertNotEqual(len(resp['x-object-manifest']), 0)
+
+    def test_update_object_metadata_with_x_object_metakey(self):
+        # update object metadata with a blank metadata value
+        object_name, data = self._create_object()
+
+        update_metadata = {'X-Object-Meta-test-meta': ''}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], '')
+
+    @test.attr(type='smoke')
+    def test_update_object_metadata_with_x_remove_object_metakey(self):
+        # update object metadata with a blank value for the remove header
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
+        resp, _ = self.object_client.update_object_metadata(
+            self.container_name,
+            object_name,
+            update_metadata,
+            metadata_prefix='')
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'POST')
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertNotIn('x-object-meta-test-meta', resp)
+
+    @test.attr(type='smoke')
+    def test_list_object_metadata(self):
         # get object metadata
-        resp, resp_metadata = self.object_client.list_object_metadata(
-            self.container_name, object_name)
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=metadata)
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, 'Object', 'HEAD')
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
 
-        actual_meta_key = 'x-object-meta-' + meta_key
-        self.assertIn(actual_meta_key, resp)
-        self.assertEqual(resp[actual_meta_key], meta_value)
+    @test.attr(type='smoke')
+    def test_list_no_object_metadata(self):
+        # get empty list of object metadata
+        object_name, data = self._create_object()
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'HEAD')
+        self.assertNotIn('x-object-meta-', str(resp))
+
+    @test.attr(type='smoke')
+    def test_list_object_metadata_with_x_object_manifest(self):
+        # get object metadata with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        metadata = {'X-Object-Manifest': object_prefix}
+        data_empty = ''
+        resp, _ = self.object_client.create_object(
+            self.container_name,
+            object_name,
+            data_empty,
+            metadata=metadata)
+
+        resp, _ = self.object_client.list_object_metadata(
+            self.container_name,
+            object_name)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+        # Check only the existence of common headers with custom matcher
+        self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+                        'Object', 'HEAD'))
+        self.assertIn('x-object-manifest', resp)
+
+        # The Etag value of a large object is enclosed in double quotes.
+        # This is a special case, so the response header formats are
+        # checked here without a custom matcher.
+        self.assertTrue(resp['etag'].startswith('\"'))
+        self.assertTrue(resp['etag'].endswith('\"'))
+        self.assertTrue(resp['etag'].strip('\"').isalnum())
+        self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+        self.assertNotEqual(len(resp['content-type']), 0)
+        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+                                 resp['x-trans-id']))
+        self.assertNotEqual(len(resp['date']), 0)
+        self.assertEqual(resp['accept-ranges'], 'bytes')
+        self.assertEqual(resp['x-object-manifest'],
+                         '%s/%s' % (self.container_name, object_name))
 
     @test.attr(type='smoke')
     def test_get_object(self):
         # retrieve object's data (in response body)
 
         # create object
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        resp, _ = self.object_client.create_object(self.container_name,
-                                                   object_name, data)
+        object_name, data = self._create_object()
         # get object
         resp, body = self.object_client.get_object(self.container_name,
                                                    object_name)
@@ -109,6 +320,183 @@
         self.assertEqual(body, data)
 
     @test.attr(type='smoke')
+    def test_get_object_with_metadata(self):
+        # get object with metadata
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=metadata)
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=None)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_range(self):
+        # get object with range
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(100)
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=None)
+        rand_num = random.randint(3, len(data) - 1)
+        metadata = {'Range': 'bytes=%s-%s' % (rand_num - 3, rand_num - 1)}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data[rand_num - 3: rand_num])
+
+    @test.attr(type='smoke')
+    def test_get_object_with_x_object_manifest(self):
+        # get object with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        metadata = {'X-Object-Manifest': object_prefix}
+        data_empty = ''
+        resp, body = self.object_client.create_object(
+            self.container_name,
+            object_name,
+            data_empty,
+            metadata=metadata)
+
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=None)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+        # Check only the existence of common headers with custom matcher
+        self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+                        'Object', 'GET'))
+        self.assertIn('x-object-manifest', resp)
+
+        # The Etag value of a large object is enclosed in double quotes.
+        # This is a special case, so the response header formats are
+        # checked here without a custom matcher.
+        self.assertTrue(resp['etag'].startswith('\"'))
+        self.assertTrue(resp['etag'].endswith('\"'))
+        self.assertTrue(resp['etag'].strip('\"').isalnum())
+        self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+        self.assertNotEqual(len(resp['content-type']), 0)
+        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+                                 resp['x-trans-id']))
+        self.assertNotEqual(len(resp['date']), 0)
+        self.assertEqual(resp['accept-ranges'], 'bytes')
+        self.assertEqual(resp['x-object-manifest'],
+                         '%s/%s' % (self.container_name, object_name))
+
+        self.assertEqual(''.join(data_segments), body)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_match(self):
+        # get object with if_match
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(10)
+        create_md5 = hashlib.md5(data).hexdigest()
+        create_metadata = {'Etag': create_md5}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        list_metadata = {'If-Match': create_md5}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_modified_since(self):
+        # get object with if_modified_since
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        time_now = time.time()
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=None)
+
+        http_date = time.ctime(time_now - 86400)
+        list_metadata = {'If-Modified-Since': http_date}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    def test_get_object_with_if_none_match(self):
+        # get object with if_none_match
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(10)
+        create_md5 = hashlib.md5(data).hexdigest()
+        create_metadata = {'Etag': create_md5}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        list_data = data_utils.arbitrary_string(15)
+        list_md5 = hashlib.md5(list_data).hexdigest()
+        list_metadata = {'If-None-Match': list_md5}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_unmodified_since(self):
+        # get object with if_unmodified_since
+        object_name, data = self._create_object()
+
+        time_now = time.time()
+        http_date = time.ctime(time_now + 86400)
+        list_metadata = {'If-Unmodified-Since': http_date}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_x_newest(self):
+        # get object with x_newest
+        object_name, data = self._create_object()
+
+        list_metadata = {'X-Newest': 'true'}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
     def test_copy_object_in_same_container(self):
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
@@ -286,10 +674,7 @@
         # Make a conditional request for an object using the If-None-Match
         # header, it should get downloaded only if the local file is different,
         # otherwise the response code should be 304 Not Modified
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        self.object_client.create_object(self.container_name,
-                                         object_name, data)
+        object_name, data = self._create_object()
         # local copy is identical, no download
         md5 = hashlib.md5(data).hexdigest()
         headers = {'If-None-Match': md5}
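Editorial note: the new _upload_segments helper and the X-Object-Manifest tests above exercise Swift's dynamic large object pattern: segments are uploaded first, then a zero-byte manifest object whose X-Object-Manifest header names a "container/object" prefix, and a GET of the manifest returns the segments concatenated. Condensed into one helper (client method names as used above; the segment object names are an implementation detail of create_object_segments)::

    def upload_dlo(object_client, container, name, segments):
        # Upload the individual segments under the object's prefix.
        for i, data in enumerate(segments):
            object_client.create_object_segments(container, name, i, data)
        # A zero-byte manifest pointing at "<container>/<object>" makes a GET
        # return all the segments joined together.
        prefix = '%s/%s' % (container, name)
        object_client.create_object(container, name, '',
                                    metadata={'X-Object-Manifest': prefix})
        resp, body = object_client.get_object(container, name, metadata=None)
        assert body == ''.join(segments)
        return resp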
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index cf24f66..7d26433 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -27,7 +27,11 @@
 
 class ObjectTempUrlNegativeTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectTempUrlNegativeTest, cls).setUpClass()
 
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 9bf9568..446f4ab 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -10,6 +10,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os.path
+
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
@@ -28,50 +30,53 @@
     @classmethod
     def setUpClass(cls):
         super(BaseOrchestrationTest, cls).setUpClass()
-        os = clients.OrchestrationManager()
+        cls.os = clients.Manager()
         if not CONF.service_available.heat:
             raise cls.skipException("Heat support is required")
         cls.build_timeout = CONF.orchestration.build_timeout
         cls.build_interval = CONF.orchestration.build_interval
 
-        cls.os = os
-        cls.orchestration_client = os.orchestration_client
-        cls.client = os.orchestration_client
-        cls.servers_client = os.servers_client
-        cls.keypairs_client = os.keypairs_client
-        cls.network_client = os.network_client
+        cls.orchestration_client = cls.os.orchestration_client
+        cls.client = cls.orchestration_client
+        cls.servers_client = cls.os.servers_client
+        cls.keypairs_client = cls.os.keypairs_client
+        cls.network_client = cls.os.network_client
+        cls.volumes_client = cls.os.volumes_client
+        cls.images_v2_client = cls.os.image_client_v2
         cls.stacks = []
         cls.keypairs = []
+        cls.images = []
 
     @classmethod
     def _get_default_network(cls):
-        resp, networks = cls.network_client.list_networks()
+        __, networks = cls.network_client.list_networks()
         for net in networks['networks']:
             if net['name'] == CONF.compute.fixed_network_name:
                 return net
 
     @classmethod
     def _get_identity_admin_client(cls):
-        """
-        Returns an instance of the Identity Admin API client
-        """
-        os = clients.AdminManager(interface=cls._interface)
-        admin_client = os.identity_client
+        """Returns an instance of the Identity Admin API client."""
+        manager = clients.AdminManager(interface=cls._interface)
+        admin_client = manager.identity_client
         return admin_client
 
     @classmethod
-    def create_stack(cls, stack_name, template_data, parameters={}):
+    def create_stack(cls, stack_name, template_data, parameters={},
+                     environment=None, files=None):
         resp, body = cls.client.create_stack(
             stack_name,
             template=template_data,
-            parameters=parameters)
+            parameters=parameters,
+            environment=environment,
+            files=files)
         stack_id = resp['location'].split('/')[-1]
         stack_identifier = '%s/%s' % (stack_name, stack_id)
         cls.stacks.append(stack_identifier)
         return stack_identifier
 
     @classmethod
-    def clear_stacks(cls):
+    def _clear_stacks(cls):
         for stack_identifier in cls.stacks:
             try:
                 cls.client.delete_stack(stack_identifier)
@@ -88,12 +93,12 @@
     @classmethod
     def _create_keypair(cls, name_start='keypair-heat-'):
         kp_name = data_utils.rand_name(name_start)
-        resp, body = cls.keypairs_client.create_keypair(kp_name)
+        __, body = cls.keypairs_client.create_keypair(kp_name)
         cls.keypairs.append(kp_name)
         return body
 
     @classmethod
-    def clear_keypairs(cls):
+    def _clear_keypairs(cls):
         for kp_name in cls.keypairs:
             try:
                 cls.keypairs_client.delete_keypair(kp_name)
@@ -101,13 +106,64 @@
                 pass
 
     @classmethod
+    def _create_image(cls, name_start='image-heat-', container_format='bare',
+                      disk_format='iso'):
+        image_name = data_utils.rand_name(name_start)
+        __, body = cls.images_v2_client.create_image(image_name,
+                                                     container_format,
+                                                     disk_format)
+        image_id = body['id']
+        cls.images.append(image_id)
+        return body
+
+    @classmethod
+    def _clear_images(cls):
+        for image_id in cls.images:
+            try:
+                cls.images_v2_client.delete_image(image_id)
+            except exceptions.NotFound:
+                pass
+
+    @classmethod
+    def load_template(cls, name, ext='yaml'):
+        loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+        fullpath = os.path.join(os.path.dirname(__file__), *loc)
+
+        with open(fullpath, "r") as f:
+            content = f.read()
+            return content
+
+    @classmethod
     def tearDownClass(cls):
-        cls.clear_stacks()
-        cls.clear_keypairs()
+        cls._clear_stacks()
+        cls._clear_keypairs()
+        cls._clear_images()
         super(BaseOrchestrationTest, cls).tearDownClass()
 
     @staticmethod
     def stack_output(stack, output_key):
-        """Return a stack output value for a give key."""
+        """Return a stack output value for a given key."""
         return next((o['output_value'] for o in stack['outputs']
                     if o['output_key'] == output_key), None)
+
+    def assert_fields_in_dict(self, obj, *fields):
+        for field in fields:
+            self.assertIn(field, obj)
+
+    def list_resources(self, stack_identifier):
+        """Get a dict mapping of resource names to types."""
+        resp, resources = self.client.list_resources(stack_identifier)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(resources, list)
+        for res in resources:
+            self.assert_fields_in_dict(res, 'logical_resource_id',
+                                       'resource_type', 'resource_status',
+                                       'updated_time')
+
+        return dict((r['resource_name'], r['resource_type'])
+                    for r in resources)
+
+    def get_stack_output(self, stack_identifier, output_key):
+        resp, body = self.client.get_stack(stack_identifier)
+        self.assertEqual('200', resp['status'])
+        return self.stack_output(body, output_key)
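Editorial note: with the templates moved out into stacks/templates/, a typical subclass now loads one by name, creates the stack, waits for CREATE_COMPLETE, and reads its outputs. A condensed sketch of that flow, using only the helpers defined above (the test class itself is illustrative)::

    from tempest.api.orchestration import base
    from tempest.common.utils import data_utils


    class ExampleStackTest(base.BaseOrchestrationTest):

        def test_random_string_stack(self):
            stack_name = data_utils.rand_name('heat')
            # Reads stacks/templates/random_string.yaml via load_template().
            template = self.load_template('random_string')
            stack_identifier = self.create_stack(
                stack_name, template, parameters={'random_length': 12})
            self.client.wait_for_stack_status(stack_identifier,
                                              'CREATE_COMPLETE')
            random_value = self.get_stack_output(stack_identifier,
                                                 'random_value')
            self.assertEqual(12, len(random_value))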
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..ffff580
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2013-05-23
+
+resources:
+    volume:
+        type: OS::Cinder::Volume
+        properties:
+            size: 1
+            description: a descriptive description
+            name: volume_name
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  display_name:
+    value: { get_attr: ['volume', 'display_name'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
new file mode 100644
index 0000000..b660c19
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -0,0 +1,29 @@
+heat_template_version: 2013-05-23
+
+resources:
+    volume:
+        deletion_policy: 'Retain'
+        type: OS::Cinder::Volume
+        properties:
+            size: 1
+            description: a descriptive description
+            name: volume_name
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  display_name:
+    value: { get_attr: ['volume', 'display_name'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
new file mode 100644
index 0000000..878ff68
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -0,0 +1,70 @@
+heat_template_version: '2013-05-23'
+description: |
+  Template which creates a single Nova server attached to a new Neutron network
+parameters:
+  KeyName:
+    type: string
+  InstanceType:
+    type: string
+  ImageId:
+    type: string
+  SubNetCidr:
+    type: string
+  ExternalNetworkId:
+    type: string
+  DNSServers:
+    type: comma_delimited_list
+  timeout:
+    type: number
+resources:
+  Network:
+    type: OS::Neutron::Net
+    properties:
+      name: NewNetwork
+  Subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: {Ref: Network}
+      name: NewSubnet
+      ip_version: 4
+      cidr: { get_param: SubNetCidr }
+      dns_nameservers: { get_param: DNSServers }
+  Router:
+    type: OS::Neutron::Router
+    properties:
+      name: NewRouter
+      admin_state_up: true
+      external_gateway_info:
+        network: {get_param: ExternalNetworkId}
+  RouterInterface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: {get_resource: Router}
+      subnet_id: {get_resource: Subnet}
+  Server:
+    type: OS::Nova::Server
+    metadata:
+      Name: SmokeServerNeutron
+    properties:
+      image: {get_param: ImageId}
+      flavor: {get_param: InstanceType}
+      key_name: {get_param: KeyName}
+      networks:
+      - network: {get_resource: Network}
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/bash -v
+
+            while ! /opt/aws/bin/cfn-signal -e 0 -r "SmokeServerNeutron created" \
+            'wait_handle' ; do sleep 3; done
+          params:
+            wait_handle: {get_resource: WaitHandleNeutron}
+  WaitHandleNeutron:
+    type: AWS::CloudFormation::WaitConditionHandle
+  WaitCondition:
+    type: AWS::CloudFormation::WaitCondition
+    depends_on: Server
+    properties:
+      Handle: {get_resource: WaitHandleNeutron}
+      Timeout: {get_param: timeout}
diff --git a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
new file mode 100644
index 0000000..8690941
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
@@ -0,0 +1,34 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which creates some simple resources
+Parameters:
+  trigger:
+    Type: String
+    Default: not_yet
+  image:
+    Type: String
+Resources:
+  fluffy:
+    Type: AWS::AutoScaling::LaunchConfiguration
+    Metadata:
+      kittens:
+      - Tom
+      - Stinky
+    Properties:
+      ImageId: {Ref: image}
+      InstanceType: not_used
+      UserData:
+        Fn::Replace:
+        - variable_a: {Ref: trigger}
+          variable_b: bee
+        - |
+          A == variable_a
+          B == variable_b
+Outputs:
+  fluffy:
+    Description: "fluffies irc nick"
+    Value:
+      Fn::Replace:
+      - nick: {Ref: fluffy}
+      - |
+        #nick
diff --git a/tempest/api/orchestration/stacks/templates/nova_keypair.json b/tempest/api/orchestration/stacks/templates/nova_keypair.json
new file mode 100644
index 0000000..63d3817
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/nova_keypair.json
@@ -0,0 +1,48 @@
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Template which create two key pairs.",
+  "Parameters" : {
+    "KeyPairName1": {
+      "Type": "String",
+      "Default": "testkey1"
+      },
+    "KeyPairName2": {
+      "Type": "String",
+      "Default": "testkey2"
+      }
+   },
+   "Resources" : {
+     "KeyPairSavePrivate": {
+       "Type": "OS::Nova::KeyPair",
+       "Properties": {
+         "name" : { "Ref" : "KeyPairName1" },
+         "save_private_key": true
+       }
+     },
+     "KeyPairDontSavePrivate": {
+       "Type": "OS::Nova::KeyPair",
+       "Properties": {
+         "name" : { "Ref" : "KeyPairName2" },
+         "save_private_key": false
+      }
+     }
+  },
+ "Outputs": {
+   "KeyPair_PublicKey": {
+     "Description": "Public Key of generated keypair.",
+     "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "public_key"] }
+    },
+   "KeyPair_PrivateKey": {
+     "Description": "Private Key of generated keypair.",
+     "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "private_key"] }
+   },
+   "KeyPairDontSavePrivate_PublicKey": {
+     "Description": "Public Key of generated keypair.",
+     "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "public_key"] }
+   },
+  "KeyPairDontSavePrivate_PrivateKey": {
+     "Description": "Private Key of generated keypair.",
+     "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "private_key"] }
+   }
+  }
+}
diff --git a/tempest/api/orchestration/stacks/templates/nova_keypair.yaml b/tempest/api/orchestration/stacks/templates/nova_keypair.yaml
new file mode 100644
index 0000000..81ad99c
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/nova_keypair.yaml
@@ -0,0 +1,43 @@
+heat_template_version: 2013-05-23
+
+description: >
+  Template which creates two key pairs.
+
+parameters:
+  KeyPairName1:
+    type: string
+    default: testkey
+
+  KeyPairName2:
+    type: string
+    default: testkey2
+
+resources:
+  KeyPairSavePrivate:
+    type: OS::Nova::KeyPair
+    properties:
+      name: { get_param: KeyPairName1 }
+      save_private_key: true
+
+  KeyPairDontSavePrivate:
+    type: OS::Nova::KeyPair
+    properties:
+      name: { get_param: KeyPairName2 }
+      save_private_key: false
+
+outputs:
+  KeyPair_PublicKey:
+    description: Public Key of generated keypair
+    value: { get_attr: [KeyPairSavePrivate, public_key] }
+
+  KeyPair_PrivateKey:
+    description: Private Key of generated keypair
+    value: { get_attr: [KeyPairSavePrivate, private_key] }
+
+  KeyPairDontSavePrivate_PublicKey:
+    description: Public Key of generated keypair
+    value: { get_attr: [KeyPairDontSavePrivate, public_key] }
+
+  KeyPairDontSavePrivate_PrivateKey:
+    description: Private Key of generated keypair
+    value: { get_attr: [KeyPairDontSavePrivate, private_key] }
diff --git a/tempest/api/orchestration/stacks/templates/random_string.yaml b/tempest/api/orchestration/stacks/templates/random_string.yaml
new file mode 100644
index 0000000..dfd2342
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/random_string.yaml
@@ -0,0 +1,18 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  random_length:
+    type: number
+    default: 10
+
+resources:
+  random:
+    type: OS::Heat::RandomString
+    properties:
+        length: {get_param: random_length}
+
+outputs:
+  random_length:
+    value: {get_param: random_length}
+  random_value:
+    value: {get_attr: [random, value]}
diff --git a/tempest/api/orchestration/stacks/templates/swift_basic.yaml b/tempest/api/orchestration/stacks/templates/swift_basic.yaml
new file mode 100644
index 0000000..713f8bc
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/swift_basic.yaml
@@ -0,0 +1,23 @@
+heat_template_version: 2013-05-23
+description: Template which creates a Swift container resource
+
+resources:
+  SwiftContainerWebsite:
+    deletion_policy: "Delete"
+    type: OS::Swift::Container
+    properties:
+      X-Container-Read: ".r:*"
+      X-Container-Meta:
+        web-index: "index.html"
+        web-error: "error.html"
+
+  SwiftContainer:
+    type: OS::Swift::Container
+
+outputs:
+  WebsiteURL:
+    description: "URL for website hosted on S3"
+    value: { get_attr: [SwiftContainer, WebsiteURL] }
+  DomainName:
+    description: "Domain of Swift host"
+    value: { get_attr: [SwiftContainer, DomainName] }
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
new file mode 100644
index 0000000..3911e72
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -0,0 +1,93 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class StackEnvironmentTest(base.BaseOrchestrationTest):
+
+    @test.attr(type='gate')
+    def test_environment_parameter(self):
+        """Test passing a stack parameter via the environment."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('random_string')
+        environment = {'parameters': {'random_length': 20}}
+
+        stack_identifier = self.create_stack(stack_name, template,
+                                             environment=environment)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        random_len = self.get_stack_output(stack_identifier, 'random_length')
+        self.assertEqual(20, random_len)
+
+        random_value = self.get_stack_output(stack_identifier, 'random_value')
+        self.assertEqual(20, len(random_value))
+
+    @test.attr(type='gate')
+    def test_environment_provider_resource(self):
+        """Test passing resource_registry defining a provider resource."""
+        stack_name = data_utils.rand_name('heat')
+        template = '''
+heat_template_version: 2013-05-23
+resources:
+  random:
+    type: My:Random::String
+outputs:
+    random_value:
+        value: {get_attr: [random, random_value]}
+'''
+        environment = {'resource_registry':
+                       {'My:Random::String': 'my_random.yaml'}}
+        files = {'my_random.yaml': self.load_template('random_string')}
+
+        stack_identifier = self.create_stack(stack_name, template,
+                                             environment=environment,
+                                             files=files)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # random_string.yaml specifies a length of 10
+        random_value = self.get_stack_output(stack_identifier, 'random_value')
+        self.assertEqual(10, len(random_value))
+
+    @test.attr(type='gate')
+    def test_files_provider_resource(self):
+        """Test untyped defining of a provider resource via "files"."""
+        # It's also possible to specify the filename directly in the template.
+        # without adding the type alias to resource_registry
+        stack_name = data_utils.rand_name('heat')
+        template = '''
+heat_template_version: 2013-05-23
+resources:
+  random:
+    type: my_random.yaml
+outputs:
+    random_value:
+        value: {get_attr: [random, random_value]}
+'''
+        files = {'my_random.yaml': self.load_template('random_string')}
+
+        stack_identifier = self.create_stack(stack_name, template,
+                                             files=files)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # random_string.yaml specifies a length of 10
+        random_value = self.get_stack_output(stack_identifier, 'random_value')
+        self.assertEqual(10, len(random_value))
diff --git a/tempest/api/orchestration/stacks/test_limits.py b/tempest/api/orchestration/stacks/test_limits.py
index 893dcc4..8ee62ab 100644
--- a/tempest/api/orchestration/stacks/test_limits.py
+++ b/tempest/api/orchestration/stacks/test_limits.py
@@ -16,7 +16,7 @@
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 CONF = config.CONF
 
@@ -25,7 +25,7 @@
 
 class TestServerStackLimits(base.BaseOrchestrationTest):
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_exceed_max_template_size_fails(self):
         stack_name = data_utils.rand_name('heat')
         fill = 'A' * CONF.orchestration.max_template_size
@@ -37,3 +37,17 @@
         ex = self.assertRaises(exceptions.BadRequest, self.create_stack,
                                stack_name, template)
         self.assertIn('Template exceeds maximum allowed size', str(ex))
+
+    @test.attr(type='gate')
+    def test_exceed_max_resources_per_stack(self):
+        stack_name = data_utils.rand_name('heat')
+        # Create a big template, one resource more than the limit
+        template = 'heat_template_version: \'2013-05-23\'\nresources:\n'
+        rsrc_snippet = '  random%s:\n    type: \'OS::Heat::RandomString\'\n'
+        num_resources = CONF.orchestration.max_resources_per_stack + 1
+        for i in range(num_resources):
+            template += rsrc_snippet % i
+
+        ex = self.assertRaises(exceptions.BadRequest, self.create_stack,
+                               stack_name, template)
+        self.assertIn('Maximum resources per stack exceeded', str(ex))
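Editorial note: for a small count the template string the new limit test assembles is easy to visualise; the test itself simply loops one resource past max_resources_per_stack::

    # With num_resources == 2 the loop builds the equivalent of:
    #
    #   heat_template_version: '2013-05-23'
    #   resources:
    #     random0:
    #       type: 'OS::Heat::RandomString'
    #     random1:
    #       type: 'OS::Heat::RandomString'
    template = 'heat_template_version: \'2013-05-23\'\nresources:\n'
    rsrc_snippet = '  random%s:\n    type: \'OS::Heat::RandomString\'\n'
    for i in range(2):
        template += rsrc_snippet % i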
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index dee26b1..e92b945 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -12,6 +12,7 @@
 
 
 import logging
+import netaddr
 
 from tempest.api.orchestration import base
 from tempest import clients
@@ -26,78 +27,6 @@
 
 
 class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-
-    template = """
-heat_template_version: '2013-05-23'
-description: |
-  Template which creates single EC2 instance
-parameters:
-  KeyName:
-    type: string
-  InstanceType:
-    type: string
-  ImageId:
-    type: string
-  ExternalRouterId:
-    type: string
-  ExternalNetworkId:
-    type: string
-resources:
-  Network:
-    type: OS::Neutron::Net
-    properties:
-      name: NewNetwork
-  Subnet:
-    type: OS::Neutron::Subnet
-    properties:
-      network_id: {Ref: Network}
-      name: NewSubnet
-      ip_version: 4
-      cidr: 10.0.3.0/24
-      dns_nameservers: ["8.8.8.8"]
-      allocation_pools:
-      - {end: 10.0.3.150, start: 10.0.3.20}
-  Router:
-    type: OS::Neutron::Router
-    properties:
-      name: NewRouter
-      admin_state_up: false
-      external_gateway_info:
-        network: {get_param: ExternalNetworkId}
-        enable_snat: false
-  RouterInterface:
-    type: OS::Neutron::RouterInterface
-    properties:
-      router_id: {get_param: ExternalRouterId}
-      subnet_id: {get_resource: Subnet}
-  Server:
-    type: AWS::EC2::Instance
-    metadata:
-      Name: SmokeServerNeutron
-    properties:
-      ImageId: {get_param: ImageId}
-      InstanceType: {get_param: InstanceType}
-      KeyName: {get_param: KeyName}
-      SubnetId: {get_resource: Subnet}
-      UserData:
-        str_replace:
-          template: |
-            #!/bin/bash -v
-
-            /opt/aws/bin/cfn-signal -e 0 -r "SmokeServerNeutron created" \
-            'wait_handle'
-          params:
-            wait_handle: {get_resource: WaitHandleNeutron}
-  WaitHandleNeutron:
-    type: AWS::CloudFormation::WaitConditionHandle
-  WaitCondition:
-    type: AWS::CloudFormation::WaitCondition
-    depends_on: Server
-    properties:
-      Handle: {get_resource: WaitHandleNeutron}
-      Timeout: '600'
-"""
 
     @classmethod
     @test.safe_setup
@@ -105,27 +34,32 @@
         super(NeutronResourcesTestJSON, cls).setUpClass()
         if not CONF.orchestration.image_ref:
             raise cls.skipException("No image available to test")
-        cls.client = cls.orchestration_client
         os = clients.Manager()
         if not CONF.service_available.neutron:
             raise cls.skipException("Neutron support is required")
         cls.network_client = os.network_client
         cls.stack_name = data_utils.rand_name('heat')
+        template = cls.load_template('neutron_basic')
         cls.keypair_name = (CONF.orchestration.keypair_name or
                             cls._create_keypair()['name'])
-        cls.external_router_id = cls._get_external_router_id()
         cls.external_network_id = CONF.network.public_network_id
 
+        tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+        mask_bits = CONF.network.tenant_network_mask_bits
+        cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next()
+
         # create the stack
         cls.stack_identifier = cls.create_stack(
             cls.stack_name,
-            cls.template,
+            template,
             parameters={
                 'KeyName': cls.keypair_name,
                 'InstanceType': CONF.orchestration.instance_type,
                 'ImageId': CONF.orchestration.image_ref,
-                'ExternalRouterId': cls.external_router_id,
-                'ExternalNetworkId': cls.external_network_id
+                'ExternalNetworkId': cls.external_network_id,
+                'timeout': CONF.orchestration.build_timeout,
+                'DNSServers': CONF.network.dns_servers,
+                'SubNetCidr': str(cls.subnet_cidr)
             })
         cls.stack_id = cls.stack_identifier.split('/')[1]
         try:
@@ -148,21 +82,13 @@
         for resource in resources:
             cls.test_resources[resource['logical_resource_id']] = resource
 
-    @classmethod
-    def _get_external_router_id(cls):
-        resp, body = cls.network_client.list_ports()
-        ports = body['ports']
-        router_ports = filter(lambda port: port['device_owner'] ==
-                              'network:router_interface', ports)
-        return router_ports[0]['device_id']
-
     @test.attr(type='slow')
     def test_created_resources(self):
         """Verifies created neutron resources."""
         resources = [('Network', 'OS::Neutron::Net'),
                      ('Subnet', 'OS::Neutron::Subnet'),
                      ('RouterInterface', 'OS::Neutron::RouterInterface'),
-                     ('Server', 'AWS::EC2::Instance')]
+                     ('Server', 'OS::Nova::Server')]
         for resource_name, resource_type in resources:
             resource = self.test_resources.get(resource_name, None)
             self.assertIsInstance(resource, dict)
@@ -192,11 +118,10 @@
         self.assertEqual(subnet_id, subnet['id'])
         self.assertEqual(network_id, subnet['network_id'])
         self.assertEqual('NewSubnet', subnet['name'])
-        self.assertEqual('8.8.8.8', subnet['dns_nameservers'][0])
-        self.assertEqual('10.0.3.20', subnet['allocation_pools'][0]['start'])
-        self.assertEqual('10.0.3.150', subnet['allocation_pools'][0]['end'])
+        self.assertEqual(sorted(CONF.network.dns_servers),
+                         sorted(subnet['dns_nameservers']))
         self.assertEqual(4, subnet['ip_version'])
-        self.assertEqual('10.0.3.0/24', subnet['cidr'])
+        self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
 
     @test.attr(type='slow')
     def test_created_router(self):
@@ -208,20 +133,19 @@
         self.assertEqual('NewRouter', router['name'])
         self.assertEqual(self.external_network_id,
                          router['external_gateway_info']['network_id'])
-        self.assertEqual(False,
-                         router['external_gateway_info']['enable_snat'])
-        self.assertEqual(False, router['admin_state_up'])
+        self.assertEqual(True, router['admin_state_up'])
 
     @test.attr(type='slow')
     def test_created_router_interface(self):
         """Verifies created router interface."""
+        router_id = self.test_resources.get('Router')['physical_resource_id']
         network_id = self.test_resources.get('Network')['physical_resource_id']
         subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
         resp, body = self.network_client.list_ports()
         self.assertEqual('200', resp['status'])
         ports = body['ports']
         router_ports = filter(lambda port: port['device_id'] ==
-                              self.external_router_id, ports)
+                              router_id, ports)
         created_network_ports = filter(lambda port: port['network_id'] ==
                                        network_id, router_ports)
         self.assertEqual(1, len(created_network_ports))
@@ -231,7 +155,8 @@
                                   subnet_id, fixed_ips)
         self.assertEqual(1, len(subnet_fixed_ips))
         router_interface_ip = subnet_fixed_ips[0]['ip_address']
-        self.assertEqual('10.0.3.1', router_interface_ip)
+        self.assertEqual(str(self.subnet_cidr.iter_hosts().next()),
+                         router_interface_ip)
 
     @test.attr(type='slow')
     def test_created_server(self):
@@ -243,8 +168,4 @@
         self.assertEqual('ACTIVE', server['status'])
         network = server['addresses']['NewNetwork'][0]
         self.assertEqual(4, network['version'])
-        ip_addr_prefix = network['addr'][:7]
-        ip_addr_suffix = int(network['addr'].split('.')[3])
-        self.assertEqual('10.0.3.', ip_addr_prefix)
-        self.assertTrue(ip_addr_suffix >= 20)
-        self.assertTrue(ip_addr_suffix <= 150)
+        self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
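The rewritten setup above derives the test subnet from the configured tenant CIDR instead of hard-coding 10.0.3.0/24, and the assertions then check the router interface and server addresses against that derived subnet. A minimal, self-contained sketch of the netaddr calls involved, using assumed example values for CONF.network.tenant_network_cidr and tenant_network_mask_bits (illustrative only, not part of the patch):

    # Illustrative only -- example values stand in for the Tempest config.
    import netaddr

    tenant_cidr = netaddr.IPNetwork('10.100.0.0/16')   # assumed tenant CIDR
    subnet_cidr = next(tenant_cidr.subnet(24))         # first /24 slice

    # The router interface is expected on the first usable host address.
    first_host = next(subnet_cidr.iter_hosts())        # 10.100.0.1

    # Any address Nova assigns to the server must fall inside the subnet.
    assert netaddr.IPAddress('10.100.0.7') in subnet_cidr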
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 11d01f7..585c90b 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -14,82 +14,52 @@
 
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import config
+from tempest import test
 
+CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
 
 
 class StacksTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-
-    template = """
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
-  Template which creates some simple resources
-Parameters:
-  trigger:
-    Type: String
-    Default: not_yet
-Resources:
-  fluffy:
-    Type: AWS::AutoScaling::LaunchConfiguration
-    Metadata:
-      kittens:
-      - Tom
-      - Stinky
-    Properties:
-      ImageId: not_used
-      InstanceType: not_used
-      UserData:
-        Fn::Replace:
-        - variable_a: {Ref: trigger}
-          variable_b: bee
-        - |
-          A == variable_a
-          B == variable_b
-Outputs:
-  fluffy:
-    Description: "fluffies irc nick"
-    Value:
-      Fn::Replace:
-      - nick: {Ref: fluffy}
-      - |
-        #nick
-"""
 
     @classmethod
     def setUpClass(cls):
         super(StacksTestJSON, cls).setUpClass()
-        cls.client = cls.orchestration_client
         cls.stack_name = data_utils.rand_name('heat')
-
+        template = cls.load_template('non_empty_stack')
+        image_id = (CONF.orchestration.image_ref or
+                    cls._create_image()['id'])
         # create the stack
         cls.stack_identifier = cls.create_stack(
             cls.stack_name,
-            cls.template,
+            template,
             parameters={
-                'trigger': 'start'
+                'trigger': 'start',
+                'image': image_id
             })
         cls.stack_id = cls.stack_identifier.split('/')[1]
         cls.resource_name = 'fluffy'
         cls.resource_type = 'AWS::AutoScaling::LaunchConfiguration'
         cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
 
-    def assert_fields_in_dict(self, obj, *fields):
-        for field in fields:
-            self.assertIn(field, obj)
-
-    @attr(type='gate')
-    def test_stack_list(self):
-        """Created stack should be on the list of existing stacks."""
-        resp, stacks = self.client.list_stacks()
+    def _list_stacks(self, expected_num=None, **filter_kwargs):
+        resp, stacks = self.client.list_stacks(params=filter_kwargs)
         self.assertEqual('200', resp['status'])
         self.assertIsInstance(stacks, list)
+        if expected_num is not None:
+            self.assertEqual(expected_num, len(stacks))
+        return stacks
+
+    @test.attr(type='gate')
+    def test_stack_list(self):
+        """Created stack should be in the list of existing stacks."""
+        stacks = self._list_stacks()
         stacks_names = map(lambda stack: stack['stack_name'], stacks)
         self.assertIn(self.stack_name, stacks_names)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_stack_show(self):
         """Getting details about created stack should be possible."""
         resp, stack = self.client.get_stack(self.stack_name)
@@ -109,9 +79,9 @@
         self.assertEqual(self.stack_id, stack['id'])
         self.assertEqual('fluffy', stack['outputs'][0]['output_key'])
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_suspend_resume_stack(self):
-        """suspend and resume a stack."""
+        """Suspend and resume a stack."""
         resp, suspend_stack = self.client.suspend_stack(self.stack_identifier)
         self.assertEqual('200', resp['status'])
         self.client.wait_for_stack_status(self.stack_identifier,
@@ -121,26 +91,14 @@
         self.client.wait_for_stack_status(self.stack_identifier,
                                           'RESUME_COMPLETE')
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_list_resources(self):
         """Getting list of created resources for the stack should be possible.
         """
-        resp, resources = self.client.list_resources(self.stack_identifier)
-        self.assertEqual('200', resp['status'])
-        self.assertIsInstance(resources, list)
-        for res in resources:
-            self.assert_fields_in_dict(res, 'logical_resource_id',
-                                       'resource_type', 'resource_status',
-                                       'updated_time')
+        resources = self.list_resources(self.stack_identifier)
+        self.assertEqual({self.resource_name: self.resource_type}, resources)
 
-        resources_names = map(lambda resource: resource['logical_resource_id'],
-                              resources)
-        self.assertIn(self.resource_name, resources_names)
-        resources_types = map(lambda resource: resource['resource_type'],
-                              resources)
-        self.assertIn(self.resource_type, resources_types)
-
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_show_resource(self):
         """Getting details about created resource should be possible."""
         resp, resource = self.client.get_resource(self.stack_identifier,
@@ -154,9 +112,9 @@
         self.assertEqual(self.resource_name, resource['logical_resource_id'])
         self.assertEqual(self.resource_type, resource['resource_type'])
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_resource_metadata(self):
-        """Getting metadata for created resource should be possible."""
+        """Getting metadata for created resources should be possible."""
         resp, metadata = self.client.show_resource_metadata(
             self.stack_identifier,
             self.resource_name)
@@ -164,7 +122,7 @@
         self.assertIsInstance(metadata, dict)
         self.assertEqual(['Tom', 'Stinky'], metadata.get('kittens', None))
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_list_events(self):
         """Getting list of created events for the stack should be possible."""
         resp, events = self.client.list_events(self.stack_identifier)
@@ -180,9 +138,9 @@
         self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
         self.assertIn('CREATE_COMPLETE', resource_statuses)
 
-    @attr(type='gate')
+    @test.attr(type='gate')
     def test_show_event(self):
-        """Getting details about existing event should be possible."""
+        """Getting details about an event should be possible."""
         resp, events = self.client.list_resource_events(self.stack_identifier,
                                                         self.resource_name)
         self.assertNotEqual([], events)
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 9d3bf13..cb70d07 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -15,70 +15,25 @@
 
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
 
 
 LOG = logging.getLogger(__name__)
 
 
 class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
-    _interface = 'json'
-    template = """
-heat_template_version: 2013-05-23
-
-description: >
-  Template which creates two key pairs.
-
-parameters:
-  KeyPairName1:
-    type: string
-    default: testkey
-
-  KeyPairName2:
-    type: string
-    default: testkey2
-
-resources:
-  KeyPairSavePrivate:
-    type: OS::Nova::KeyPair
-    properties:
-      name: { get_param: KeyPairName1 }
-      save_private_key: true
-
-  KeyPairDontSavePrivate:
-    type: OS::Nova::KeyPair
-    properties:
-      name: { get_param: KeyPairName2 }
-      save_private_key: false
-
-outputs:
-  KeyPair_PublicKey:
-    description: Public Key of generated keypair
-    value: { get_attr: [KeyPairSavePrivate, public_key] }
-
-  KeyPair_PrivateKey:
-    description: Private Key of generated keypair
-    value: { get_attr: [KeyPairSavePrivate, private_key] }
-
-  KeyPairDontSavePrivate_PublicKey:
-    description: Public Key of generated keypair
-    value: { get_attr: [KeyPairDontSavePrivate, public_key] }
-
-  KeyPairDontSavePrivate_PrivateKey:
-    description: Private Key of generated keypair
-    value: { get_attr: [KeyPairDontSavePrivate, private_key] }
-"""
+    _tpl_type = 'yaml'
 
     @classmethod
     def setUpClass(cls):
         super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
-        cls.client = cls.orchestration_client
         cls.stack_name = data_utils.rand_name('heat')
+        template = cls.load_template('nova_keypair', ext=cls._tpl_type)
 
         # create the stack, avoid any duplicated key.
         cls.stack_identifier = cls.create_stack(
             cls.stack_name,
-            cls.template,
+            template,
             parameters={
                 'KeyPairName1': cls.stack_name + '_1',
                 'KeyPairName2': cls.stack_name + '_2'
@@ -91,7 +46,7 @@
         for resource in resources:
             cls.test_resources[resource['logical_resource_id']] = resource
 
-    @attr(type='slow')
+    @test.attr(type='slow')
     def test_created_resources(self):
         """Verifies created keypair resource."""
         resources = [('KeyPairSavePrivate', 'OS::Nova::KeyPair'),
@@ -104,7 +59,7 @@
             self.assertEqual(resource_type, resource['resource_type'])
             self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
 
-    @attr(type='slow')
+    @test.attr(type='slow')
     def test_stack_keypairs_output(self):
         resp, stack = self.client.get_stack(self.stack_name)
         self.assertEqual('200', resp['status'])
@@ -115,13 +70,13 @@
             output_map[outputs['output_key']] = outputs['output_value']
         #Test that first key generated public and private keys
         self.assertTrue('KeyPair_PublicKey' in output_map)
-        self.assertTrue("Generated by" in output_map['KeyPair_PublicKey'])
+        self.assertTrue("Generated" in output_map['KeyPair_PublicKey'])
         self.assertTrue('KeyPair_PrivateKey' in output_map)
         self.assertTrue('-----BEGIN' in output_map['KeyPair_PrivateKey'])
         #Test that second key generated public key, and private key is not
         #in the output due to save_private_key = false
         self.assertTrue('KeyPairDontSavePrivate_PublicKey' in output_map)
-        self.assertTrue('Generated by' in
+        self.assertTrue('Generated' in
                         output_map['KeyPairDontSavePrivate_PublicKey'])
         self.assertTrue(u'KeyPairDontSavePrivate_PrivateKey' in output_map)
         private_key = output_map['KeyPairDontSavePrivate_PrivateKey']
@@ -129,53 +84,4 @@
 
 
 class NovaKeyPairResourcesAWSTest(NovaKeyPairResourcesYAMLTest):
-    template = """
-{
-  "AWSTemplateFormatVersion" : "2010-09-09",
-  "Description" : "Template which create two key pairs.",
-  "Parameters" : {
-    "KeyPairName1": {
-      "Type": "String",
-      "Default": "testkey1"
-      },
-    "KeyPairName2": {
-      "Type": "String",
-      "Default": "testkey2"
-      }
-   },
-   "Resources" : {
-     "KeyPairSavePrivate": {
-       "Type": "OS::Nova::KeyPair",
-       "Properties": {
-         "name" : { "Ref" : "KeyPairName1" },
-         "save_private_key": true
-       }
-     },
-     "KeyPairDontSavePrivate": {
-       "Type": "OS::Nova::KeyPair",
-       "Properties": {
-         "name" : { "Ref" : "KeyPairName2" },
-         "save_private_key": false
-      }
-     }
-  },
- "Outputs": {
-   "KeyPair_PublicKey": {
-     "Description": "Public Key of generated keypair.",
-     "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "public_key"] }
-    },
-   "KeyPair_PrivateKey": {
-     "Description": "Private Key of generated keypair.",
-     "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "private_key"] }
-   },
-   "KeyPairDontSavePrivate_PublicKey": {
-     "Description": "Public Key of generated keypair.",
-     "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "public_key"] }
-   },
-  "KeyPairDontSavePrivate_PrivateKey": {
-     "Description": "Private Key of generated keypair.",
-     "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "private_key"] }
-   }
-  }
-}
-"""
+    _tpl_type = 'json'
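The keypair tests above now load their templates from files through a load_template() helper, selecting the YAML or JSON variant via _tpl_type instead of embedding the template text in the test module. The helper itself lives in the orchestration base class and is not shown in this hunk; the following is a rough sketch of a loader consistent with the calls above, where the path and signature are assumptions for illustration:

    # Hypothetical loader matching calls such as
    # load_template('nova_keypair', ext='yaml'); the real helper may differ.
    import os.path


    def load_template(name, ext='yaml'):
        loc = ['stacks', 'templates', '%s.%s' % (name, ext)]
        fullpath = os.path.join(os.path.dirname(__file__), *loc)
        with open(fullpath, 'r') as f:
            return f.read()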
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
deleted file mode 100644
index a6f74b6..0000000
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ /dev/null
@@ -1,212 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import json
-import testtools
-
-from tempest.api.orchestration import base
-from tempest.common.utils import data_utils
-from tempest.common.utils.linux import remote_client
-from tempest import config
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest import test
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class ServerCfnInitTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-    existing_keypair = CONF.orchestration.keypair_name is not None
-
-    template = """
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
-  Template which uses a wait condition to confirm that a minimal
-  cfn-init and cfn-signal has worked
-Parameters:
-  key_name:
-    Type: String
-  flavor:
-    Type: String
-  image:
-    Type: String
-  network:
-    Type: String
-Resources:
-  CfnUser:
-    Type: AWS::IAM::User
-  SmokeSecurityGroup:
-    Type: AWS::EC2::SecurityGroup
-    Properties:
-      GroupDescription: Enable only ping and SSH access
-      SecurityGroupIngress:
-      - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
-      - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
-  SmokeKeys:
-    Type: AWS::IAM::AccessKey
-    Properties:
-      UserName: {Ref: CfnUser}
-  SmokeServer:
-    Type: OS::Nova::Server
-    Metadata:
-      AWS::CloudFormation::Init:
-        config:
-          files:
-            /tmp/smoke-status:
-              content: smoke test complete
-            /etc/cfn/cfn-credentials:
-              content:
-                Fn::Replace:
-                - SmokeKeys: {Ref: SmokeKeys}
-                  SecretAccessKey:
-                    'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
-                - |
-                  AWSAccessKeyId=SmokeKeys
-                  AWSSecretKey=SecretAccessKey
-              mode: '000400'
-              owner: root
-              group: root
-    Properties:
-      image: {Ref: image}
-      flavor: {Ref: flavor}
-      key_name: {Ref: key_name}
-      security_groups:
-      - {Ref: SmokeSecurityGroup}
-      networks:
-      - uuid: {Ref: network}
-      user_data:
-        Fn::Replace:
-        - WaitHandle: {Ref: WaitHandle}
-        - |
-          #!/bin/bash -v
-          /opt/aws/bin/cfn-init
-          /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
-              "WaitHandle"
-  WaitHandle:
-    Type: AWS::CloudFormation::WaitConditionHandle
-  WaitCondition:
-    Type: AWS::CloudFormation::WaitCondition
-    DependsOn: SmokeServer
-    Properties:
-      Handle: {Ref: WaitHandle}
-      Timeout: '600'
-Outputs:
-  WaitConditionStatus:
-    Description: Contents of /tmp/smoke-status on SmokeServer
-    Value:
-      Fn::GetAtt: [WaitCondition, Data]
-  SmokeServerIp:
-    Description: IP address of server
-    Value:
-      Fn::GetAtt: [SmokeServer, first_address]
-"""
-
-    @classmethod
-    @test.safe_setup
-    def setUpClass(cls):
-        super(ServerCfnInitTestJSON, cls).setUpClass()
-        if not CONF.orchestration.image_ref:
-            raise cls.skipException("No image available to test")
-        cls.client = cls.orchestration_client
-
-        stack_name = data_utils.rand_name('heat')
-        if CONF.orchestration.keypair_name:
-            keypair_name = CONF.orchestration.keypair_name
-        else:
-            cls.keypair = cls._create_keypair()
-            keypair_name = cls.keypair['name']
-
-        # create the stack
-        cls.stack_identifier = cls.create_stack(
-            stack_name,
-            cls.template,
-            parameters={
-                'key_name': keypair_name,
-                'flavor': CONF.orchestration.instance_type,
-                'image': CONF.orchestration.image_ref,
-                'network': cls._get_default_network()['id']
-            })
-
-    @test.attr(type='slow')
-    @testtools.skipIf(existing_keypair, 'Server ssh tests are disabled.')
-    def test_can_log_into_created_server(self):
-
-        sid = self.stack_identifier
-        rid = 'SmokeServer'
-
-        # wait for create to complete.
-        self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
-
-        resp, body = self.client.get_resource(sid, rid)
-        self.assertEqual('CREATE_COMPLETE', body['resource_status'])
-
-        # fetch the IP address from servers client, since we can't get it
-        # from the stack until stack create is complete
-        resp, server = self.servers_client.get_server(
-            body['physical_resource_id'])
-
-        # Check that the user can authenticate with the generated password
-        linux_client = remote_client.RemoteClient(server, 'ec2-user',
-                                                  pkey=self.keypair[
-                                                      'private_key'])
-        linux_client.validate_authentication()
-
-    @test.attr(type='slow')
-    def test_all_resources_created(self):
-        sid = self.stack_identifier
-        self.client.wait_for_resource_status(
-            sid, 'WaitHandle', 'CREATE_COMPLETE')
-        self.client.wait_for_resource_status(
-            sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
-        self.client.wait_for_resource_status(
-            sid, 'SmokeKeys', 'CREATE_COMPLETE')
-        self.client.wait_for_resource_status(
-            sid, 'CfnUser', 'CREATE_COMPLETE')
-        self.client.wait_for_resource_status(
-            sid, 'SmokeServer', 'CREATE_COMPLETE')
-        try:
-            self.client.wait_for_resource_status(
-                sid, 'WaitCondition', 'CREATE_COMPLETE')
-        except exceptions.TimeoutException as e:
-            # attempt to log the server console to help with debugging
-            # the cause of the server not signalling the waitcondition
-            # to heat.
-            resp, body = self.client.get_resource(sid, 'SmokeServer')
-            server_id = body['physical_resource_id']
-            LOG.debug('Console output for %s', server_id)
-            resp, output = self.servers_client.get_console_output(
-                server_id, None)
-            LOG.debug(output)
-            raise e
-
-        # wait for create to complete.
-        self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
-
-        # fetch the stack
-        resp, body = self.client.get_stack(sid)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
-        # fetch the stack
-        resp, body = self.client.get_stack(sid)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
-        # This is an assert of great significance, as it means the following
-        # has happened:
-        # - cfn-init read the provided metadata and wrote out a file
-        # - a user was created and credentials written to the server
-        # - a cfn-signal was built which was signed with provided credentials
-        # - the wait condition was fulfilled and the stack has changed state
-        wait_status = json.loads(
-            self.stack_output(body, 'WaitConditionStatus'))
-        self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index fc2dda8..5b45d82 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -13,29 +13,26 @@
 from tempest.api.orchestration import base
 from tempest.common.utils import data_utils
 from tempest.openstack.common import log as logging
-from tempest.test import attr
+from tempest import test
 
 
 LOG = logging.getLogger(__name__)
 
 
 class StacksTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-
     empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
 
     @classmethod
     def setUpClass(cls):
         super(StacksTestJSON, cls).setUpClass()
-        cls.client = cls.orchestration_client
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_stack_list_responds(self):
         resp, stacks = self.client.list_stacks()
         self.assertEqual('200', resp['status'])
         self.assertIsInstance(stacks, list)
 
-    @attr(type='smoke')
+    @test.attr(type='smoke')
     def test_stack_crud_no_resources(self):
         stack_name = data_utils.rand_name('heat')
 
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index fcf357a..6d53fb2 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -25,40 +25,12 @@
 
 
 class SwiftResourcesTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-    template = """
-heat_template_version: 2013-05-23
-description: Template which creates a Swift container resource
-
-resources:
-  SwiftContainerWebsite:
-    deletion_policy: "Delete"
-    type: OS::Swift::Container
-    properties:
-      X-Container-Read: ".r:*"
-      X-Container-Meta:
-        web-index: "index.html"
-        web-error: "error.html"
-
-  SwiftContainer:
-    type: OS::Swift::Container
-
-outputs:
-  WebsiteURL:
-    description: "URL for website hosted on S3"
-    value: { get_attr: [SwiftContainer, WebsiteURL] }
-  DomainName:
-    description: "Domain of Swift host"
-    value: { get_attr: [SwiftContainer, DomainName] }
-
-"""
-
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
         super(SwiftResourcesTestJSON, cls).setUpClass()
-        cls.client = cls.orchestration_client
         cls.stack_name = data_utils.rand_name('heat')
+        template = cls.load_template('swift_basic')
         os = clients.Manager()
         if not CONF.service_available.swift:
             raise cls.skipException("Swift support is required")
@@ -67,7 +39,7 @@
         # create the stack
         cls.stack_identifier = cls.create_stack(
             cls.stack_name,
-            cls.template)
+            template)
         cls.stack_id = cls.stack_identifier.split('/')[1]
         cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
         cls.test_resources = {}
@@ -76,7 +48,7 @@
             cls.test_resources[resource['logical_resource_id']] = resource
 
     def test_created_resources(self):
-        """Created stack should be on the list of existing stacks."""
+        """Created stack should be in the list of existing stacks."""
         resources = [('SwiftContainer', 'OS::Swift::Container'),
                      ('SwiftContainerWebsite', 'OS::Swift::Container')]
         for resource_name, resource_type in resources:
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
index 2dc29fc..74950a9 100644
--- a/tempest/api/orchestration/stacks/test_templates.py
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -16,8 +16,6 @@
 
 
 class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-
     template = """
 HeatTemplateFormatVersion: '2012-12-12'
 Description: |
@@ -27,13 +25,10 @@
     Type: AWS::IAM::User
 """
 
-    invalid_template_url = 'http://www.example.com/template.yaml'
-
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
         super(TemplateYAMLTestJSON, cls).setUpClass()
-        cls.client = cls.orchestration_client
         cls.stack_name = data_utils.rand_name('heat')
         cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
         cls.client.wait_for_stack_status(cls.stack_identifier,
@@ -67,5 +62,3 @@
   }
 }
 """
-
-    invalid_template_url = 'http://www.example.com/template.template'
diff --git a/tempest/api/orchestration/stacks/test_templates_negative.py b/tempest/api/orchestration/stacks/test_templates_negative.py
index c55f6ee..b325104 100644
--- a/tempest/api/orchestration/stacks/test_templates_negative.py
+++ b/tempest/api/orchestration/stacks/test_templates_negative.py
@@ -18,8 +18,6 @@
 
 
 class TemplateYAMLNegativeTestJSON(base.BaseOrchestrationTest):
-    _interface = 'json'
-
     template = """
 HeatTemplateFormatVersion: '2012-12-12'
 Description: |
@@ -34,7 +32,6 @@
     @classmethod
     def setUpClass(cls):
         super(TemplateYAMLNegativeTestJSON, cls).setUpClass()
-        cls.client = cls.orchestration_client
         cls.parameters = {}
 
     @test.attr(type=['gate', 'negative'])
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
new file mode 100644
index 0000000..a9a43b6
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_update.py
@@ -0,0 +1,84 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+LOG = logging.getLogger(__name__)
+
+
+class UpdateStackTestJSON(base.BaseOrchestrationTest):
+    _interface = 'json'
+
+    template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+'''
+    update_template = '''
+heat_template_version: 2013-05-23
+resources:
+  random1:
+    type: OS::Heat::RandomString
+  random2:
+    type: OS::Heat::RandomString
+'''
+
+    def update_stack(self, stack_identifier, template):
+        stack_name = stack_identifier.split('/')[0]
+        resp = self.client.update_stack(
+            stack_identifier=stack_identifier,
+            name=stack_name,
+            template=template)
+        self.assertEqual('202', resp[0]['status'])
+        self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
+
+    @test.attr(type='gate')
+    def test_stack_update_nochange(self):
+        stack_name = data_utils.rand_name('heat')
+        stack_identifier = self.create_stack(stack_name, self.template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        expected_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+
+        # Update with no changes, resources should be unchanged
+        self.update_stack(stack_identifier, self.template)
+        self.assertEqual(expected_resources,
+                         self.list_resources(stack_identifier))
+
+    @test.attr(type='gate')
+    @test.skip_because(bug='1308682')
+    def test_stack_update_add_remove(self):
+        stack_name = data_utils.rand_name('heat')
+        stack_identifier = self.create_stack(stack_name, self.template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+        initial_resources = {'random1': 'OS::Heat::RandomString'}
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
+
+        # Add one resource via a stack update
+        self.update_stack(stack_identifier, self.update_template)
+        updated_resources = {'random1': 'OS::Heat::RandomString',
+                             'random2': 'OS::Heat::RandomString'}
+        self.assertEqual(updated_resources,
+                         self.list_resources(stack_identifier))
+
+        # Then remove it by updating with the original template
+        self.update_stack(stack_identifier, self.template)
+        self.assertEqual(initial_resources,
+                         self.list_resources(stack_identifier))
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..5ac2a8d
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,106 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(CinderResourcesTest, cls).setUpClass()
+        if not CONF.service_available.cinder:
+            raise cls.skipException('Cinder support is required')
+
+    def _cinder_verify(self, volume_id):
+        self.assertIsNotNone(volume_id)
+        resp, volume = self.volumes_client.get_volume(volume_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual('available', volume.get('status'))
+        self.assertEqual(1, volume.get('size'))
+        self.assertEqual('a descriptive description',
+                         volume.get('display_description'))
+        self.assertEqual('volume_name',
+                         volume.get('display_name'))
+
+    def _outputs_verify(self, stack_identifier):
+        self.assertEqual('available',
+                         self.get_stack_output(stack_identifier, 'status'))
+        self.assertEqual('1',
+                         self.get_stack_output(stack_identifier, 'size'))
+        self.assertEqual('a descriptive description',
+                         self.get_stack_output(stack_identifier,
+                                               'display_description'))
+        self.assertEqual('volume_name',
+                         self.get_stack_output(stack_identifier,
+                                               'display_name'))
+
+    @test.attr(type='gate')
+    def test_cinder_volume_create_delete(self):
+        """Create and delete a volume via OS::Cinder::Volume."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('cinder_basic')
+        stack_identifier = self.create_stack(stack_name, template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+        self._cinder_verify(volume_id)
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack_identifier)
+
+        # Delete the stack and ensure the volume is gone
+        self.client.delete_stack(stack_identifier)
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+        self.assertRaises(exceptions.NotFound,
+                          self.volumes_client.get_volume,
+                          volume_id)
+
+    def _cleanup_volume(self, volume_id):
+        """Cleanup the volume direct with cinder."""
+        resp = self.volumes_client.delete_volume(volume_id)
+        self.assertEqual(202, resp[0].status)
+        self.volumes_client.wait_for_resource_deletion(volume_id)
+
+    @test.attr(type='gate')
+    def test_cinder_volume_create_delete_retain(self):
+        """Ensure the 'Retain' deletion policy is respected."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('cinder_basic_delete_retain')
+        stack_identifier = self.create_stack(stack_name, template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+        self.addCleanup(self._cleanup_volume, volume_id)
+        self._cinder_verify(volume_id)
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack_identifier)
+
+        # Delete the stack and ensure the volume is *not* gone
+        self.client.delete_stack(stack_identifier)
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+        self._cinder_verify(volume_id)
+
+        # Volume cleanup happens via addCleanup calling _cleanup_volume
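Both volume tests load HOT templates ('cinder_basic' and 'cinder_basic_delete_retain') that are not included in this hunk. Judging only from what the tests assert (a 1 GB volume named 'volume_name' with the description above, plus stack outputs for volume_id, status, size, display_description and display_name), a template consistent with them could look roughly like the sketch below; this is an assumption for illustration rather than the actual template file, and the retain variant would additionally set a Retain deletion policy on the volume resource:

    # Assumed illustration of a 'cinder_basic'-style template, kept as a
    # Python string constant; the real template file may differ.
    CINDER_BASIC_TEMPLATE = '''
    heat_template_version: 2013-05-23
    resources:
      volume:
        type: OS::Cinder::Volume
        properties:
          size: 1
          description: a descriptive description
          name: volume_name
    outputs:
      volume_id:
        value: { get_resource: volume }
      status:
        value: { get_attr: [volume, status] }
      size:
        value: { get_attr: [volume, size] }
      display_description:
        value: { get_attr: [volume, display_description] }
      display_name:
        value: { get_attr: [volume, display_name] }
    '''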
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index 6c22719..5649619 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -50,6 +50,42 @@
 
     @classmethod
     def delete_queue(cls, queue_name):
-        """Wrapper utility that returns a test queue."""
+        """Wrapper utility that deletes a test queue."""
         resp, body = cls.client.delete_queue(queue_name)
         return resp, body
+
+    @classmethod
+    def check_queue_exists(cls, queue_name):
+        """Wrapper utility that checks the existence of a test queue."""
+        resp, body = cls.client.get_queue(queue_name)
+        return resp, body
+
+    @classmethod
+    def check_queue_exists_head(cls, queue_name):
+        """Wrapper utility checks the head of a queue via http HEAD."""
+        resp, body = cls.client.head_queue(queue_name)
+        return resp, body
+
+    @classmethod
+    def list_queues(cls):
+        """Wrapper utility that lists queues."""
+        resp, body = cls.client.list_queues()
+        return resp, body
+
+    @classmethod
+    def get_queue_stats(cls, queue_name):
+        """Wrapper utility that returns the queue stats."""
+        resp, body = cls.client.get_queue_stats(queue_name)
+        return resp, body
+
+    @classmethod
+    def get_queue_metadata(cls, queue_name):
+        """Wrapper utility that gets a queue metadata."""
+        resp, body = cls.client.get_queue_metadata(queue_name)
+        return resp, body
+
+    @classmethod
+    def set_queue_metadata(cls, queue_name, rbody):
+        """Wrapper utility that sets the metadata of a queue."""
+        resp, body = cls.client.set_queue_metadata(queue_name, rbody)
+        return resp, body
diff --git a/tempest/api/queuing/test_queues.py b/tempest/api/queuing/test_queues.py
index 4d03f7e..e43178a 100644
--- a/tempest/api/queuing/test_queues.py
+++ b/tempest/api/queuing/test_queues.py
@@ -14,6 +14,8 @@
 # limitations under the License.
 
 import logging
+from six import moves
+from testtools import matchers
 
 from tempest.api.queuing import base
 from tempest.common.utils import data_utils
@@ -43,18 +45,86 @@
     @classmethod
     def setUpClass(cls):
         super(TestManageQueue, cls).setUpClass()
-        cls.queue_name = data_utils.rand_name('Queues-Test')
-        # Create Queue
-        cls.client.create_queue(cls.queue_name)
+        cls.queues = list()
+        for _ in moves.xrange(5):
+            queue_name = data_utils.rand_name('Queues-Test')
+            cls.queues.append(queue_name)
+            # Create Queue
+            cls.client.create_queue(queue_name)
 
     @test.attr(type='smoke')
     def test_delete_queue(self):
         # Delete Queue
-        resp, body = self.delete_queue(self.queue_name)
+        queue_name = self.queues.pop()
+        resp, body = self.delete_queue(queue_name)
         self.assertEqual('204', resp['status'])
         self.assertEqual('', body)
 
+    @test.attr(type='smoke')
+    def test_check_queue_existence(self):
+        # Checking Queue Existence
+        for queue_name in self.queues:
+            resp, body = self.check_queue_exists(queue_name)
+            self.assertEqual('204', resp['status'])
+            self.assertEqual('', body)
+
+    @test.attr(type='smoke')
+    def test_check_queue_head(self):
+        # Checking Queue Existence by calling HEAD
+        for queue_name in self.queues:
+            resp, body = self.check_queue_exists_head(queue_name)
+            self.assertEqual('204', resp['status'])
+            self.assertEqual('', body)
+
+    @test.attr(type='smoke')
+    def test_list_queues(self):
+        # Listing queues
+        resp, body = self.list_queues()
+        self.assertEqual(len(body['queues']), len(self.queues))
+        for item in body['queues']:
+            self.assertIn(item['name'], self.queues)
+
+    @test.attr(type='smoke')
+    def test_get_queue_stats(self):
+        # Retrieve random queue
+        queue_name = self.queues[data_utils.rand_int_id(0,
+                                                        len(self.queues) - 1)]
+        # Get Queue Stats for a newly created Queue
+        resp, body = self.get_queue_stats(queue_name)
+        msgs = body['messages']
+        for element in ('free', 'claimed', 'total'):
+            self.assertEqual(0, msgs[element])
+        for element in ('oldest', 'newest'):
+            self.assertNotIn(element, msgs)
+
+    @test.attr(type='smoke')
+    def test_set_and_get_queue_metadata(self):
+        # Retrieve random queue
+        queue_name = self.queues[data_utils.rand_int_id(0,
+                                                        len(self.queues) - 1)]
+        # Check the Queue has no metadata
+        resp, body = self.get_queue_metadata(queue_name)
+        self.assertEqual('200', resp['status'])
+        self.assertThat(body, matchers.HasLength(0))
+        # Create metadata
+        key3 = [0, 1, 2, 3, 4]
+        key2 = data_utils.rand_name('value')
+        req_body1 = dict()
+        req_body1[data_utils.rand_name('key3')] = key3
+        req_body1[data_utils.rand_name('key2')] = key2
+        req_body = dict()
+        req_body[data_utils.rand_name('key1')] = req_body1
+        # Set Queue Metadata
+        resp, body = self.set_queue_metadata(queue_name, req_body)
+        self.assertEqual('204', resp['status'])
+        self.assertEqual('', body)
+        # Get Queue Metadata
+        resp, body = self.get_queue_metadata(queue_name)
+        self.assertEqual('200', resp['status'])
+        self.assertThat(body, matchers.Equals(req_body))
+
     @classmethod
     def tearDownClass(cls):
-        cls.client.delete_queue(cls.queue_name)
+        for queue_name in cls.queues:
+            cls.client.delete_queue(queue_name)
         super(TestManageQueue, cls).tearDownClass()
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index c4614c6..2b422fd 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -10,9 +10,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
+from tempest.openstack.common import timeutils
 import tempest.test
 
 CONF = config.CONF
@@ -29,6 +32,12 @@
         super(BaseTelemetryTest, cls).setUpClass()
         os = cls.get_client_manager()
         cls.telemetry_client = os.telemetry_client
+        cls.servers_client = os.servers_client
+        cls.flavors_client = os.flavors_client
+
+        cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
+                                  'disk.ephemeral.size']
+        cls.server_ids = []
         cls.alarm_ids = []
 
     @classmethod
@@ -41,11 +50,46 @@
         return resp, body
 
     @classmethod
-    def tearDownClass(cls):
-        for alarm_id in cls.alarm_ids:
+    def create_server(cls):
+        resp, body = cls.servers_client.create_server(
+            data_utils.rand_name('ceilometer-instance'),
+            CONF.compute.image_ref, CONF.compute.flavor_ref,
+            wait_until='ACTIVE')
+        if resp['status'] == '202':
+            cls.server_ids.append(body['id'])
+        return resp, body
+
+    @staticmethod
+    def cleanup_resources(method, list_of_ids):
+        for resource_id in list_of_ids:
             try:
-                cls.telemetry_client.delete_alarm(alarm_id)
+                method(resource_id)
             except exceptions.NotFound:
                 pass
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
+        cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
         cls.clear_isolated_creds()
         super(BaseTelemetryTest, cls).tearDownClass()
+
+    def await_samples(self, metric, query):
+        """
+        Wait until samples for the given metric and query show up in the
+        Ceilometer database. There can be long delays before samples are
+        stored when PostgreSQL (or MySQL) is used as the backend.
+        """
+        timeout = CONF.compute.build_timeout
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
+            resp, body = self.telemetry_client.list_samples(metric, query)
+            self.assertEqual(resp.status, 200)
+            if body:
+                return resp, body
+            time.sleep(CONF.compute.build_interval)
+
+        raise exceptions.TimeoutException(
+            'Sample for metric:%s with query:%s has not been added to the '
+            'database within %d seconds' % (metric, query,
+                                            CONF.compute.build_timeout))
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_telemetry_alarming_api.py
index a59d3ae..95758e8 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_telemetry_alarming_api.py
@@ -11,53 +11,104 @@
 #    under the License.
 
 from tempest.api.telemetry import base
+from tempest.common.utils import data_utils
 from tempest import exceptions
-from tempest.test import attr
+from tempest import test
 
 
 class TelemetryAlarmingAPITestJSON(base.BaseTelemetryTest):
     _interface = 'json'
 
-    @attr(type="gate")
-    def test_alarm_list(self):
-        # Create an alarm to verify in the list of alarms
-        created_alarm_ids = list()
-        fetched_ids = list()
-        rules = {'meter_name': 'cpu_util',
-                 'comparison_operator': 'gt',
-                 'threshold': 80.0,
-                 'period': 70}
-        for i in range(3):
-            resp, body = self.create_alarm(threshold_rule=rules)
-            created_alarm_ids.append(body['alarm_id'])
+    @classmethod
+    def setUpClass(cls):
+        super(TelemetryAlarmingAPITestJSON, cls).setUpClass()
+        cls.rule = {'meter_name': 'cpu_util',
+                    'comparison_operator': 'gt',
+                    'threshold': 80.0,
+                    'period': 70}
+        for i in range(2):
+            cls.create_alarm(threshold_rule=cls.rule)
 
+    @test.attr(type="gate")
+    def test_alarm_list(self):
         # List alarms
         resp, alarm_list = self.telemetry_client.list_alarms()
-        self.assertEqual(int(resp['status']), 200)
+        self.assertEqual(200, resp.status)
 
         # Verify created alarm in the list
         fetched_ids = [a['alarm_id'] for a in alarm_list]
-        missing_alarms = [a for a in created_alarm_ids if a not in fetched_ids]
+        missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids]
         self.assertEqual(0, len(missing_alarms),
                          "Failed to find the following created alarm(s)"
                          " in a fetched list: %s" %
                          ', '.join(str(a) for a in missing_alarms))
 
-    @attr(type="gate")
-    def test_create_alarm(self):
-        rules = {'meter_name': 'cpu_util',
-                 'comparison_operator': 'gt',
-                 'threshold': 80.0,
-                 'period': 70}
-        resp, body = self.create_alarm(threshold_rule=rules)
-        self.alarm_id = body['alarm_id']
-        self.assertEqual(int(resp['status']), 201)
-        self.assertDictContainsSubset(rules, body['threshold_rule'])
-        resp, body = self.telemetry_client.get_alarm(self.alarm_id)
-        self.assertEqual(int(resp['status']), 200)
-        self.assertDictContainsSubset(rules, body['threshold_rule'])
-        resp, _ = self.telemetry_client.delete_alarm(self.alarm_id)
-        self.assertEqual(int(resp['status']), 204)
+    @test.attr(type="gate")
+    def test_create_update_get_delete_alarm(self):
+        # Create an alarm
+        alarm_name = data_utils.rand_name('telemetry_alarm')
+        resp, body = self.telemetry_client.create_alarm(
+            name=alarm_name, type='threshold', threshold_rule=self.rule)
+        self.assertEqual(201, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        alarm_id = body['alarm_id']
+        self.assertDictContainsSubset(self.rule, body['threshold_rule'])
+        # Update alarm with new rule and new name
+        new_rule = {'meter_name': 'cpu',
+                    'comparison_operator': 'eq',
+                    'threshold': 70.0,
+                    'period': 60}
+        alarm_name = data_utils.rand_name('telemetry-alarm-update')
+        resp, body = self.telemetry_client.update_alarm(
+            alarm_id,
+            threshold_rule=new_rule,
+            name=alarm_name,
+            type='threshold')
+        self.assertEqual(200, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+        # Get and verify details of an alarm after update
+        resp, body = self.telemetry_client.get_alarm(alarm_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+        # Delete alarm and verify if deleted
+        resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+        self.assertEqual(204, resp.status)
         self.assertRaises(exceptions.NotFound,
-                          self.telemetry_client.get_alarm,
-                          self.alarm_id)
+                          self.telemetry_client.get_alarm, alarm_id)
+
+    @test.attr(type="gate")
+    def test_set_get_alarm_state(self):
+        alarm_states = ['ok', 'alarm', 'insufficient data']
+        _, alarm = self.create_alarm(threshold_rule=self.rule)
+        # Set alarm state and verify
+        new_state =\
+            [elem for elem in alarm_states if elem != alarm['state']][0]
+        resp, state = self.telemetry_client.alarm_set_state(alarm['alarm_id'],
+                                                            new_state)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_state, state)
+        # Get alarm state and verify
+        resp, state = self.telemetry_client.alarm_get_state(alarm['alarm_id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_state, state)
+
+    @test.attr(type="gate")
+    def test_create_delete_alarm_with_combination_rule(self):
+        rule = {"alarm_ids": self.alarm_ids,
+                "operator": "or"}
+        # Verifies alarm create
+        alarm_name = data_utils.rand_name('combination_alarm')
+        resp, body = self.telemetry_client.create_alarm(name=alarm_name,
+                                                        combination_rule=rule,
+                                                        type='combination')
+        self.assertEqual(201, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        alarm_id = body['alarm_id']
+        self.assertDictContainsSubset(rule, body['combination_rule'])
+        # Verify alarm delete
+        resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+        self.assertEqual(204, resp.status)
+        self.assertRaises(exceptions.NotFound,
+                          self.telemetry_client.get_alarm, alarm_id)
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
new file mode 100644
index 0000000..148f5a3
--- /dev/null
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -0,0 +1,47 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.telemetry import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class TelemetryNotificationAPITestJSON(base.BaseTelemetryTest):
+    _interface = 'json'
+
+    @classmethod
+    def setUpClass(cls):
+        if CONF.telemetry.too_slow_to_test:
+            raise cls.skipException("Ceilometer feature for fast work mysql "
+                                    "is disabled")
+        super(TelemetryNotificationAPITestJSON, cls).setUpClass()
+
+    @test.attr(type="gate")
+    @testtools.skipIf(not CONF.service_available.nova,
+                      "Nova is not available.")
+    def test_check_nova_notification(self):
+
+        resp, body = self.create_server()
+        self.assertEqual(resp.status, 202)
+
+        query = ('resource', 'eq', body['id'])
+
+        for metric in self.nova_notifications:
+            self.await_samples(metric, query)
+
+
+class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 2949d56..ecd8836 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -15,6 +15,7 @@
 #    under the License.
 
 from tempest.api.volume import base
+from tempest.common.utils import data_utils
 from tempest import test
 
 QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes']
@@ -29,8 +30,7 @@
     def setUpClass(cls):
         super(VolumeQuotasAdminTestJSON, cls).setUpClass()
         cls.admin_volume_client = cls.os_adm.volumes_client
-        cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
-            'tenantId')
+        cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
 
     @test.attr(type='gate')
     def test_list_quotas(self):
@@ -100,6 +100,27 @@
         self.assertEqual(quota_usage['gigabytes']['in_use'] + 1,
                          new_quota_usage['gigabytes']['in_use'])
 
+    @test.attr(type='gate')
+    def test_delete_quota(self):
+        # Admin can delete the resource quota set for a tenant
+        tenant_name = data_utils.rand_name('quota_tenant_')
+        identity_client = self.os_adm.identity_client
+        tenant = identity_client.create_tenant(tenant_name)[1]
+        tenant_id = tenant['id']
+        self.addCleanup(identity_client.delete_tenant, tenant_id)
+        _, quota_set_default = self.quotas_client.get_default_quota_set(
+            tenant_id)
+        volume_default = quota_set_default['volumes']
+
+        self.quotas_client.update_quota_set(tenant_id,
+                                            volumes=(int(volume_default) + 5))
+
+        resp, _ = self.quotas_client.delete_quota_set(tenant_id)
+        self.assertEqual(200, resp.status)
+
+        _, quota_set_new = self.quotas_client.get_quota_set(tenant_id)
+        self.assertEqual(volume_default, quota_set_new['volumes'])
+
 
 class VolumeQuotasAdminTestXML(VolumeQuotasAdminTestJSON):
     _interface = "xml"
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
new file mode 100644
index 0000000..ab88b90
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -0,0 +1,83 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.volume import base
+from tempest import exceptions
+from tempest import test
+
+
+class VolumeQuotasNegativeTestJSON(base.BaseVolumeV1AdminTest):
+    _interface = "json"
+    force_tenant_isolation = True
+
+    @classmethod
+    @test.safe_setup
+    def setUpClass(cls):
+        super(VolumeQuotasNegativeTestJSON, cls).setUpClass()
+        demo_user = cls.isolated_creds.get_primary_creds()
+        cls.demo_tenant_id = demo_user.tenant_id
+        cls.shared_quota_set = {'gigabytes': 3, 'volumes': 1, 'snapshots': 1}
+
+        # NOTE(gfidente): no need to restore original quota set
+        # after the tests as they only work with tenant isolation.
+        resp, quota_set = cls.quotas_client.update_quota_set(
+            cls.demo_tenant_id,
+            **cls.shared_quota_set)
+
+        # NOTE(gfidente): no need to delete in tearDown as
+        # they are created using utility wrapper methods.
+        cls.volume = cls.create_volume()
+        cls.snapshot = cls.create_snapshot(cls.volume['id'])
+
+    @test.attr(type='negative')
+    def test_quota_volumes(self):
+        self.assertRaises(exceptions.OverLimit,
+                          self.volumes_client.create_volume,
+                          size=1)
+
+    @test.attr(type='negative')
+    def test_quota_volume_snapshots(self):
+        self.assertRaises(exceptions.OverLimit,
+                          self.snapshots_client.create_snapshot,
+                          self.volume['id'])
+
+    @test.attr(type='negative')
+    def test_quota_volume_gigabytes(self):
+        # NOTE(gfidente): the quota set must be changed for this test, or we
+        # may hit the volumes or snapshots count quota rather than the actual
+        # gigabytes usage; the addCleanup below restores the shared set.
+        self.addCleanup(self.quotas_client.update_quota_set,
+                        self.demo_tenant_id,
+                        **self.shared_quota_set)
+
+        new_quota_set = {'gigabytes': 2, 'volumes': 2, 'snapshots': 1}
+        resp, quota_set = self.quotas_client.update_quota_set(
+            self.demo_tenant_id,
+            **new_quota_set)
+        self.assertRaises(exceptions.OverLimit,
+                          self.volumes_client.create_volume,
+                          size=1)
+
+        new_quota_set = {'gigabytes': 2, 'volumes': 1, 'snapshots': 2}
+        resp, quota_set = self.quotas_client.update_quota_set(
+            self.demo_tenant_id,
+            **new_quota_set)
+        self.assertRaises(exceptions.OverLimit,
+                          self.snapshots_client.create_snapshot,
+                          self.volume['id'])
+
+
+class VolumeQuotasNegativeTestXML(VolumeQuotasNegativeTestJSON):
+    _interface = "xml"
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
new file mode 100644
index 0000000..012c231
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.volume import base
+from tempest import test
+
+
+class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
+    """
+    Tests Volume Services API.
+    Volume service list requires admin privileges.
+    """
+    _interface = "json"
+
+    @classmethod
+    def setUpClass(cls):
+        super(VolumesServicesTestJSON, cls).setUpClass()
+        cls.client = cls.os_adm.volume_services_client
+        resp, cls.services = cls.client.list_services()
+        cls.host_name = cls.services[0]['host']
+        cls.binary_name = cls.services[0]['binary']
+
+    @test.attr(type='gate')
+    def test_list_services(self):
+        resp, services = self.client.list_services()
+        self.assertEqual(200, resp.status)
+        self.assertNotEqual(0, len(services))
+
+    @test.attr(type='gate')
+    def test_get_service_by_service_binary_name(self):
+        params = {'binary': self.binary_name}
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertNotEqual(0, len(services))
+        for service in services:
+            self.assertEqual(self.binary_name, service['binary'])
+
+    @test.attr(type='gate')
+    def test_get_service_by_host_name(self):
+        services_on_host = [service for service in self.services if
+                            service['host'] == self.host_name]
+        params = {'host': self.host_name}
+
+        resp, services = self.client.list_services(params)
+
+        # A periodic job may check in between the two service lookups,
+        # so only compare the binary lists.
+        s1 = map(lambda x: x['binary'], services)
+        s2 = map(lambda x: x['binary'], services_on_host)
+        # Sort the lists before comparing, to remove any dependency
+        # on ordering.
+        self.assertEqual(sorted(s1), sorted(s2))
+
+    @test.attr(type='gate')
+    def test_get_service_by_service_and_host_name(self):
+        params = {'host': self.host_name, 'binary': self.binary_name}
+
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(1, len(services))
+        self.assertEqual(self.host_name, services[0]['host'])
+        self.assertEqual(self.binary_name, services[0]['binary'])
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index ee1d09a..3b8c214 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -118,14 +118,16 @@
                          'from the created Volume_type')
 
     @test.attr(type='smoke')
-    def test_volume_type_encryption_create_get(self):
-        # Create/get encryption type.
+    def test_volume_type_encryption_create_get_delete(self):
+        # Create/get/delete encryption type.
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type-")
         resp, body = self.client.create_volume_type(name)
         self.assertEqual(200, resp.status)
         self.addCleanup(self._delete_volume_type, body['id'])
+
+        # Create encryption type
         resp, encryption_type = self.client.create_encryption_type(
             body['id'], provider=provider,
             control_location=control_location)
@@ -137,6 +139,8 @@
         self.assertEqual(control_location, encryption_type['control_location'],
                          "The created encryption_type control_location is not "
                          "equal to the requested control_location")
+
+        # Get encryption type
         resp, fetched_encryption_type = self.client.get_encryption_type(
             encryption_type['volume_type_id'])
         self.assertEqual(200, resp.status)
@@ -148,3 +152,15 @@
                          fetched_encryption_type['control_location'],
                          'The fetched encryption_type control_location is '
                          'different from the created encryption_type')
+
+        # Delete encryption type
+        resp, _ = self.client.delete_encryption_type(
+            encryption_type['volume_type_id'])
+        self.assertEqual(202, resp.status)
+        resource = {"id": encryption_type['volume_type_id'],
+                    "type": "encryption-type"}
+        self.client.wait_for_resource_deletion(resource)
+        resp, deleted_encryption_type = self.client.get_encryption_type(
+            encryption_type['volume_type_id'])
+        self.assertEqual(200, resp.status)
+        self.assertEmpty(deleted_encryption_type)
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4496f18..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -85,24 +85,6 @@
             self.volume['id'])
         self.assertEqual('error', volume_get['status'])
 
-    @test.attr(type='gate')
-    def test_volume_begin_detaching(self):
-        # test volume begin detaching : available -> detaching -> available
-        resp, body = self.client.volume_begin_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp_get, volume_get = self.client.get_volume(self.volume['id'])
-        self.assertEqual('detaching', volume_get['status'])
-
-    @test.attr(type='gate')
-    def test_volume_roll_detaching(self):
-        # test volume roll detaching : detaching -> in-use -> available
-        resp, body = self.client.volume_begin_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp, body = self.client.volume_roll_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp_get, volume_get = self.client.get_volume(self.volume['id'])
-        self.assertEqual('in-use', volume_get['status'])
-
     def test_volume_force_delete_when_volume_is_creating(self):
         # test force delete when status of volume is creating
         self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 2c6050c..67d0203 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -107,7 +107,9 @@
         cls.snapshots_client = cls.os.snapshots_client
         cls.volumes_client = cls.os.volumes_client
         cls.backups_client = cls.os.backups_client
+        cls.volume_services_client = cls.os.volume_services_client
         cls.volumes_extension_client = cls.os.volumes_extension_client
+        cls.availability_zone_client = cls.os.volume_availability_zone_client
 
     @classmethod
     def create_volume(cls, size=1, **kwargs):
@@ -135,11 +137,7 @@
                    "in configuration.")
             raise cls.skipException(msg)
         if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_admin_creds()
-            admin_username, admin_tenant_name, admin_password = creds
-            cls.os_adm = clients.Manager(username=admin_username,
-                                         password=admin_password,
-                                         tenant_name=admin_tenant_name,
+            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
                                          interface=cls._interface)
         else:
             cls.os_adm = clients.AdminManager(interface=cls._interface)
diff --git a/tempest/api/compute/v3/servers/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
similarity index 74%
rename from tempest/api/compute/v3/servers/test_availability_zone.py
rename to tempest/api/volume/test_availability_zone.py
index 5a1e07e..fe8f96e 100644
--- a/tempest/api/compute/v3/servers/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -13,24 +13,29 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.compute import base
+from tempest.api.volume import base
 from tempest import test
 
 
-class AZV3Test(base.BaseV3ComputeTest):
+class AvailabilityZoneTestJSON(base.BaseVolumeV1Test):
 
     """
     Tests Availability Zone API List
     """
+    _interface = 'json'
 
     @classmethod
     def setUpClass(cls):
-        super(AZV3Test, cls).setUpClass()
+        super(AvailabilityZoneTestJSON, cls).setUpClass()
         cls.client = cls.availability_zone_client
 
     @test.attr(type='gate')
-    def test_get_availability_zone_list_with_non_admin_user(self):
-        # List of availability zone with non-administrator user
+    def test_get_availability_zone_list(self):
+        # List of availability zone
         resp, availability_zone = self.client.get_availability_zone_list()
         self.assertEqual(200, resp.status)
         self.assertTrue(len(availability_zone) > 0)
+
+
+class AvailabilityZoneTestXML(AvailabilityZoneTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 55a72c1..82d1364 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -32,32 +32,18 @@
 
         # Add another tenant to test volume-transfer
         if CONF.compute.allow_tenant_isolation:
-            creds = cls.isolated_creds.get_alt_creds()
-            username, tenant_name, password = creds
-            cls.os_alt = clients.Manager(username=username,
-                                         password=password,
-                                         tenant_name=tenant_name,
+            cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
                                          interface=cls._interface)
-            cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
-
             # Add admin tenant to cleanup resources
-            adm_creds = cls.isolated_creds.get_admin_creds()
-            admin_username, admin_tenant_name, admin_password = adm_creds
-            cls.os_adm = clients.Manager(username=admin_username,
-                                         password=admin_password,
-                                         tenant_name=admin_tenant_name,
+            cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
                                          interface=cls._interface)
         else:
             cls.os_alt = clients.AltManager()
-            alt_tenant_name = cls.os_alt.credentials['tenant_name']
-            identity_client = cls._get_identity_admin_client()
-            _, tenants = identity_client.list_tenants()
-            cls.alt_tenant_id = [tnt['id'] for tnt in tenants
-                                 if tnt['name'] == alt_tenant_name][0]
             cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
 
         cls.client = cls.volumes_client
         cls.alt_client = cls.os_alt.volumes_client
+        cls.alt_tenant_id = cls.alt_client.tenant_id
         cls.adm_client = cls.os_adm.volumes_client
 
     def _delete_volume(self, volume_id):
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index be5d76b..58da440 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -51,8 +51,7 @@
         v_name = data_utils.rand_name('Volume')
         metadata = {'Type': 'Test'}
         # Create a volume
-        resp, volume = self.client.create_volume(size=1,
-                                                 display_name=v_name,
+        resp, volume = self.client.create_volume(display_name=v_name,
                                                  metadata=metadata,
                                                  **kwargs)
         self.assertEqual(200, resp.status)
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index a8b0a8d..bc5b1dc 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -154,6 +154,7 @@
         self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
 
     @test.attr(type=['negative', 'gate'])
+    @test.services('compute')
     def test_attach_volumes_with_nonexistent_volume_id(self):
         srv_name = data_utils.rand_name('Instance-')
         resp, server = self.servers_client.create_server(srv_name,
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 2ce3a4f..26316d2 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -29,6 +29,9 @@
         super(VolumesSnapshotTest, cls).setUpClass()
         cls.volume_origin = cls.create_volume()
 
+        if not CONF.volume_feature_enabled.snapshot:
+            raise cls.skipException("Cinder volume snapshots are disabled")
+
     @classmethod
     def tearDownClass(cls):
         super(VolumesSnapshotTest, cls).tearDownClass()
@@ -60,6 +63,7 @@
                 self.assertEqual(params[key], snap[key], msg)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_snapshot_create_with_volume_in_use(self):
         # Create a snapshot when volume status is in-use
         # Create a test instance
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 9e47c03..61aa307 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -14,13 +14,23 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
     _interface = "json"
 
+    @classmethod
+    def setUpClass(cls):
+        super(VolumesSnapshotNegativeTest, cls).setUpClass()
+
+        if not CONF.volume_feature_enabled.snapshot:
+            raise cls.skipException("Cinder volume snapshots are disabled")
+
     @test.attr(type=['negative', 'gate'])
     def test_create_snapshot_with_nonexistent_volume_id(self):
         # Create a snapshot with nonexistent volume id
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 41445d7..e90c957 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -203,7 +203,7 @@
         def _list_details_with_multiple_params(limit=2,
                                                status='available',
                                                sort_dir='asc',
-                                               sort_key='created_at'):
+                                               sort_key='id'):
             params = {'limit': limit,
                       'status': status,
                       'sort_dir': sort_dir,
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
new file mode 100644
index 0000000..b04cf64
--- /dev/null
+++ b/tempest/api_schema/compute/agents.py
@@ -0,0 +1,40 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_agents = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'agents': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'agent_id': {'type': ['integer', 'string']},
+                        'hypervisor': {'type': 'string'},
+                        'os': {'type': 'string'},
+                        'architecture': {'type': 'string'},
+                        'version': {'type': 'string'},
+                        'url': {'type': 'string', 'format': 'uri'},
+                        'md5hash': {'type': 'string'}
+                    },
+                    'required': ['agent_id', 'hypervisor', 'os',
+                                 'architecture', 'version', 'url', 'md5hash']
+                }
+            }
+        },
+        'required': ['agents']
+    }
+}
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
index 49793fe..9393a16 100644
--- a/tempest/api_schema/compute/aggregates.py
+++ b/tempest/api_schema/compute/aggregates.py
@@ -12,6 +12,26 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
+aggregate = {
+    'type': 'object',
+    'properties': {
+        'availability_zone': {'type': ['string', 'null']},
+        'created_at': {'type': 'string'},
+        'deleted': {'type': 'boolean'},
+        'deleted_at': {'type': ['string', 'null']},
+        'hosts': {'type': 'array'},
+        'id': {'type': 'integer'},
+        'metadata': {'type': 'object'},
+        'name': {'type': 'string'},
+        'updated_at': {'type': ['string', 'null']}
+    },
+    'required': ['availability_zone', 'created_at', 'deleted',
+                 'deleted_at', 'hosts', 'id', 'metadata',
+                 'name', 'updated_at']
+}
+
 list_aggregates = {
     'status_code': [200],
     'response_body': {
@@ -19,25 +39,48 @@
         'properties': {
             'aggregates': {
                 'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'availability_zone': {'type': ['string', 'null']},
-                        'created_at': {'type': 'string'},
-                        'deleted': {'type': 'boolean'},
-                        'deleted_at': {'type': ['string', 'null']},
-                        'hosts': {'type': 'array'},
-                        'id': {'type': 'integer'},
-                        'metadata': {'type': 'object'},
-                        'name': {'type': 'string'},
-                        'updated_at': {'type': ['string', 'null']}
-                    },
-                    'required': ['availability_zone', 'created_at', 'deleted',
-                                 'deleted_at', 'hosts', 'id', 'metadata',
-                                 'name', 'updated_at']
-                }
+                'items': aggregate
             }
         },
         'required': ['aggregates']
     }
 }
+
+get_aggregate = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'aggregate': aggregate
+        },
+        'required': ['aggregate']
+    }
+}
+
+aggregate_set_metadata = get_aggregate
+# The 'updated_at' attribute of 'update_aggregate' can't be null.
+update_aggregate = copy.deepcopy(get_aggregate)
+update_aggregate['response_body']['properties']['aggregate']['properties'][
+    'updated_at'] = {
+        'type': 'string'
+    }
+
+common_create_aggregate = {
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'aggregate': aggregate
+        },
+        'required': ['aggregate']
+    }
+}
+# The create-aggregate API response has no 'hosts' and 'metadata' attributes.
+del common_create_aggregate['response_body']['properties']['aggregate'][
+    'properties']['hosts']
+del common_create_aggregate['response_body']['properties']['aggregate'][
+    'properties']['metadata']
+common_create_aggregate['response_body']['properties']['aggregate'][
+    'required'] = ['availability_zone', 'created_at', 'deleted', 'deleted_at',
+                   'id', 'name', 'updated_at']
+
+aggregate_add_remove_host = get_aggregate
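
The derived schemas above use two patterns: read-only aliases (aggregate_set_metadata, aggregate_add_remove_host) simply reuse get_aggregate, while update_aggregate and common_create_aggregate are deep-copied before their nested keys are mutated. A minimal sketch of why copy.deepcopy matters here, using illustrative dictionaries rather than the real schemas:

    import copy

    base = {'response_body': {'properties': {
        'updated_at': {'type': ['string', 'null']}}}}

    alias = base                   # shares the same nested dicts as base
    derived = copy.deepcopy(base)  # independent structure, safe to specialise

    derived['response_body']['properties']['updated_at'] = {'type': 'string'}

    # base (and the alias) are untouched; without deepcopy the mutation above
    # would have changed them as well.
    assert base['response_body']['properties']['updated_at'] == {
        'type': ['string', 'null']}
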
diff --git a/tempest/api_schema/compute/availability_zone.py b/tempest/api_schema/compute/availability_zone.py
new file mode 100644
index 0000000..c1abc64
--- /dev/null
+++ b/tempest/api_schema/compute/availability_zone.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# NOTE: This is the detail information for "get az detail" API.
+# The information is the same between v2 and v3 APIs.
+detail = {
+    'type': 'object',
+    'patternProperties': {
+        # NOTE: The key is a hostname
+        '^[a-zA-Z0-9-_.]+$': {
+            'type': 'object',
+            'patternProperties': {
+                # NOTE: The key is a service name
+                '^.*$': {
+                    'type': 'object',
+                    'properties': {
+                        'available': {'type': 'boolean'},
+                        'active': {'type': 'boolean'},
+                        'updated_at': {'type': 'string'}
+                    },
+                    'required': ['available', 'active', 'updated_at']
+                }
+            }
+        }
+    }
+}
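
For reference, a hypothetical payload that satisfies the detail schema above maps hostnames to service names, each carrying the three required state fields (the values below are purely illustrative):

    sample_detail = {
        'compute-node-1': {
            'nova-compute': {
                'available': True,
                'active': True,
                'updated_at': '2014-03-05T12:00:00.000000',
            },
        },
    }
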
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+_common_schema = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'certificate': {
+                'type': 'object',
+                'properties': {
+                    'data': {'type': 'string'},
+                    'private_key': {'type': 'string'},
+                },
+                'required': ['data', 'private_key'],
+            }
+        },
+        'required': ['certificate'],
+    }
+}
+
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+    'properties']['private_key'].update({'type': 'null'})
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
new file mode 100644
index 0000000..aa019e4
--- /dev/null
+++ b/tempest/api_schema/compute/flavors.py
@@ -0,0 +1,77 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api_schema.compute import parameter_types
+
+list_flavors = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'flavors': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'name': {'type': 'string'},
+                        'links': parameter_types.links,
+                        'id': {'type': 'string'}
+                    },
+                    'required': ['name', 'links', 'id']
+                }
+            }
+        },
+        'required': ['flavors']
+    }
+}
+
+common_flavor_info = {
+    'type': 'object',
+    'properties': {
+        'name': {'type': 'string'},
+        'links': parameter_types.links,
+        'ram': {'type': 'integer'},
+        'vcpus': {'type': 'integer'},
+        'swap': {'type': 'integer'},
+        'disk': {'type': 'integer'},
+        'id': {'type': 'string'}
+    },
+    'required': ['name', 'links', 'ram', 'vcpus',
+                 'swap', 'disk', 'id']
+}
+
+common_flavor_list_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'flavors': {
+                'type': 'array',
+                'items': common_flavor_info
+            }
+        },
+        'required': ['flavors']
+    }
+}
+
+common_flavor_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'flavor': common_flavor_info
+        },
+        'required': ['flavor']
+    }
+}
diff --git a/tempest/api_schema/compute/flavors_access.py b/tempest/api_schema/compute/flavors_access.py
index 152e24c..cd31b0a 100644
--- a/tempest/api_schema/compute/flavors_access.py
+++ b/tempest/api_schema/compute/flavors_access.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-list_flavor_access = {
+add_remove_list_flavor_access = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
diff --git a/tempest/api_schema/compute/flavors_extra_specs.py b/tempest/api_schema/compute/flavors_extra_specs.py
new file mode 100644
index 0000000..4003d36
--- /dev/null
+++ b/tempest/api_schema/compute/flavors_extra_specs.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+flavor_extra_specs = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'extra_specs': {
+                'type': 'object',
+                'patternProperties': {
+                    '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['extra_specs']
+    }
+}
+
+flavor_extra_specs_key = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'patternProperties': {
+            '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+        }
+    }
+}
diff --git a/tempest/api_schema/compute/hosts.py b/tempest/api_schema/compute/hosts.py
index b9a3db9..2596c27 100644
--- a/tempest/api_schema/compute/hosts.py
+++ b/tempest/api_schema/compute/hosts.py
@@ -12,6 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+common_start_up_body = {
+    'type': 'object',
+    'properties': {
+        'host': {'type': 'string'},
+        'power_action': {'enum': ['startup']}
+    },
+    'required': ['host', 'power_action']
+}
+
 list_hosts = {
     'status_code': [200],
     'response_body': {
@@ -33,3 +42,44 @@
         'required': ['hosts']
     }
 }
+
+show_host_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'host': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'resource': {
+                            'type': 'object',
+                            'properties': {
+                                'cpu': {'type': 'integer'},
+                                'disk_gb': {'type': 'integer'},
+                                'host': {'type': 'string'},
+                                'memory_mb': {'type': 'integer'},
+                                'project': {'type': 'string'}
+                            },
+                            'required': ['cpu', 'disk_gb', 'host',
+                                         'memory_mb', 'project']
+                        }
+                    },
+                    'required': ['resource']
+                }
+            }
+        },
+        'required': ['host']
+    }
+}
+
+update_host_common = {
+    'type': 'object',
+    'properties': {
+        'host': {'type': 'string'},
+        'maintenance_mode': {'enum': ['on_maintenance', 'off_maintenance']},
+        'status': {'enum': ['enabled', 'disabled']}
+    },
+    'required': ['host', 'maintenance_mode', 'status']
+}
diff --git a/tempest/api_schema/compute/hypervisors.py b/tempest/api_schema/compute/hypervisors.py
new file mode 100644
index 0000000..630901e
--- /dev/null
+++ b/tempest/api_schema/compute/hypervisors.py
@@ -0,0 +1,197 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+hypervisor_statistics = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'hypervisor_statistics': {
+                'type': 'object',
+                'properties': {
+                    'count': {'type': 'integer'},
+                    'current_workload': {'type': 'integer'},
+                    'disk_available_least': {'type': 'integer'},
+                    'free_disk_gb': {'type': 'integer'},
+                    'free_ram_mb': {'type': 'integer'},
+                    'local_gb': {'type': 'integer'},
+                    'local_gb_used': {'type': 'integer'},
+                    'memory_mb': {'type': 'integer'},
+                    'memory_mb_used': {'type': 'integer'},
+                    'running_vms': {'type': 'integer'},
+                    'vcpus': {'type': 'integer'},
+                    'vcpus_used': {'type': 'integer'}
+                },
+                'required': ['count', 'current_workload',
+                             'disk_available_least', 'free_disk_gb',
+                             'free_ram_mb', 'local_gb', 'local_gb_used',
+                             'memory_mb', 'memory_mb_used', 'running_vms',
+                             'vcpus', 'vcpus_used']
+            }
+        },
+        'required': ['hypervisor_statistics']
+    }
+}
+
+common_list_hypervisors_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'hypervisors': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'cpu_info': {'type': 'string'},
+                        'current_workload': {'type': 'integer'},
+                        'disk_available_least': {'type': ['integer', 'null']},
+                        'host_ip': {
+                            'type': 'string',
+                            'format': 'ip-address'
+                        },
+                        'free_disk_gb': {'type': 'integer'},
+                        'free_ram_mb': {'type': 'integer'},
+                        'hypervisor_hostname': {'type': 'string'},
+                        'hypervisor_type': {'type': 'string'},
+                        'hypervisor_version': {'type': 'integer'},
+                        'id': {'type': ['integer', 'string']},
+                        'local_gb': {'type': 'integer'},
+                        'local_gb_used': {'type': 'integer'},
+                        'memory_mb': {'type': 'integer'},
+                        'memory_mb_used': {'type': 'integer'},
+                        'running_vms': {'type': 'integer'},
+                        'service': {
+                            'type': 'object',
+                            'properties': {
+                                'host': {'type': 'string'},
+                                'id': {'type': ['integer', 'string']}
+                            },
+                            'required': ['host', 'id']
+                        },
+                        'vcpus': {'type': 'integer'},
+                        'vcpus_used': {'type': 'integer'}
+                    },
+                    'required': ['cpu_info', 'current_workload',
+                                 'disk_available_least', 'host_ip',
+                                 'free_disk_gb', 'free_ram_mb',
+                                 'hypervisor_hostname', 'hypervisor_type',
+                                 'hypervisor_version', 'id', 'local_gb',
+                                 'local_gb_used', 'memory_mb',
+                                 'memory_mb_used', 'running_vms', 'service',
+                                 'vcpus', 'vcpus_used']
+                }
+            }
+        },
+        'required': ['hypervisors']
+    }
+}
+
+common_show_hypervisor = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'hypervisor': {
+                'type': 'object',
+                'properties': {
+                    'cpu_info': {'type': 'string'},
+                    'current_workload': {'type': 'integer'},
+                    'disk_available_least': {'type': 'integer'},
+                    'host_ip': {
+                        'type': 'string',
+                        'format': 'ip-address'
+                    },
+                    'free_disk_gb': {'type': 'integer'},
+                    'free_ram_mb': {'type': 'integer'},
+                    'hypervisor_hostname': {'type': 'string'},
+                    'hypervisor_type': {'type': 'string'},
+                    'hypervisor_version': {'type': 'integer'},
+                    'id': {'type': ['integer', 'string']},
+                    'local_gb': {'type': 'integer'},
+                    'local_gb_used': {'type': 'integer'},
+                    'memory_mb': {'type': 'integer'},
+                    'memory_mb_used': {'type': 'integer'},
+                    'running_vms': {'type': 'integer'},
+                    'service': {
+                        'type': 'object',
+                        'properties': {
+                            'host': {'type': 'string'},
+                            'id': {'type': ['integer', 'string']}
+                        },
+                        'required': ['host', 'id']
+                    },
+                    'vcpus': {'type': 'integer'},
+                    'vcpus_used': {'type': 'integer'}
+                },
+                'required': ['cpu_info', 'current_workload',
+                             'disk_available_least', 'host_ip',
+                             'free_disk_gb', 'free_ram_mb',
+                             'hypervisor_hostname', 'hypervisor_type',
+                             'hypervisor_version', 'id', 'local_gb',
+                             'local_gb_used', 'memory_mb', 'memory_mb_used',
+                             'running_vms', 'service', 'vcpus', 'vcpus_used']
+            }
+        },
+        'required': ['hypervisor']
+    }
+}
+
+common_hypervisors_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'hypervisors': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': ['integer', 'string']},
+                        'hypervisor_hostname': {'type': 'string'}
+                    },
+                    'required': ['id', 'hypervisor_hostname']
+                }
+            }
+        },
+        'required': ['hypervisors']
+    }
+}
+
+common_hypervisors_info = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'hypervisor': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': ['integer', 'string']},
+                    'hypervisor_hostname': {'type': 'string'},
+                },
+                'required': ['id', 'hypervisor_hostname']
+            }
+        },
+        'required': ['hypervisor']
+    }
+}
+
+
+hypervisor_uptime = copy.deepcopy(common_hypervisors_info)
+hypervisor_uptime['response_body']['properties']['hypervisor'][
+    'properties']['uptime'] = {'type': 'string'}
+hypervisor_uptime['response_body']['properties']['hypervisor'][
+    'required'] = ['id', 'hypervisor_hostname', 'uptime']
diff --git a/tempest/api_schema/compute/interfaces.py b/tempest/api_schema/compute/interfaces.py
new file mode 100644
index 0000000..79a8f42
--- /dev/null
+++ b/tempest/api_schema/compute/interfaces.py
@@ -0,0 +1,47 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api_schema.compute import parameter_types
+
+delete_interface = {
+    'status_code': [202]
+}
+
+interface_common_info = {
+    'type': 'object',
+    'properties': {
+        'port_state': {'type': 'string'},
+        'fixed_ips': {
+            'type': 'array',
+            'items': {
+                'type': 'object',
+                'properties': {
+                    'subnet_id': {
+                        'type': 'string',
+                        'format': 'uuid'
+                    },
+                    'ip_address': {
+                        'type': 'string',
+                        'format': 'ipv4'
+                    }
+                },
+                'required': ['subnet_id', 'ip_address']
+            }
+        },
+        'port_id': {'type': 'string', 'format': 'uuid'},
+        'net_id': {'type': 'string', 'format': 'uuid'},
+        'mac_addr': parameter_types.mac_address
+    },
+    'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
+}
diff --git a/tempest/api_schema/compute/keypairs.py b/tempest/api_schema/compute/keypairs.py
index 8973c02..b8f905f 100644
--- a/tempest/api_schema/compute/keypairs.py
+++ b/tempest/api_schema/compute/keypairs.py
@@ -39,3 +39,27 @@
         'required': ['keypairs']
     }
 }
+
+create_keypair = {
+    'type': 'object',
+    'properties': {
+        'keypair': {
+            'type': 'object',
+            'properties': {
+                'fingerprint': {'type': 'string'},
+                'name': {'type': 'string'},
+                'public_key': {'type': 'string'},
+                # NOTE: The type of 'user_id' is currently an integer, but
+                # 'string' is also allowed here so that it can be changed
+                # to a uuid in the future.
+                'user_id': {'type': ['integer', 'string']},
+                'private_key': {'type': 'string'}
+            },
+            # When the create keypair API is called with a public key
+            # (importing a keypair), the response body does not contain
+            # 'private_key', so it is not defined as 'required'.
+            'required': ['fingerprint', 'name', 'public_key', 'user_id']
+        }
+    },
+    'required': ['keypair']
+}
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
new file mode 100644
index 0000000..6723869
--- /dev/null
+++ b/tempest/api_schema/compute/migrations.py
@@ -0,0 +1,56 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_migrations = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'migrations': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        # NOTE: The type of 'id' is currently an integer,
+                        # but 'string' is also allowed here so that it can
+                        # be changed to a uuid in the future.
+                        'id': {'type': ['integer', 'string']},
+                        'status': {'type': 'string'},
+                        'instance_uuid': {'type': 'string'},
+                        'source_node': {'type': 'string'},
+                        'source_compute': {'type': 'string'},
+                        'dest_node': {'type': 'string'},
+                        'dest_compute': {'type': 'string'},
+                        'dest_host': {'type': 'string'},
+                        'old_instance_type_id': {
+                            'type': ['integer', 'string']
+                        },
+                        'new_instance_type_id': {
+                            'type': ['integer', 'string']
+                        },
+                        'created_at': {'type': 'string'},
+                        'updated_at': {'type': ['string', 'null']}
+                    },
+                    'required': [
+                        'id', 'status', 'instance_uuid', 'source_node',
+                        'source_compute', 'dest_node', 'dest_compute',
+                        'dest_host', 'old_instance_type_id',
+                        'new_instance_type_id', 'created_at', 'updated_at'
+                    ]
+                }
+            }
+        },
+        'required': ['migrations']
+    }
+}
diff --git a/tempest/api_schema/compute/parameter_types.py b/tempest/api_schema/compute/parameter_types.py
index 67c0c9b..4a1dfdd 100644
--- a/tempest/api_schema/compute/parameter_types.py
+++ b/tempest/api_schema/compute/parameter_types.py
@@ -26,3 +26,42 @@
         'required': ['href', 'rel']
     }
 }
+
+mac_address = {
+    'type': 'string',
+    'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+}
+
+access_ip_v4 = {
+    'type': 'string',
+    'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
+}
+
+access_ip_v6 = {
+    'type': 'string',
+    'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
+}
+
+addresses = {
+    'type': 'object',
+    'patternProperties': {
+        # NOTE: The key is a network label such as 'private'.
+        '^[a-zA-Z0-9-_.]+$': {
+            'type': 'array',
+            'items': {
+                'type': 'object',
+                'properties': {
+                    'version': {'type': 'integer'},
+                    'addr': {
+                        'type': 'string',
+                        'anyOf': [
+                            {'format': 'ipv4'},
+                            {'format': 'ipv6'}
+                        ]
+                    }
+                },
+                'required': ['version', 'addr']
+            }
+        }
+    }
+}
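
These fragments are plain JSON Schema, so they can be exercised directly with the jsonschema library. A minimal sketch, where the sample data is illustrative and jsonschema is assumed to be installed:

    import jsonschema

    from tempest.api_schema.compute import parameter_types

    sample_addresses = {
        'private': [
            {'version': 4, 'addr': '10.0.0.3'},
            {'version': 6, 'addr': 'fd00::3'},
        ],
    }

    # Raises jsonschema.exceptions.ValidationError if the structure is wrong.
    # Note that format constraints such as 'ipv4' are only enforced when a
    # FormatChecker is supplied.
    jsonschema.validate(sample_addresses, parameter_types.addresses)
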
diff --git a/tempest/api_schema/compute/quotas.py b/tempest/api_schema/compute/quotas.py
new file mode 100644
index 0000000..f49771e
--- /dev/null
+++ b/tempest/api_schema/compute/quotas.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+common_quota_set = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'quota_set': {
+                'type': 'object',
+                'properties': {
+                    'instances': {'type': 'integer'},
+                    'cores': {'type': 'integer'},
+                    'ram': {'type': 'integer'},
+                    'floating_ips': {'type': 'integer'},
+                    'fixed_ips': {'type': 'integer'},
+                    'metadata_items': {'type': 'integer'},
+                    'key_pairs': {'type': 'integer'},
+                    'security_groups': {'type': 'integer'},
+                    'security_group_rules': {'type': 'integer'}
+                },
+                'required': ['instances', 'cores', 'ram',
+                             'floating_ips', 'fixed_ips',
+                             'metadata_items', 'key_pairs',
+                             'security_groups', 'security_group_rules']
+            }
+        },
+        'required': ['quota_set']
+    }
+}
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
new file mode 100644
index 0000000..33992b1
--- /dev/null
+++ b/tempest/api_schema/compute/servers.py
@@ -0,0 +1,167 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import parameter_types
+
+get_password = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'password': {'type': 'string'}
+        },
+        'required': ['password']
+    }
+}
+
+get_vnc_console = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'console': {
+                'type': 'object',
+                'properties': {
+                    'type': {'type': 'string'},
+                    'url': {
+                        'type': 'string',
+                        'format': 'uri'
+                    }
+                },
+                'required': ['type', 'url']
+            }
+        },
+        'required': ['console']
+    }
+}
+
+base_update_server = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'server': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': ['integer', 'string']},
+                    'name': {'type': 'string'},
+                    'status': {'type': 'string'},
+                    'image': {
+                        'type': 'object',
+                        'properties': {
+                            'id': {'type': ['integer', 'string']},
+                            'links': parameter_types.links
+                        },
+                        'required': ['id', 'links']
+                    },
+                    'flavor': {
+                        'type': 'object',
+                        'properties': {
+                            'id': {'type': ['integer', 'string']},
+                            'links': parameter_types.links
+                        },
+                        'required': ['id', 'links']
+                    },
+                    'user_id': {'type': 'string'},
+                    'tenant_id': {'type': 'string'},
+                    'created': {'type': 'string'},
+                    'updated': {'type': 'string'},
+                    'progress': {'type': 'integer'},
+                    'metadata': {'type': 'object'},
+                    'links': parameter_types.links,
+                    'addresses': parameter_types.addresses,
+                },
+                # NOTE(GMann): The 'progress' attribute is present in the
+                # response only when the server's status is one of the
+                # progress statuses ("ACTIVE", "BUILD", "REBUILD", "RESIZE",
+                # "VERIFY_RESIZE"), so it is not defined as 'required'.
+                'required': ['id', 'name', 'status', 'image', 'flavor',
+                             'user_id', 'tenant_id', 'created', 'updated',
+                             'metadata', 'links', 'addresses']
+            }
+        },
+        'required': ['server']
+    }
+}
+
+delete_server = {
+    'status_code': [204],
+}
+
+set_server_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {
+                'type': 'object',
+                'patternProperties': {
+                    '^.+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['metadata']
+    }
+}
+
+list_server_metadata = copy.deepcopy(set_server_metadata)
+
+update_server_metadata = copy.deepcopy(set_server_metadata)
+
+delete_server_metadata_item = {
+    'status_code': [204]
+}
+
+list_servers = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'servers': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': 'string'},
+                        'links': parameter_types.links,
+                        'name': {'type': 'string'}
+                    },
+                    'required': ['id', 'links', 'name']
+                }
+            }
+        },
+        'required': ['servers']
+    }
+}
+
+server_actions_common_schema = {
+    'status_code': [202]
+}
+
+server_actions_delete_password = {
+    'status_code': [204]
+}
+
+get_console_output = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'output': {'type': 'string'}
+        },
+        'required': ['output']
+    }
+}
diff --git a/tempest/api_schema/compute/services.py b/tempest/api_schema/compute/services.py
index 4793f5a..4c58013 100644
--- a/tempest/api_schema/compute/services.py
+++ b/tempest/api_schema/compute/services.py
@@ -42,3 +42,22 @@
         'required': ['services']
     }
 }
+
+enable_service = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'service': {
+                'type': 'object',
+                'properties': {
+                    'status': {'type': 'string'},
+                    'binary': {'type': 'string'},
+                    'host': {'type': 'string'}
+                },
+                'required': ['status', 'binary', 'host']
+            }
+        },
+        'required': ['service']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/agents.py b/tempest/api_schema/compute/v2/agents.py
new file mode 100644
index 0000000..837731f
--- /dev/null
+++ b/tempest/api_schema/compute/v2/agents.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_agent = {
+    'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..bc36044
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,25 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import aggregates
+
+delete_aggregate = {
+    'status_code': [200]
+}
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V2 API's response status_code is 200
+create_aggregate['status_code'] = [200]
diff --git a/tempest/api_schema/compute/v2/availability_zone.py b/tempest/api_schema/compute/v2/availability_zone.py
new file mode 100644
index 0000000..d3d2787
--- /dev/null
+++ b/tempest/api_schema/compute/v2/availability_zone.py
@@ -0,0 +1,54 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'availabilityZoneInfo': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'zoneName': {'type': 'string'},
+                        'zoneState': {
+                            'type': 'object',
+                            'properties': {
+                                'available': {'type': 'boolean'}
+                            },
+                            'required': ['available']
+                        },
+                        # NOTE: Here is the difference between detail and
+                        # non-detail.
+                        'hosts': {'type': 'null'}
+                    },
+                    'required': ['zoneName', 'zoneState', 'hosts']
+                }
+            }
+        },
+        'required': ['availabilityZoneInfo']
+    }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+    'availabilityZoneInfo']['items']['properties']['hosts'] = common.detail
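
Because the detail variant is built from copy.deepcopy(base), overriding 'hosts' on the copy leaves the non-detail schema untouched. A small sketch of the pattern (the object assigned here stands in for common.detail)::

    import copy

    base = {'properties': {'hosts': {'type': 'null'}}}

    detail = copy.deepcopy(base)
    detail['properties']['hosts'] = {'type': 'object'}  # stand-in for common.detail

    assert base['properties']['hosts'] == {'type': 'null'}      # base is unchanged
    assert detail['properties']['hosts'] == {'type': 'object'}  # only the copy differs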
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/extensions.py b/tempest/api_schema/compute/v2/extensions.py
new file mode 100644
index 0000000..570cd03
--- /dev/null
+++ b/tempest/api_schema/compute/v2/extensions.py
@@ -0,0 +1,45 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_extensions = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'extensions': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'updated': {
+                            'type': 'string',
+                            'format': 'date-time'
+                        },
+                        'name': {'type': 'string'},
+                        'links': {'type': 'array'},
+                        'namespace': {
+                            'type': 'string',
+                            'format': 'uri'
+                        },
+                        'alias': {'type': 'string'},
+                        'description': {'type': 'string'}
+                    },
+                    'required': ['updated', 'name', 'links', 'namespace',
+                                 'alias', 'description']
+                }
+            }
+        },
+        'required': ['extensions']
+    }
+}
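
Note that jsonschema treats the 'format' keyword as advisory unless a format checker is supplied; if the 'date-time' and 'uri' formats above are meant to be enforced, the validation call would need something like the following (a sketch, assuming jsonschema is the validator in use)::

    import jsonschema

    schema = {
        'type': 'object',
        'properties': {
            'updated': {'type': 'string', 'format': 'date-time'}
        }
    }

    # Without a FormatChecker the 'format' keyword is ignored; with one,
    # malformed timestamps can be rejected (when the matching format plugin
    # is available).
    jsonschema.validate({'updated': '2014-03-27T09:00:00Z'}, schema,
                        format_checker=jsonschema.FormatChecker())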
diff --git a/tempest/api_schema/compute/v2/fixed_ips.py b/tempest/api_schema/compute/v2/fixed_ips.py
index a6add04..446633f 100644
--- a/tempest/api_schema/compute/v2/fixed_ips.py
+++ b/tempest/api_schema/compute/v2/fixed_ips.py
@@ -34,3 +34,8 @@
         'required': ['fixed_ip']
     }
 }
+
+fixed_ip_action = {
+    'status_code': [202],
+    'response_body': {'type': 'string'}
+}
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
new file mode 100644
index 0000000..bee6ecb
--- /dev/null
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -0,0 +1,57 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# The 'swap' attribute comes as an integer value, but when it is empty it
+# comes as "". So its type is defined as both string and integer.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+                          'os-flavor-access:is_public': {'type': 'boolean'},
+                          'rxtx_factor': {'type': 'number'},
+                          'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+unset_flavor_extra_specs = {
+    'status_code': [200]
+}
+
+create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# The 'swap' attribute comes as an integer value, but when it is empty it
+# comes as "". So its type is defined as both string and integer.
+create_get_flavor_details['response_body']['properties']['flavor'][
+    'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+create_get_flavor_details['response_body']['properties']['flavor'][
+    'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+                          'os-flavor-access:is_public': {'type': 'boolean'},
+                          'rxtx_factor': {'type': 'number'},
+                          'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+delete_flavor = {
+    'status_code': [202]
+}
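
The union type used for 'swap' means both the empty-string and the integer form of the field validate. A quick illustrative sketch::

    import jsonschema

    swap_schema = {'type': ['string', 'integer']}

    jsonschema.validate('', swap_schema)   # empty string, as some V2 responses return
    jsonschema.validate(512, swap_schema)  # integer value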
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
new file mode 100644
index 0000000..86efadf
--- /dev/null
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import hosts
+
+
+startup_host = {
+    'status_code': [200],
+    'response_body': hosts.common_start_up_body
+}
+
+# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+    'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+    'enum': ['reboot']
+}
+
+update_host = {
+    'status_code': [200],
+    'response_body': hosts.update_host_common
+}
diff --git a/tempest/api_schema/compute/v2/hypervisors.py b/tempest/api_schema/compute/v2/hypervisors.py
new file mode 100644
index 0000000..6bb43a7
--- /dev/null
+++ b/tempest/api_schema/compute/v2/hypervisors.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.api_schema.compute import hypervisors
+
+hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_detail)
+
+# Defining extra attributes for the V2 hypervisor servers schema
+hypervisors_servers['response_body']['properties']['hypervisors']['items'][
+    'properties']['servers'] = {
+        'type': 'array',
+        'items': {
+            'type': 'object',
+            'properties': {
+                # NOTE: Now the type of 'id' is integer,
+                # but here allows 'string' also because we
+                # will be able to change it to 'uuid' in
+                # the future.
+                'id': {'type': ['integer', 'string']},
+                'name': {'type': 'string'}
+            }
+        }
+    }
+# In the V2 API, if there are no servers (VMs) on the hypervisor host, the
+# 'servers' attribute is not present in the response body, so it is not
+# 'required'.
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index 41593c6..d121060 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -14,43 +14,46 @@
 
 from tempest.api_schema.compute import parameter_types
 
+common_image_schema = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': 'string'},
+        'status': {'type': 'string'},
+        'updated': {'type': 'string'},
+        'links': parameter_types.links,
+        'name': {'type': 'string'},
+        'created': {'type': 'string'},
+        'minDisk': {'type': 'integer'},
+        'minRam': {'type': 'integer'},
+        'progress': {'type': 'integer'},
+        'metadata': {'type': 'object'},
+        'server': {
+            'type': 'object',
+            'properties': {
+                # NOTE: Now the type of 'id' is integer, but here
+                # allows 'string' also because we will be able to
+                # change it to 'uuid' in the future.
+                'id': {'type': ['integer', 'string']},
+                'links': parameter_types.links
+            },
+            'required': ['id', 'links']
+        },
+        'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+    },
+    # The 'server' attribute comes in the response body only if the image is
+    # associated with a server. 'OS-EXT-IMG-SIZE:size' is an API extension,
+    # so neither is defined as 'required'.
+    'required': ['id', 'status', 'updated', 'links', 'name',
+                 'created', 'minDisk', 'minRam', 'progress',
+                 'metadata']
+}
+
 get_image = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
         'properties': {
-            'image': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'status': {'type': 'string'},
-                    'updated': {'type': 'string'},
-                    'links': parameter_types.links,
-                    'name': {'type': 'string'},
-                    'created': {'type': 'string'},
-                    'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
-                    'minDisk': {'type': 'integer'},
-                    'minRam': {'type': 'integer'},
-                    'progress': {'type': 'integer'},
-                    'metadata': {'type': 'object'},
-                    'server': {
-                        'type': 'object',
-                        'properties': {
-                            # NOTE: Now the type of 'id' is integer, but here
-                            # allows 'string' also because we will be able to
-                            # change it to 'uuid' in the future.
-                            'id': {'type': ['integer', 'string']},
-                            'links': parameter_types.links
-                        },
-                        'required': ['id', 'links']
-                    }
-                },
-                # 'server' attributes only comes in response body if image is
-                # associated with any server. So it is not 'required'
-                'required': ['id', 'status', 'updated', 'links', 'name',
-                             'created', 'OS-EXT-IMG-SIZE:size', 'minDisk',
-                             'minRam', 'progress', 'metadata']
-            }
+            'image': common_image_schema
         },
         'required': ['image']
     }
@@ -67,20 +70,7 @@
                     'type': 'object',
                     'properties': {
                         'id': {'type': 'string'},
-                        'links': {
-                            'type': 'array',
-                            'items': {
-                                'type': 'object',
-                                'properties': {
-                                    'href': {
-                                        'type': 'string',
-                                        'format': 'uri'
-                                    },
-                                    'rel': {'type': 'string'}
-                                },
-                                'required': ['href', 'rel']
-                            }
-                        },
+                        'links': parameter_types.links,
                         'name': {'type': 'string'}
                     },
                     'required': ['id', 'links', 'name']
@@ -90,3 +80,57 @@
         'required': ['images']
     }
 }
+
+create_image = {
+    'status_code': [202],
+    'response_header': {
+        'type': 'object',
+        'properties': {
+            'location': {
+                'type': 'string',
+                'format': 'uri'
+            }
+        },
+        'required': ['location']
+    }
+}
+
+delete = {
+    'status_code': [204]
+}
+
+image_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {'type': 'object'}
+        },
+        'required': ['metadata']
+    }
+}
+
+image_meta_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'meta': {'type': 'object'}
+        },
+        'required': ['meta']
+    }
+}
+
+list_images_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'images': {
+                'type': 'array',
+                'items': common_image_schema
+            }
+        },
+        'required': ['images']
+    }
+}
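
The refactoring above hinges on reusing one item schema in two response schemas: get_image embeds common_image_schema directly, and list_images_details embeds it as the 'items' of an array. A stripped-down sketch of the pattern (not the real image fields)::

    import jsonschema

    item = {'type': 'object',
            'properties': {'id': {'type': 'string'}},
            'required': ['id']}

    single = {'type': 'object', 'properties': {'image': item},
              'required': ['image']}
    listing = {'type': 'object',
               'properties': {'images': {'type': 'array', 'items': item}},
               'required': ['images']}

    jsonschema.validate({'image': {'id': 'a1'}}, single)
    jsonschema.validate({'images': [{'id': 'a1'}, {'id': 'b2'}]}, listing)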
diff --git a/tempest/api_schema/compute/v2/instance_usage_audit_logs.py b/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
new file mode 100644
index 0000000..658f574
--- /dev/null
+++ b/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
@@ -0,0 +1,59 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+common_instance_usage_audit_log = {
+    'type': 'object',
+    'properties': {
+        'hosts_not_run': {
+            'type': 'array',
+            'items': {'type': 'string'}
+        },
+        'log': {'type': 'object'},
+        'num_hosts': {'type': 'integer'},
+        'num_hosts_done': {'type': 'integer'},
+        'num_hosts_not_run': {'type': 'integer'},
+        'num_hosts_running': {'type': 'integer'},
+        'overall_status': {'type': 'string'},
+        'period_beginning': {'type': 'string'},
+        'period_ending': {'type': 'string'},
+        'total_errors': {'type': 'integer'},
+        'total_instances': {'type': 'integer'}
+    },
+    'required': ['hosts_not_run', 'log', 'num_hosts', 'num_hosts_done',
+                 'num_hosts_not_run', 'num_hosts_running', 'overall_status',
+                 'period_beginning', 'period_ending', 'total_errors',
+                 'total_instances']
+}
+
+get_instance_usage_audit_log = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'instance_usage_audit_log': common_instance_usage_audit_log
+        },
+        'required': ['instance_usage_audit_log']
+    }
+}
+
+list_instance_usage_audit_log = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'instance_usage_audit_logs': common_instance_usage_audit_log
+        },
+        'required': ['instance_usage_audit_logs']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/interfaces.py b/tempest/api_schema/compute/v2/interfaces.py
new file mode 100644
index 0000000..7fca791
--- /dev/null
+++ b/tempest/api_schema/compute/v2/interfaces.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api_schema.compute import interfaces as common_schema
+
+list_interfaces = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'interfaceAttachments': {
+                'type': 'array',
+                'items': common_schema.interface_common_info
+            }
+        },
+        'required': ['interfaceAttachments']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/keypairs.py b/tempest/api_schema/compute/v2/keypairs.py
index 3225b0d..9a025c3 100644
--- a/tempest/api_schema/compute/v2/keypairs.py
+++ b/tempest/api_schema/compute/v2/keypairs.py
@@ -12,6 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.api_schema.compute import keypairs
+
 get_keypair = {
     'status_code': [200],
     'response_body': {
@@ -45,3 +47,12 @@
         'required': ['keypair']
     }
 }
+
+create_keypair = {
+    'status_code': [200],
+    'response_body': keypairs.create_keypair
+}
+
+delete_keypair = {
+    'status_code': [202],
+}
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
index d69cbd7..31c0458 100644
--- a/tempest/api_schema/compute/v2/quotas.py
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -12,36 +12,37 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'injected_files': {'type': 'integer'},
-                    'injected_file_content_bytes': {'type': 'integer'},
-                    'injected_file_path_bytes': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'}
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'injected_files',
-                             'injected_file_content_bytes',
-                             'injected_file_path_bytes', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_files'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['required'].extend([
+    'id',
+    'injected_files',
+    'injected_file_content_bytes',
+    'injected_file_path_bytes'])
+
+quota_set_update = copy.deepcopy(quotas.common_quota_set)
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_files'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set'][
+    'required'].extend(['injected_files',
+                        'injected_file_content_bytes',
+                        'injected_file_path_bytes'])
+
+delete_quota = {
+    'status_code': [202]
 }
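
The V2 quota schema extends the 'required' list on a deep copy of the common schema; a shallow copy would not be enough here because the nested list would still be shared, as this sketch shows::

    import copy

    common = {'quota_set': {'required': ['instances', 'cores']}}

    shallow = dict(common)                        # shares the nested dict and list
    shallow['quota_set']['required'].append('id')
    assert common['quota_set']['required'] == ['instances', 'cores', 'id']  # leaked!

    common = {'quota_set': {'required': ['instances', 'cores']}}
    deep = copy.deepcopy(common)                  # fully independent copy
    deep['quota_set']['required'].append('id')
    assert common['quota_set']['required'] == ['instances', 'cores']        # untouched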
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 68b65b4..9a852e5 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -12,6 +12,49 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+common_security_group_rule = {
+    'from_port': {'type': ['integer', 'null']},
+    'to_port': {'type': ['integer', 'null']},
+    'group': {
+        'type': 'object',
+        'properties': {
+            'tenant_id': {'type': 'string'},
+            'name': {'type': 'string'}
+        }
+    },
+    'ip_protocol': {'type': ['string', 'null']},
+    # 'parent_group_id' can be a UUID, so it is also defined as 'string'.
+    'parent_group_id': {'type': ['string', 'integer', 'null']},
+    'ip_range': {
+        'type': 'object',
+        'properties': {
+            'cidr': {'type': 'string'}
+        }
+        # When an optional argument such as 'group_id' is provided in the
+        # request body, the 'cidr' attribute does not come back in the
+        # response body, so it is not 'required'.
+    },
+    'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': ['integer', 'string']},
+        'name': {'type': 'string'},
+        'tenant_id': {'type': 'string'},
+        'rules': {
+            'type': 'array',
+            'items': {
+                'type': ['object', 'null'],
+                'properties': common_security_group_rule
+            }
+        },
+        'description': {'type': 'string'},
+    },
+    'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
 list_security_groups = {
     'status_code': [200],
     'response_body': {
@@ -19,20 +62,44 @@
         'properties': {
             'security_groups': {
                 'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': ['integer', 'string']},
-                        'name': {'type': 'string'},
-                        'tenant_id': {'type': 'string'},
-                        'rules': {'type': 'array'},
-                        'description': {'type': 'string'},
-                    },
-                    'required': ['id', 'name', 'tenant_id', 'rules',
-                                 'description'],
-                }
+                'items': common_security_group
             }
         },
         'required': ['security_groups']
     }
 }
+
+get_security_group = create_security_group = update_security_group = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group': common_security_group
+        },
+        'required': ['security_group']
+    }
+}
+
+delete_security_group = {
+    'status_code': [202]
+}
+
+create_security_group_rule = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group_rule': {
+                'type': 'object',
+                'properties': common_security_group_rule,
+                'required': ['from_port', 'to_port', 'group', 'ip_protocol',
+                             'parent_group_id', 'id', 'ip_range']
+            }
+        },
+        'required': ['security_group_rule']
+    }
+}
+
+delete_security_group_rule = {
+    'status_code': [202]
+}
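
Because 'from_port', 'to_port' and 'ip_protocol' allow null and 'cidr' is not required inside 'ip_range', a group-based (for example ICMP-style) rule still validates. A minimal self-contained illustration of that nullable/optional handling::

    import jsonschema

    rule_sketch = {
        'type': 'object',
        'properties': {
            'from_port': {'type': ['integer', 'null']},
            'to_port': {'type': ['integer', 'null']},
            'ip_range': {'type': 'object',
                         'properties': {'cidr': {'type': 'string'}}}
        },
        'required': ['from_port', 'to_port', 'ip_range']
    }

    # Null ports and no 'cidr' inside 'ip_range' still validate.
    jsonschema.validate({'from_port': None, 'to_port': None, 'ip_range': {}},
                        rule_sketch)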
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index b4e6e53..f7ed94e 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -12,7 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
 
 create_server = {
     'status_code': [202],
@@ -34,9 +37,143 @@
                 # NOTE: OS-DCF:diskConfig is API extension, and some
                 # environments return a response without the attribute.
                 # So it is not 'required'.
-                'required': ['id', 'security_groups', 'links', 'adminPass']
+                # NOTE: adminPass is not required because it can be deactivated
+                # with nova API flag enable_instance_password=False
+                'required': ['id', 'security_groups', 'links']
             }
         },
         'required': ['server']
     }
 }
+
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+    'hostId': {'type': 'string'},
+    'OS-DCF:diskConfig': {'type': 'string'},
+    'accessIPv4': parameter_types.access_ip_v4,
+    'accessIPv6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+    # NOTE: OS-DCF:diskConfig and accessIPv4/v6 are API
+    # extensions, and some environments return a response
+    # without these attributes. So they are not 'required'.
+    'hostId'
+)
+
+list_virtual_interfaces = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'virtual_interfaces': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': 'string'},
+                        'mac_address': parameter_types.mac_address,
+                        'OS-EXT-VIF-NET:net_id': {'type': 'string'}
+                    },
+                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it is
+                    # not defined as 'required'.
+                    'required': ['id', 'mac_address']
+                }
+            }
+        },
+        'required': ['virtual_interfaces']
+    }
+}
+
+attach_volume = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volumeAttachment': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string'},
+                    'device': {'type': 'string'},
+                    'volumeId': {'type': 'string'},
+                    'serverId': {'type': ['integer', 'string']}
+                },
+                'required': ['id', 'device', 'volumeId', 'serverId']
+            }
+        },
+        'required': ['volumeAttachment']
+    }
+}
+
+detach_volume = {
+    'status_code': [202]
+}
+
+set_get_server_metadata_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'meta': {
+                'type': 'object',
+                'patternProperties': {
+                    '^.+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['meta']
+    }
+}
+
+list_addresses_by_network = {
+    'status_code': [200],
+    'response_body': parameter_types.addresses
+}
+
+server_actions_confirm_resize = copy.deepcopy(
+    servers.server_actions_delete_password)
+
+list_addresses = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'addresses': parameter_types.addresses
+        },
+        'required': ['addresses']
+    }
+}
+
+common_server_group = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': 'string'},
+        'name': {'type': 'string'},
+        'policies': {
+            'type': 'array',
+            'items': {'type': 'string'}
+        },
+        # The 'members' attribute contains the UUIDs of the instances
+        # present in the server group.
+        'members': {
+            'type': 'array',
+            'items': {'type': 'string'}
+        },
+        'metadata': {'type': 'object'}
+    },
+    'required': ['id', 'name', 'policies', 'members', 'metadata']
+}
+
+create_get_server_group = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'server_group': common_server_group
+        },
+        'required': ['server_group']
+    }
+}
+
+delete_server_group = {
+    'status_code': [204]
+}
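
Each of these dictionaries pairs an expected 'status_code' list with an optional 'response_body' schema (delete_server_group above, for instance, has only a status code). A minimal sketch of how a client might consume them; the helper name is hypothetical and this is not Tempest's actual validate_response implementation::

    import jsonschema

    def check_response(schema, status, body):
        """Sketch: check the status code, then the body when a
        'response_body' schema is defined."""
        if status not in schema['status_code']:
            raise AssertionError('unexpected status %s' % status)
        if 'response_body' in schema:
            jsonschema.validate(body, schema['response_body'])

    # Hypothetical usage with the schemas defined above:
    # check_response(delete_server_group, 204, None)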
diff --git a/tempest/api_schema/compute/v2/tenant_usages.py b/tempest/api_schema/compute/v2/tenant_usages.py
new file mode 100644
index 0000000..0b824a1
--- /dev/null
+++ b/tempest/api_schema/compute/v2/tenant_usages.py
@@ -0,0 +1,92 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+_server_usages = {
+    'type': 'array',
+    'items': {
+        'type': 'object',
+        'properties': {
+            'ended_at': {
+                'oneOf': [
+                    {'type': 'string'},
+                    {'type': 'null'}
+                ]
+            },
+            'flavor': {'type': 'string'},
+            'hours': {'type': 'number'},
+            'instance_id': {'type': 'string'},
+            'local_gb': {'type': 'integer'},
+            'memory_mb': {'type': 'integer'},
+            'name': {'type': 'string'},
+            'started_at': {'type': 'string'},
+            'state': {'type': 'string'},
+            'tenant_id': {'type': 'string'},
+            'uptime': {'type': 'integer'},
+            'vcpus': {'type': 'integer'},
+        },
+        'required': ['ended_at', 'flavor', 'hours', 'instance_id', 'local_gb',
+                     'memory_mb', 'name', 'started_at', 'state', 'tenant_id',
+                     'uptime', 'vcpus']
+    }
+}
+
+_tenant_usage_list = {
+    'type': 'object',
+    'properties': {
+        'server_usages': _server_usages,
+        'start': {'type': 'string'},
+        'stop': {'type': 'string'},
+        'tenant_id': {'type': 'string'},
+        'total_hours': {'type': 'number'},
+        'total_local_gb_usage': {'type': 'number'},
+        'total_memory_mb_usage': {'type': 'number'},
+        'total_vcpus_usage': {'type': 'number'},
+    },
+    'required': ['start', 'stop', 'tenant_id',
+                 'total_hours', 'total_local_gb_usage',
+                 'total_memory_mb_usage', 'total_vcpus_usage']
+}
+
+# The 'required' list of get_tenant differs from that of list_tenant.
+_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
+_tenant_usage_get['required'] = ['server_usages', 'start', 'stop', 'tenant_id',
+                                 'total_hours', 'total_local_gb_usage',
+                                 'total_memory_mb_usage', 'total_vcpus_usage']
+
+list_tenant = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'tenant_usages': {
+                'type': 'array',
+                'items': _tenant_usage_list
+            }
+        },
+        'required': ['tenant_usages']
+    }
+}
+
+get_tenant = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'tenant_usage': _tenant_usage_get
+        },
+        'required': ['tenant_usage']
+    }
+}
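
The 'oneOf' construct above lets 'ended_at' be either a timestamp string or null (for an instance that is still running). Its behaviour in isolation::

    import jsonschema

    ended_at = {'oneOf': [{'type': 'string'}, {'type': 'null'}]}

    jsonschema.validate('2014-03-27T09:00:00.000000', ended_at)  # finished instance
    jsonschema.validate(None, ended_at)                          # still running

The same constraint could also be written as {'type': ['string', 'null']}, matching the union-type style used elsewhere in these schemas.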
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 9cfd7e3..84a659c 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-get_volume = {
+create_get_volume = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
@@ -108,3 +108,7 @@
         'required': ['volumes']
     }
 }
+
+delete_volume = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v3/agents.py b/tempest/api_schema/compute/v3/agents.py
new file mode 100644
index 0000000..63d1c46
--- /dev/null
+++ b/tempest/api_schema/compute/v3/agents.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_agent = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..0272641
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import aggregates
+
+delete_aggregate = {
+    'status_code': [204]
+}
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V3 API's response status_code is 201
+create_aggregate['status_code'] = [201]
+
+aggregate_add_remove_host = copy.deepcopy(aggregates.aggregate_add_remove_host)
+# V3 API's response status_code is 202
+aggregate_add_remove_host['status_code'] = [202]
diff --git a/tempest/api_schema/compute/v3/availability_zone.py b/tempest/api_schema/compute/v3/availability_zone.py
new file mode 100644
index 0000000..5f36c33
--- /dev/null
+++ b/tempest/api_schema/compute/v3/availability_zone.py
@@ -0,0 +1,53 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'availability_zone_info': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'zone_name': {'type': 'string'},
+                        'zone_state': {
+                            'type': 'object',
+                            'properties': {
+                                'available': {'type': 'boolean'}
+                            },
+                            'required': ['available']
+                        },
+                        # NOTE: Here is the difference between detail and
+                        # non-detail
+                        'hosts': {'type': 'null'}
+                    },
+                    'required': ['zone_name', 'zone_state', 'hosts']
+                }
+            }
+        },
+        'required': ['availability_zone_info']
+    }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+    'availability_zone_info']['items']['properties']['hosts'] = common.detail
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/extensions.py b/tempest/api_schema/compute/v3/extensions.py
new file mode 100644
index 0000000..ceb0ce2
--- /dev/null
+++ b/tempest/api_schema/compute/v3/extensions.py
@@ -0,0 +1,36 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_extensions = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'extensions': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'name': {'type': 'string'},
+                        'alias': {'type': 'string'},
+                        'description': {'type': 'string'},
+                        'version': {'type': 'integer'}
+                    },
+                    'required': ['name', 'alias', 'description', 'version']
+                }
+            }
+        },
+        'required': ['extensions']
+    }
+}
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
new file mode 100644
index 0000000..52010f5
--- /dev/null
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -0,0 +1,68 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+from tempest.api_schema.compute import flavors_extra_specs
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# NOTE: In the V3 API, 'swap' comes as '0', not as an empty string '""'
+# (in the V2 API it comes as an empty string), so 'swap' is left as integer
+# type only.
+
+# Defining extra attributes for V3 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'properties'].update({'disabled': {'type': 'boolean'},
+                          'ephemeral': {'type': 'integer'},
+                          'flavor-access:is_public': {'type': 'boolean'},
+                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+    'required'].extend(['disabled', 'ephemeral'])
+
+set_flavor_extra_specs = copy.deepcopy(flavors_extra_specs.flavor_extra_specs)
+set_flavor_extra_specs['status_code'] = [201]
+
+unset_flavor_extra_specs = {
+    'status_code': [204]
+}
+
+get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# NOTE: In the V3 API, 'swap' comes as '0', not as an empty string '""'
+# (in the V2 API it comes as an empty string), so 'swap' is left as integer
+# type only.
+
+# Defining extra attributes for V3 flavor schema
+get_flavor_details['response_body']['properties']['flavor'][
+    'properties'].update({'disabled': {'type': 'boolean'},
+                          'ephemeral': {'type': 'integer'},
+                          'flavor-access:is_public': {'type': 'boolean'},
+                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+get_flavor_details['response_body']['properties']['flavor'][
+    'required'].extend(['disabled', 'ephemeral'])
+
+
+create_flavor_details = copy.deepcopy(get_flavor_details)
+
+# Overriding the status code for create flavor V3 API.
+create_flavor_details['status_code'] = [201]
+
+delete_flavor = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
new file mode 100644
index 0000000..eb689d1
--- /dev/null
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -0,0 +1,53 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import hosts
+
+startup_host = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'host': hosts.common_start_up_body
+        },
+        'required': ['host']
+    }
+}
+
+# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+    'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+    'enum': ['reboot']
+}
+
+update_host = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'host': hosts.update_host_common
+        },
+        'required': ['host']
+    }
+}
diff --git a/tempest/api_schema/compute/v3/hypervisors.py b/tempest/api_schema/compute/v3/hypervisors.py
new file mode 100644
index 0000000..aa31827
--- /dev/null
+++ b/tempest/api_schema/compute/v3/hypervisors.py
@@ -0,0 +1,50 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.api_schema.compute import hypervisors
+
+list_hypervisors_detail = copy.deepcopy(
+    hypervisors.common_list_hypervisors_detail)
+# Defining extra attributes for the V3 list hypervisors detail schema
+list_hypervisors_detail['response_body']['properties']['hypervisors'][
+    'items']['properties']['os-pci:pci_stats'] = {'type': 'array'}
+
+show_hypervisor = copy.deepcopy(hypervisors.common_show_hypervisor)
+# Defining extra attributes for V3 show hypervisor schema
+show_hypervisor['response_body']['properties']['hypervisor']['properties'][
+    'os-pci:pci_stats'] = {'type': 'array'}
+
+hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_info)
+
+# Defining extra attributes for the V3 hypervisor servers schema
+hypervisors_servers['response_body']['properties']['hypervisor']['properties'][
+    'servers'] = {
+        'type': 'array',
+        'items': {
+            'type': 'object',
+            'properties': {
+                # NOTE: Now the type of 'id' is integer,
+                # but here allows 'string' also because we
+                # will be able to change it to 'uuid' in
+                # the future.
+                'id': {'type': ['integer', 'string']},
+                'name': {'type': 'string'}
+            }
+        }
+    }
+# The V3 API response body always contains the 'servers' attribute even when
+# no servers (VMs) are present on the hypervisor host.
+hypervisors_servers['response_body']['properties']['hypervisor'][
+    'required'] = ['id', 'hypervisor_hostname', 'servers']
diff --git a/tempest/api_schema/compute/v3/interfaces.py b/tempest/api_schema/compute/v3/interfaces.py
new file mode 100644
index 0000000..5e1cee2
--- /dev/null
+++ b/tempest/api_schema/compute/v3/interfaces.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api_schema.compute import interfaces as common_schema
+
+list_interfaces = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'interface_attachments': {
+                'type': 'array',
+                'items': common_schema.interface_common_info
+            }
+        },
+        'required': ['interface_attachments']
+    }
+}
diff --git a/tempest/api_schema/compute/v3/keypairs.py b/tempest/api_schema/compute/v3/keypairs.py
index 0197c84..de5f4ba 100644
--- a/tempest/api_schema/compute/v3/keypairs.py
+++ b/tempest/api_schema/compute/v3/keypairs.py
@@ -12,6 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.api_schema.compute import keypairs
+
 get_keypair = {
     'status_code': [200],
     'response_body': {
@@ -30,3 +32,12 @@
         'required': ['keypair']
     }
 }
+
+create_keypair = {
+    'status_code': [201],
+    'response_body': keypairs.create_keypair
+}
+
+delete_keypair = {
+    'status_code': [204],
+}
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
index 1b9989d..a3212ed 100644
--- a/tempest/api_schema/compute/v3/quotas.py
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -12,31 +12,48 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'}
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set'][
+    'required'].extend(['id'])
+
+quota_common_info = {
+    'type': 'object',
+    'properties': {
+        'reserved': {'type': 'integer'},
+        'limit': {'type': 'integer'},
+        'in_use': {'type': 'integer'}
+    },
+    'required': ['reserved', 'limit', 'in_use']
+}
+
+quota_set_detail = copy.deepcopy(quotas.common_quota_set)
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'instances'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'cores'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'ram'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'floating_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'fixed_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'metadata_items'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'key_pairs'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'security_groups'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'security_group_rules'] = quota_common_info
+
+delete_quota = {
+    'status_code': [204]
 }
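The v3 quota schemas above are built by deep-copying the shared compute schema and patching individual fields rather than repeating the whole definition. A minimal sketch of that pattern, using a hypothetical stand-in for quotas.common_quota_set:

    import copy

    # Hypothetical base schema standing in for quotas.common_quota_set
    base = {
        'response_body': {
            'properties': {
                'quota_set': {
                    'properties': {'cores': {'type': 'integer'}},
                    'required': ['cores'],
                },
            },
        },
    }

    # Deep-copy so the derived v3 schema can be patched without touching the base
    derived = copy.deepcopy(base)
    quota_set = derived['response_body']['properties']['quota_set']
    quota_set['properties']['id'] = {'type': 'string'}
    quota_set['required'].append('id')

Note that copy.deepcopy is required here: a shallow copy would share the nested dicts, so patching the derived schema would also mutate the base.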
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 390962e..6716249 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -12,7 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
 
 create_server = {
     'status_code': [202],
@@ -29,8 +32,8 @@
                     'os-security-groups:security_groups': {'type': 'array'},
                     'links': parameter_types.links,
                     'admin_password': {'type': 'string'},
-                    'os-access-ips:access_ip_v4': {'type': 'string'},
-                    'os-access-ips:access_ip_v6': {'type': 'string'}
+                    'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+                    'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
                 },
                 # NOTE: os-access-ips:access_ip_v4/v6 are API extension,
                 # and some environments return a response without these
@@ -42,3 +45,57 @@
         'required': ['server']
     }
 }
+
+addresses_v3 = copy.deepcopy(parameter_types.addresses)
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+    'properties'].update({
+        'type': {'type': 'string'},
+        'mac_addr': {'type': 'string'}
+    })
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+    'required'].extend(
+        ['type', 'mac_addr']
+    )
+
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+    'addresses': addresses_v3,
+    'host_id': {'type': 'string'},
+    'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+    'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+    # NOTE: os-access-ips:access_ip_v4/v6 are API extension,
+    # and some environments return a response without these
+    # attributes. So they are not 'required'.
+    'host_id'
+)
+
+attach_detach_volume = {
+    'status_code': [202]
+}
+
+set_get_server_metadata_item = copy.deepcopy(servers.set_server_metadata)
+
+list_addresses_by_network = {
+    'status_code': [200],
+    'response_body': addresses_v3
+}
+
+server_actions_change_password = copy.deepcopy(
+    servers.server_actions_delete_password)
+
+list_addresses = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'addresses': addresses_v3
+        },
+        'required': ['addresses']
+    }
+}
+
+update_server_metadata = copy.deepcopy(servers.update_server_metadata)
+# V3 API's response status_code is 201
+update_server_metadata['status_code'] = [201]
diff --git a/tempest/api_schema/compute/version.py b/tempest/api_schema/compute/version.py
new file mode 100644
index 0000000..32c6d96
--- /dev/null
+++ b/tempest/api_schema/compute/version.py
@@ -0,0 +1,55 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+version = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'version': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string'},
+                    'links': {
+                        'type': 'array',
+                        'items': {
+                            'type': 'object',
+                            'properties': {
+                                'href': {'type': 'string', 'format': 'uri'},
+                                'rel': {'type': 'string'},
+                                'type': {'type': 'string'}
+                            },
+                            'required': ['href', 'rel']
+                        }
+                    },
+                    'media-types': {
+                        'type': 'array',
+                        'items': {
+                            'type': 'object',
+                            'properties': {
+                                'base': {'type': 'string'},
+                                'type': {'type': 'string'}
+                            },
+                            'required': ['base', 'type']
+                        }
+                    },
+                    'status': {'type': 'string'},
+                    'updated': {'type': 'string', 'format': 'date-time'}
+                },
+                'required': ['id', 'links', 'media-types', 'status', 'updated']
+            }
+        },
+        'required': ['version']
+    }
+}
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api_schema/queuing/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/api_schema/queuing/__init__.py
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api_schema/queuing/v1/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/api_schema/queuing/v1/__init__.py
diff --git a/tempest/api_schema/queuing/v1/queues.py b/tempest/api_schema/queuing/v1/queues.py
new file mode 100644
index 0000000..4630e1c
--- /dev/null
+++ b/tempest/api_schema/queuing/v1/queues.py
@@ -0,0 +1,98 @@
+
+# Copyright (c) 2014 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+list_link = {
+    'type': 'object',
+    'properties': {
+        'rel': {'type': 'string'},
+        'href': {
+            'type': 'string',
+            'format': 'uri'
+        }
+    },
+    'required': ['href', 'rel']
+}
+
+list_queue = {
+    'type': 'object',
+    'properties': {
+        'name': {'type': 'string'},
+        'href': {
+            'type': 'string',
+            'format': 'uri'
+        },
+        'metadata': {'type': 'object'}
+    },
+    'required': ['name', 'href']
+}
+
+list_queues = {
+    'status_code': [200, 204],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'links': {
+                'type': 'array',
+                'items': list_link,
+                'maxItems': 1
+            },
+            'queues': {
+                'type': 'array',
+                'items': list_queue
+            }
+        },
+        'required': ['links', 'queues']
+    }
+}
+
+message_link = {
+    'type': 'object',
+    'properties': {
+        'href': {
+            'type': 'string',
+            'format': 'uri'
+        },
+        'age': {'type': 'number'},
+        'created': {
+            'type': 'string',
+            'format': 'date-time'
+        }
+    },
+    'required': ['href', 'age', 'created']
+}
+
+messages = {
+    'type': 'object',
+    'properties': {
+        'free': {'type': 'number'},
+        'claimed': {'type': 'number'},
+        'total': {'type': 'number'},
+        'oldest': message_link,
+        'newest': message_link
+    },
+    'required': ['free', 'claimed', 'total']
+}
+
+queue_stats = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'messages': messages
+        },
+        'required': ['messages']
+    }
+}
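Each schema above pairs the expected status codes with a JSON Schema for the response body. A hedged sketch of how such a schema can be checked with the jsonschema library; the sample schema, status, and body below are assumptions for illustration only:

    import jsonschema

    # Minimal stand-in for one of the response schemas above (e.g. queue_stats)
    schema = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {'messages': {'type': 'object'}},
            'required': ['messages'],
        },
    }

    # A hypothetical status/body pair as a client might decode them
    status, body = 200, {'messages': {'free': 0, 'claimed': 0, 'total': 0}}

    assert status in schema['status_code']
    jsonschema.validate(body, schema['response_body'])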
diff --git a/tempest/auth.py b/tempest/auth.py
index 5fc923f..9c51edb 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
         :param client_type: 'tempest' or 'official'
         :param interface: 'json' or 'xml'. Applicable for tempest client only
         """
+        credentials = self._convert_credentials(credentials)
         if self.check_credentials(credentials):
             self.credentials = credentials
         else:
             raise TypeError("Invalid credentials")
-        self.credentials = credentials
         self.client_type = client_type
         self.interface = interface
         if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
         self.alt_auth_data = None
         self.alt_part = None
 
+    def _convert_credentials(self, credentials):
+        # Support dict credentials for backwards compatibility
+        if isinstance(credentials, dict):
+            return get_credentials(**credentials)
+        else:
+            return credentials
+
     def __str__(self):
         return "Creds :{creds}, client type: {client_type}, interface: " \
                "{interface}, cached auth data: {cache}".format(
@@ -73,29 +80,55 @@
     def _get_auth(self):
         raise NotImplementedError
 
+    def _fill_credentials(self, auth_data_body):
+        raise NotImplementedError
+
+    def fill_credentials(self):
+        """
+        Fill credentials object with data from auth
+        """
+        auth_data = self.get_auth()
+        self._fill_credentials(auth_data[1])
+        return self.credentials
+
     @classmethod
     def check_credentials(cls, credentials):
         """
-        Verify credentials are valid. Subclasses can do a better check.
+        Verify credentials are valid.
         """
-        return isinstance(credentials, dict)
+        return isinstance(credentials, Credentials) and credentials.is_valid()
 
     @property
     def auth_data(self):
-        if self.cache is None or self.is_expired(self.cache):
-            self.cache = self._get_auth()
-        return self.cache
+        return self.get_auth()
 
     @auth_data.deleter
     def auth_data(self):
         self.clear_auth()
 
+    def get_auth(self):
+        """
+        Return auth from cache if available, otherwise authenticate first
+        """
+        if self.cache is None or self.is_expired(self.cache):
+            self.set_auth()
+        return self.cache
+
+    def set_auth(self):
+        """
+        Force setting auth, ignoring the cache if it exists.
+        Refills the credentials from the fresh auth data
+        """
+        self.cache = self._get_auth()
+        self._fill_credentials(self.cache[1])
+
     def clear_auth(self):
         """
         Can be called to clear the access cache so that next request
         will fetch a new token and base_url.
         """
         self.cache = None
+        self.credentials.reset()
 
     def is_expired(self, auth_data):
         raise NotImplementedError
@@ -218,16 +251,6 @@
 
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
 
-    @classmethod
-    def check_credentials(cls, credentials, scoped=True):
-        # tenant_name is optional if not scoped
-        valid = super(KeystoneV2AuthProvider, cls).check_credentials(
-            credentials) and 'username' in credentials and \
-            'password' in credentials
-        if scoped:
-            valid = valid and 'tenant_name' in credentials
-        return valid
-
     def _auth_client(self):
         if self.client_type == 'tempest':
             if self.interface == 'json':
@@ -240,13 +263,25 @@
     def _auth_params(self):
         if self.client_type == 'tempest':
             return dict(
-                user=self.credentials['username'],
-                password=self.credentials['password'],
-                tenant=self.credentials.get('tenant_name', None),
+                user=self.credentials.username,
+                password=self.credentials.password,
+                tenant=self.credentials.tenant_name,
                 auth_data=True)
         else:
             raise NotImplementedError
 
+    def _fill_credentials(self, auth_data_body):
+        tenant = auth_data_body['token']['tenant']
+        user = auth_data_body['user']
+        if self.credentials.tenant_name is None:
+            self.credentials.tenant_name = tenant['name']
+        if self.credentials.tenant_id is None:
+            self.credentials.tenant_id = tenant['id']
+        if self.credentials.username is None:
+            self.credentials.username = user['name']
+        if self.credentials.user_id is None:
+            self.credentials.user_id = user['id']
+
     def base_url(self, filters, auth_data=None):
         """
         Filters can be:
@@ -303,16 +338,6 @@
 
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
 
-    @classmethod
-    def check_credentials(cls, credentials, scoped=True):
-        # tenant_name is optional if not scoped
-        valid = super(KeystoneV3AuthProvider, cls).check_credentials(
-            credentials) and 'username' in credentials and \
-            'password' in credentials and 'domain_name' in credentials
-        if scoped:
-            valid = valid and 'tenant_name' in credentials
-        return valid
-
     def _auth_client(self):
         if self.client_type == 'tempest':
             if self.interface == 'json':
@@ -325,14 +350,47 @@
     def _auth_params(self):
         if self.client_type == 'tempest':
             return dict(
-                user=self.credentials['username'],
-                password=self.credentials['password'],
-                tenant=self.credentials.get('tenant_name', None),
-                domain=self.credentials['domain_name'],
+                user=self.credentials.username,
+                password=self.credentials.password,
+                tenant=self.credentials.tenant_name,
+                domain=self.credentials.user_domain_name,
                 auth_data=True)
         else:
             raise NotImplementedError
 
+    def _fill_credentials(self, auth_data_body):
+        # project or domain, depending on the scope
+        project = auth_data_body.get('project', None)
+        domain = auth_data_body.get('domain', None)
+        # user is always there
+        user = auth_data_body['user']
+        # Set project fields
+        if project is not None:
+            if self.credentials.project_name is None:
+                self.credentials.project_name = project['name']
+            if self.credentials.project_id is None:
+                self.credentials.project_id = project['id']
+            if self.credentials.project_domain_id is None:
+                self.credentials.project_domain_id = project['domain']['id']
+            if self.credentials.project_domain_name is None:
+                self.credentials.project_domain_name = \
+                    project['domain']['name']
+        # Set domain fields
+        if domain is not None:
+            if self.credentials.domain_id is None:
+                self.credentials.domain_id = domain['id']
+            if self.credentials.domain_name is None:
+                self.credentials.domain_name = domain['name']
+        # Set user fields
+        if self.credentials.username is None:
+            self.credentials.username = user['name']
+        if self.credentials.user_id is None:
+            self.credentials.user_id = user['id']
+        if self.credentials.user_domain_id is None:
+            self.credentials.user_domain_id = user['domain']['id']
+        if self.credentials.user_domain_name is None:
+            self.credentials.user_domain_name = user['domain']['name']
+
     def base_url(self, filters, auth_data=None):
         """
         Filters can be:
@@ -398,3 +456,248 @@
                                             self.EXPIRY_DATE_FORMAT)
         return expiry - self.token_expiry_threshold <= \
             datetime.datetime.utcnow()
+
+
+def get_default_credentials(credential_type, fill_in=True):
+    """
+    Returns configured credentials of the specified type
+    based on the configured auth_version
+    """
+    return get_credentials(fill_in=fill_in, credential_type=credential_type)
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+    """
+    Builds a credentials object based on the configured auth_version
+
+    :param credential_type (string): requests credentials from the tempest
+           configuration file. Valid values are defined in
+           Credentials.TYPES.
+    :param kwargs (dict): taken into account only if credential_type is
+           not specified or None. Dict of credential key/value pairs
+
+    Examples:
+
+        Returns credentials from the provided parameters:
+        >>> get_credentials(username='foo', password='bar')
+
+        Returns credentials from tempest configuration:
+        >>> get_credentials(credential_type='user')
+    """
+    if CONF.identity.auth_version == 'v2':
+        credential_class = KeystoneV2Credentials
+        auth_provider_class = KeystoneV2AuthProvider
+    elif CONF.identity.auth_version == 'v3':
+        credential_class = KeystoneV3Credentials
+        auth_provider_class = KeystoneV3AuthProvider
+    else:
+        raise exceptions.InvalidConfiguration('Unsupported auth version')
+    if credential_type is not None:
+        creds = credential_class.get_default(credential_type)
+    else:
+        creds = credential_class(**kwargs)
+    # Fill in the credentials fields that were not specified
+    if fill_in:
+        auth_provider = auth_provider_class(creds)
+        creds = auth_provider.fill_credentials()
+    return creds
+
+
+class Credentials(object):
+    """
+    Set of credentials for accessing OpenStack services
+
+    ATTRIBUTES: list of valid class attributes representing credentials.
+
+    TYPES: types of credentials available in the configuration file.
+           For each key there's a tuple (section, prefix) to match the
+           configuration options.
+    """
+
+    ATTRIBUTES = []
+    TYPES = {
+        'identity_admin': ('identity', 'admin'),
+        'compute_admin': ('compute_admin', None),
+        'user': ('identity', None),
+        'alt_user': ('identity', 'alt')
+    }
+
+    def __init__(self, **kwargs):
+        """
+        Enforce the available attributes at init time (only).
+        Additional attributes can still be set afterwards if tests need
+        to do so.
+        """
+        self._initial = kwargs
+        self._apply_credentials(kwargs)
+
+    def _apply_credentials(self, attr):
+        for key in attr.keys():
+            if key in self.ATTRIBUTES:
+                setattr(self, key, attr[key])
+            else:
+                raise exceptions.InvalidCredentials
+
+    def __str__(self):
+        """
+        Represent only attributes included in self.ATTRIBUTES
+        """
+        _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+        return str(_repr)
+
+    def __eq__(self, other):
+        """
+        Credentials are equal if attributes in self.ATTRIBUTES are equal
+        """
+        return str(self) == str(other)
+
+    def __getattr__(self, key):
+        # If an attribute is set, __getattr__ is not invoked
+        # If an attribute is not set, and it is a known one, return None
+        if key in self.ATTRIBUTES:
+            return None
+        else:
+            raise AttributeError
+
+    def __delitem__(self, key):
+        # For backwards compatibility, support dict behaviour
+        if key in self.ATTRIBUTES:
+            delattr(self, key)
+        else:
+            raise AttributeError
+
+    def get(self, item, default):
+        # In this patch, act as a dict for backwards compatibility
+        try:
+            return getattr(self, item)
+        except AttributeError:
+            return default
+
+    @classmethod
+    def get_default(cls, credentials_type):
+        if credentials_type not in cls.TYPES:
+            raise exceptions.InvalidCredentials()
+        creds = cls._get_default(credentials_type)
+        if not creds.is_valid():
+            raise exceptions.InvalidConfiguration()
+        return creds
+
+    @classmethod
+    def _get_default(cls, credentials_type):
+        raise NotImplementedError
+
+    def is_valid(self):
+        raise NotImplementedError
+
+    def reset(self):
+        # First delete all known attributes
+        for key in self.ATTRIBUTES:
+            if getattr(self, key) is not None:
+                delattr(self, key)
+        # Then re-apply initial setup
+        self._apply_credentials(self._initial)
+
+
+class KeystoneV2Credentials(Credentials):
+
+    CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+    ATTRIBUTES = ['user_id', 'tenant_id']
+    ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+    @classmethod
+    def _get_default(cls, credentials_type='user'):
+        params = {}
+        section, prefix = cls.TYPES[credentials_type]
+        for attr in cls.CONF_ATTRIBUTES:
+            _section = getattr(CONF, section)
+            if prefix is None:
+                params[attr] = getattr(_section, attr)
+            else:
+                params[attr] = getattr(_section, prefix + "_" + attr)
+        return cls(**params)
+
+    def is_valid(self):
+        """
+        Minimum set of valid credentials are username and password.
+        Tenant is optional.
+        """
+        return None not in (self.username, self.password)
+
+
+class KeystoneV3Credentials(KeystoneV2Credentials):
+    """
+    Credentials suitable for the Keystone Identity V3 API
+    """
+
+    CONF_ATTRIBUTES = ['domain_name', 'password', 'tenant_name', 'username']
+    ATTRIBUTES = ['project_domain_id', 'project_domain_name', 'project_id',
+                  'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
+                  'user_domain_name', 'user_id']
+    ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+    def __init__(self, **kwargs):
+        """
+        If domain is not specified, load the one configured for the
+        identity manager.
+        """
+        domain_fields = set(x for x in self.ATTRIBUTES if 'domain' in x)
+        if not domain_fields.intersection(kwargs.keys()):
+            kwargs['user_domain_name'] = CONF.identity.admin_domain_name
+        super(KeystoneV3Credentials, self).__init__(**kwargs)
+
+    def __setattr__(self, key, value):
+        parent = super(KeystoneV3Credentials, self)
+        # for tenant_* set both project and tenant
+        if key == 'tenant_id':
+            parent.__setattr__('project_id', value)
+        elif key == 'tenant_name':
+            parent.__setattr__('project_name', value)
+        # for project_* set both project and tenant
+        if key == 'project_id':
+            parent.__setattr__('tenant_id', value)
+        elif key == 'project_name':
+            parent.__setattr__('tenant_name', value)
+        # for *_domain_* set both user and project if not set yet
+        if key == 'user_domain_id':
+            if self.project_domain_id is None:
+                parent.__setattr__('project_domain_id', value)
+        if key == 'project_domain_id':
+            if self.user_domain_id is None:
+                parent.__setattr__('user_domain_id', value)
+        if key == 'user_domain_name':
+            if self.project_domain_name is None:
+                parent.__setattr__('project_domain_name', value)
+        if key == 'project_domain_name':
+            if self.user_domain_name is None:
+                parent.__setattr__('user_domain_name', value)
+        # support domain_name coming from config
+        if key == 'domain_name':
+            parent.__setattr__('user_domain_name', value)
+            parent.__setattr__('project_domain_name', value)
+        # finally trigger default behaviour for all attributes
+        parent.__setattr__(key, value)
+
+    def is_valid(self):
+        """
+        Valid combinations of v3 credentials (excluding token, scope)
+        - User id, password (optional domain)
+        - User name, password and its domain id/name
+        For the scope, valid combinations are:
+        - None
+        - Project id (optional domain)
+        - Project name and its domain id/name
+        """
+        valid_user_domain = any(
+            [self.user_domain_id is not None,
+             self.user_domain_name is not None])
+        valid_project_domain = any(
+            [self.project_domain_id is not None,
+             self.project_domain_name is not None])
+        valid_user = any(
+            [self.user_id is not None,
+             self.username is not None and valid_user_domain])
+        valid_project = any(
+            [self.project_name is None and self.project_id is None,
+             self.project_id is not None,
+             self.project_name is not None and valid_project_domain])
+        return all([self.password is not None, valid_user, valid_project])
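With the auth.py changes above, credentials are objects produced by a factory keyed on the configured auth_version instead of plain dicts. A sketch of how calling code might use the new entry points, assuming a tempest configuration with auth_version set to v2; the literal values are placeholders:

    from tempest import auth

    # Build credentials from explicit values; fill_in=False skips the
    # authentication round-trip that would otherwise fill in missing IDs.
    creds = auth.get_credentials(username='demo', password='secret',
                                 tenant_name='demo', fill_in=False)
    assert creds.is_valid()

    # Load a configured credential set instead; this authenticates against
    # the deployment, so it needs a reachable identity service.
    admin_creds = auth.get_default_credentials('identity_admin')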
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index afbd732..9001302 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -16,6 +16,7 @@
 import logging
 import re
 import subprocess
+import testtools
 
 import tempest.cli
 from tempest import config
@@ -86,6 +87,8 @@
     def test_cinder_rate_limits(self):
         self.cinder('rate-limits')
 
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          'Volume snapshot not available.')
     def test_cinder_snapshot_list(self):
         self.cinder('snapshot-list')
 
@@ -115,6 +118,12 @@
     def test_cinder_bash_completion(self):
         self.cinder('bash-completion')
 
+    def test_cinder_qos_list(self):
+        self.cinder('qos-list')
+
+    def test_cinder_encryption_type_list(self):
+        self.cinder('encryption-type-list')
+
     def test_admin_help(self):
         help_text = self.cinder('help')
         lines = help_text.split('\n')
diff --git a/tempest/cli/simple_read_only/test_heat.py b/tempest/cli/simple_read_only/test_heat.py
index cf4580c..7a952fc 100644
--- a/tempest/cli/simple_read_only/test_heat.py
+++ b/tempest/cli/simple_read_only/test_heat.py
@@ -85,6 +85,9 @@
     def test_heat_help(self):
         self.heat('help')
 
+    def test_heat_bash_completion(self):
+        self.heat('bash-completion')
+
     def test_heat_help_cmd(self):
         # Check requesting help for a specific command works
         help_text = self.heat('help resource-template')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 1efbede..dda65c1 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -117,6 +117,11 @@
     def test_admin_bashcompletion(self):
         self.keystone('bash-completion')
 
+    def test_admin_ec2_credentials_list(self):
+        creds = self.keystone('ec2-credentials-list')
+        creds = self.parser.listing(creds)
+        self.assertTableStruct(creds, ['tenant', 'access', 'secret'])
+
     # Optional arguments:
 
     def test_admin_version(self):
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index a3787ab..1c1ddf1 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -145,6 +145,9 @@
     def test_admin_secgroup_list_rules(self):
         self.nova('secgroup-list-rules')
 
+    def test_admin_server_group_list(self):
+        self.nova('server-group-list')
+
     def test_admin_servce_list(self):
         self.nova('service-list')
 
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index cd819a4..36cc324 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -48,23 +48,93 @@
 
     def test_sahara_plugins_list(self):
         plugins = self.parser.listing(self.sahara('plugin-list'))
-        self.assertTableStruct(plugins, ['name', 'versions', 'title'])
+        self.assertTableStruct(plugins, [
+            'name',
+            'versions',
+            'title'
+        ])
 
     def test_sahara_plugins_show(self):
-        plugin = self.parser.listing(self.sahara('plugin-show',
-                                                 params='--name vanilla'))
-        self.assertTableStruct(plugin, ['Property', 'Value'])
+        result = self.sahara('plugin-show', params='--name vanilla')
+        plugin = self.parser.listing(result)
+        self.assertTableStruct(plugin, [
+            'Property',
+            'Value'
+        ])
 
     def test_sahara_node_group_template_list(self):
-        plugins = self.parser.listing(self.sahara('node-group-template-list'))
-        self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
-                                         'node_processes', 'description'])
+        result = self.sahara('node-group-template-list')
+        node_group_templates = self.parser.listing(result)
+        self.assertTableStruct(node_group_templates, [
+            'name',
+            'id',
+            'plugin_name',
+            'node_processes',
+            'description'
+        ])
 
     def test_sahara_cluster_template_list(self):
-        plugins = self.parser.listing(self.sahara('cluster-template-list'))
-        self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
-                                         'node_groups', 'description'])
+        result = self.sahara('cluster-template-list')
+        cluster_templates = self.parser.listing(result)
+        self.assertTableStruct(cluster_templates, [
+            'name',
+            'id',
+            'plugin_name',
+            'node_groups',
+            'description'
+        ])
 
     def test_sahara_cluster_list(self):
-        plugins = self.parser.listing(self.sahara('cluster-list'))
-        self.assertTableStruct(plugins, ['name', 'id', 'status', 'node_count'])
+        result = self.sahara('cluster-list')
+        clusters = self.parser.listing(result)
+        self.assertTableStruct(clusters, [
+            'name',
+            'id',
+            'status',
+            'node_count'
+        ])
+
+    def test_sahara_data_source_list(self):
+        result = self.sahara('data-source-list')
+        data_sources = self.parser.listing(result)
+        self.assertTableStruct(data_sources, [
+            'name',
+            'id',
+            'type',
+            'description'
+        ])
+
+    def test_sahara_job_binary_data_list(self):
+        result = self.sahara('job-binary-data-list')
+        job_binary_data_list = self.parser.listing(result)
+        self.assertTableStruct(job_binary_data_list, [
+            'id',
+            'name'
+        ])
+
+    def test_sahara_job_binary_list(self):
+        result = self.sahara('job-binary-list')
+        job_binaries = self.parser.listing(result)
+        self.assertTableStruct(job_binaries, [
+            'id',
+            'name',
+            'description'
+        ])
+
+    def test_sahara_job_template_list(self):
+        result = self.sahara('job-template-list')
+        job_templates = self.parser.listing(result)
+        self.assertTableStruct(job_templates, [
+            'id',
+            'name',
+            'description'
+        ])
+
+    def test_sahara_job_list(self):
+        result = self.sahara('job-list')
+        jobs = self.parser.listing(result)
+        self.assertTableStruct(jobs, [
+            'id',
+            'cluster_id',
+            'status'
+        ])
diff --git a/tempest/clients.py b/tempest/clients.py
index 7ebd983..7532bf2 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,23 +13,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# Default client libs
-import cinderclient.client
-import glanceclient
-import heatclient.client
 import keystoneclient.exceptions
 import keystoneclient.v2_0.client
-import neutronclient.v2_0.client
-import novaclient.client
-import swiftclient
 
-from tempest.common.rest_client import NegativeRestClient
+from tempest import auth
+from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
 from tempest import manager
 from tempest.openstack.common import log as logging
 from tempest.services.baremetal.v1.client_json import BaremetalClientJSON
 from tempest.services import botoclients
+from tempest.services.compute.json.agents_client import \
+    AgentsClientJSON
 from tempest.services.compute.json.aggregates_client import \
     AggregatesClientJSON
 from tempest.services.compute.json.availability_zone_client import \
@@ -52,6 +48,8 @@
     InterfacesClientJSON
 from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
 from tempest.services.compute.json.limits_client import LimitsClientJSON
+from tempest.services.compute.json.migrations_client import \
+    MigrationsClientJSON
 from tempest.services.compute.json.quotas_client import QuotasClientJSON
 from tempest.services.compute.json.security_groups_client import \
     SecurityGroupsClientJSON
@@ -78,6 +76,8 @@
     InterfacesV3ClientJSON
 from tempest.services.compute.v3.json.keypairs_client import \
     KeyPairsV3ClientJSON
+from tempest.services.compute.v3.json.migration_client import \
+    MigrationsV3ClientJSON
 from tempest.services.compute.v3.json.quotas_client import \
     QuotasV3ClientJSON
 from tempest.services.compute.v3.json.servers_client import \
@@ -117,6 +117,8 @@
 from tempest.services.data_processing.v1_1.client import DataProcessingClient
 from tempest.services.database.json.flavors_client import \
     DatabaseFlavorsClientJSON
+from tempest.services.database.json.versions_client import \
+    DatabaseVersionsClientJSON
 from tempest.services.identity.json.identity_client import IdentityClientJSON
 from tempest.services.identity.json.identity_client import TokenClientJSON
 from tempest.services.identity.v3.json.credentials_client import \
@@ -127,6 +129,7 @@
     IdentityV3ClientJSON
 from tempest.services.identity.v3.json.identity_client import V3TokenClientJSON
 from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
+from tempest.services.identity.v3.json.region_client import RegionClientJSON
 from tempest.services.identity.v3.json.service_client import \
     ServiceClientJSON
 from tempest.services.identity.v3.xml.credentials_client import \
@@ -136,6 +139,7 @@
     IdentityV3ClientXML
 from tempest.services.identity.v3.xml.identity_client import V3TokenClientXML
 from tempest.services.identity.v3.xml.policy_client import PolicyClientXML
+from tempest.services.identity.v3.xml.region_client import RegionClientXML
 from tempest.services.identity.v3.xml.service_client import \
     ServiceClientXML
 from tempest.services.identity.xml.identity_client import IdentityClientXML
@@ -162,8 +166,12 @@
     VolumeHostsClientJSON
 from tempest.services.volume.json.admin.volume_quotas_client import \
     VolumeQuotasClientJSON
+from tempest.services.volume.json.admin.volume_services_client import \
+    VolumesServicesClientJSON
 from tempest.services.volume.json.admin.volume_types_client import \
     VolumeTypesClientJSON
+from tempest.services.volume.json.availability_zone_client import \
+    VolumeAvailabilityZoneClientJSON
 from tempest.services.volume.json.backups_client import BackupsClientJSON
 from tempest.services.volume.json.extensions_client import \
     ExtensionsClientJSON as VolumeExtensionClientJSON
@@ -175,8 +183,12 @@
     VolumeHostsClientXML
 from tempest.services.volume.xml.admin.volume_quotas_client import \
     VolumeQuotasClientXML
+from tempest.services.volume.xml.admin.volume_services_client import \
+    VolumesServicesClientXML
 from tempest.services.volume.xml.admin.volume_types_client import \
     VolumeTypesClientXML
+from tempest.services.volume.xml.availability_zone_client import \
+    VolumeAvailabilityZoneClientXML
 from tempest.services.volume.xml.backups_client import BackupsClientXML
 from tempest.services.volume.xml.extensions_client import \
     ExtensionsClientXML as VolumeExtensionClientXML
@@ -193,22 +205,12 @@
     Top level manager for OpenStack tempest clients
     """
 
-    def __init__(self, username=None, password=None, tenant_name=None,
-                 interface='json', service=None):
-        """
-        We allow overriding of the credentials used within the various
-        client classes managed by the Manager object. Left as None, the
-        standard username/password/tenant_name is used.
-
-        :param username: Override of the username
-        :param password: Override of the password
-        :param tenant_name: Override of the tenant name
-        """
+    def __init__(self, credentials=None, interface='json', service=None):
+        # Set interface and client type first
         self.interface = interface
         self.client_type = 'tempest'
         # super cares for credentials validation
-        super(Manager, self).__init__(
-            username=username, password=password, tenant_name=tenant_name)
+        super(Manager, self).__init__(credentials=credentials)
 
         if self.interface == 'xml':
             self.certificates_client = CertificatesClientXML(
@@ -241,11 +243,14 @@
             self.availability_zone_client = AvailabilityZoneClientXML(
                 self.auth_provider)
             self.service_client = ServiceClientXML(self.auth_provider)
+            self.volume_services_client = VolumesServicesClientXML(
+                self.auth_provider)
             self.aggregates_client = AggregatesClientXML(self.auth_provider)
             self.services_client = ServicesClientXML(self.auth_provider)
             self.tenant_usages_client = TenantUsagesClientXML(
                 self.auth_provider)
             self.policy_client = PolicyClientXML(self.auth_provider)
+            self.region_client = RegionClientXML(self.auth_provider)
             self.hosts_client = HostsClientXML(self.auth_provider)
             self.hypervisor_client = HypervisorClientXML(self.auth_provider)
             self.network_client = NetworkClientXML(self.auth_provider)
@@ -264,6 +269,8 @@
                     self.auth_provider)
             self.token_client = TokenClientXML()
             self.token_v3_client = V3TokenClientXML()
+            self.volume_availability_zone_client = \
+                VolumeAvailabilityZoneClientXML(self.auth_provider)
 
         elif self.interface == 'json':
             self.certificates_client = CertificatesClientJSON(
@@ -316,6 +323,8 @@
             self.services_v3_client = ServicesV3ClientJSON(
                 self.auth_provider)
             self.service_client = ServiceClientJSON(self.auth_provider)
+            self.volume_services_client = VolumesServicesClientJSON(
+                self.auth_provider)
             self.agents_v3_client = AgentsV3ClientJSON(self.auth_provider)
             self.aggregates_v3_client = AggregatesV3ClientJSON(
                 self.auth_provider)
@@ -325,7 +334,10 @@
             self.tenant_usages_client = TenantUsagesClientJSON(
                 self.auth_provider)
             self.version_v3_client = VersionV3ClientJSON(self.auth_provider)
+            self.migrations_v3_client = MigrationsV3ClientJSON(
+                self.auth_provider)
             self.policy_client = PolicyClientJSON(self.auth_provider)
+            self.region_client = RegionClientJSON(self.auth_provider)
             self.hosts_client = HostsClientJSON(self.auth_provider)
             self.hypervisor_v3_client = HypervisorV3ClientJSON(
                 self.auth_provider)
@@ -345,27 +357,33 @@
             self.hosts_v3_client = HostsV3ClientJSON(self.auth_provider)
             self.database_flavors_client = DatabaseFlavorsClientJSON(
                 self.auth_provider)
+            self.database_versions_client = DatabaseVersionsClientJSON(
+                self.auth_provider)
             self.queuing_client = QueuingClientJSON(self.auth_provider)
             if CONF.service_available.ceilometer:
                 self.telemetry_client = TelemetryClientJSON(
                     self.auth_provider)
             self.token_client = TokenClientJSON()
             self.token_v3_client = V3TokenClientJSON()
-            self.negative_client = NegativeRestClient(self.auth_provider)
+            self.negative_client = rest_client.NegativeRestClient(
+                self.auth_provider)
             self.negative_client.service = service
+            self.volume_availability_zone_client = \
+                VolumeAvailabilityZoneClientJSON(self.auth_provider)
 
         else:
             msg = "Unsupported interface type `%s'" % interface
             raise exceptions.InvalidConfiguration(msg)
 
         # TODO(andreaf) EC2 client still do their auth, v2 only
-        ec2_client_args = (self.credentials.get('username'),
-                           self.credentials.get('password'),
+        ec2_client_args = (self.credentials.username,
+                           self.credentials.password,
                            CONF.identity.uri,
-                           self.credentials.get('tenant_name'))
+                           self.credentials.tenant_name)
 
         # common clients
         self.account_client = AccountClient(self.auth_provider)
+        self.agents_client = AgentsClientJSON(self.auth_provider)
         if CONF.service_available.glance:
             self.image_client = ImageClientJSON(self.auth_provider)
             self.image_client_v2 = ImageClientV2JSON(self.auth_provider)
@@ -381,6 +399,7 @@
             AccountClientCustomizedHeader(self.auth_provider)
         self.data_processing_client = DataProcessingClient(
             self.auth_provider)
+        self.migrations_client = MigrationsClientJSON(self.auth_provider)
 
 
 class AltManager(Manager):
@@ -391,11 +410,10 @@
     """
 
     def __init__(self, interface='json', service=None):
-        super(AltManager, self).__init__(CONF.identity.alt_username,
-                                         CONF.identity.alt_password,
-                                         CONF.identity.alt_tenant_name,
-                                         interface=interface,
-                                         service=service)
+        super(AltManager, self).__init__(
+            credentials=auth.get_default_credentials('alt_user'),
+            interface=interface,
+            service=service)
 
 
 class AdminManager(Manager):
@@ -406,11 +424,10 @@
     """
 
     def __init__(self, interface='json', service=None):
-        super(AdminManager, self).__init__(CONF.identity.admin_username,
-                                           CONF.identity.admin_password,
-                                           CONF.identity.admin_tenant_name,
-                                           interface=interface,
-                                           service=service)
+        super(AdminManager, self).__init__(
+            credentials=auth.get_default_credentials('identity_admin'),
+            interface=interface,
+            service=service)
 
 
 class ComputeAdminManager(Manager):
@@ -422,29 +439,10 @@
 
     def __init__(self, interface='json', service=None):
         base = super(ComputeAdminManager, self)
-        base.__init__(CONF.compute_admin.username,
-                      CONF.compute_admin.password,
-                      CONF.compute_admin.tenant_name,
-                      interface=interface,
-                      service=service)
-
-
-class OrchestrationManager(Manager):
-    """
-    Manager object that uses the admin credentials for its
-    so that heat templates can create users
-    """
-    def __init__(self, interface='json', service=None):
-        base = super(OrchestrationManager, self)
-        # heat currently needs an admin user so that stacks can create users
-        # however the tests need the demo tenant so that the neutron
-        # private network is the default. DO NOT change this auth combination
-        # until heat can run with the demo user.
-        base.__init__(CONF.identity.admin_username,
-                      CONF.identity.admin_password,
-                      CONF.identity.tenant_name,
-                      interface=interface,
-                      service=service)
+        base.__init__(
+            credentials=auth.get_default_credentials('compute_admin'),
+            interface=interface,
+            service=service)
 
 
 class OfficialClientManager(manager.Manager):
@@ -456,46 +454,57 @@
     NOVACLIENT_VERSION = '2'
     CINDERCLIENT_VERSION = '1'
     HEATCLIENT_VERSION = '1'
+    IRONICCLIENT_VERSION = '1'
+    SAHARACLIENT_VERSION = '1.1'
 
-    def __init__(self, username, password, tenant_name):
+    def __init__(self, credentials):
         # FIXME(andreaf) Auth provider for client_type 'official' is
         # not implemented yet, setting to 'tempest' for now.
         self.client_type = 'tempest'
         self.interface = None
         # super cares for credentials validation
-        super(OfficialClientManager, self).__init__(
-            username=username, password=password, tenant_name=tenant_name)
-        self.compute_client = self._get_compute_client(username,
-                                                       password,
-                                                       tenant_name)
-        self.identity_client = self._get_identity_client(username,
-                                                         password,
-                                                         tenant_name)
+        super(OfficialClientManager, self).__init__(credentials=credentials)
+        self.baremetal_client = self._get_baremetal_client()
+        self.compute_client = self._get_compute_client(credentials)
+        self.identity_client = self._get_identity_client(credentials)
         self.image_client = self._get_image_client()
         self.network_client = self._get_network_client()
-        self.volume_client = self._get_volume_client(username,
-                                                     password,
-                                                     tenant_name)
+        self.volume_client = self._get_volume_client(credentials)
         self.object_storage_client = self._get_object_storage_client(
-            username,
-            password,
-            tenant_name)
+            credentials)
         self.orchestration_client = self._get_orchestration_client(
-            username,
-            password,
-            tenant_name)
+            credentials)
+        self.data_processing_client = self._get_data_processing_client(
+            credentials)
 
-    def _get_compute_client(self, username, password, tenant_name):
+    def _get_roles(self):
+        admin_credentials = auth.get_default_credentials('identity_admin')
+        keystone_admin = self._get_identity_client(admin_credentials)
+
+        username = self.credentials.username
+        tenant_name = self.credentials.tenant_name
+        user_id = keystone_admin.users.find(name=username).id
+        tenant_id = keystone_admin.tenants.find(name=tenant_name).id
+
+        roles = keystone_admin.roles.roles_for_user(
+            user=user_id, tenant=tenant_id)
+
+        return [r.name for r in roles]
+
+    def _get_compute_client(self, credentials):
         # Novaclient will not execute operations for anyone but the
         # identified user, so a new client needs to be created for
         # each user that operations need to be performed for.
-        self._validate_credentials(username, password, tenant_name)
+        if not CONF.service_available.nova:
+            return None
+        import novaclient.client
 
         auth_url = CONF.identity.uri
         dscv = CONF.identity.disable_ssl_certificate_validation
         region = CONF.identity.region
 
-        client_args = (username, password, tenant_name, auth_url)
+        client_args = (credentials.username, credentials.password,
+                       credentials.tenant_name, auth_url)
 
         # Create our default Nova client to use in testing
         service_type = CONF.compute.catalog_type
@@ -510,6 +519,9 @@
                                         http_log_debug=True)
 
     def _get_image_client(self):
+        if not CONF.service_available.glance:
+            return None
+        import glanceclient
         token = self.identity_client.auth_token
         region = CONF.identity.region
         endpoint_type = CONF.image.endpoint_type
@@ -520,26 +532,32 @@
         return glanceclient.Client('1', endpoint=endpoint, token=token,
                                    insecure=dscv)
 
-    def _get_volume_client(self, username, password, tenant_name):
+    def _get_volume_client(self, credentials):
+        if not CONF.service_available.cinder:
+            return None
+        import cinderclient.client
         auth_url = CONF.identity.uri
         region = CONF.identity.region
         endpoint_type = CONF.volume.endpoint_type
+        dscv = CONF.identity.disable_ssl_certificate_validation
         return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
-                                          username,
-                                          password,
-                                          tenant_name,
+                                          credentials.username,
+                                          credentials.password,
+                                          credentials.tenant_name,
                                           auth_url,
                                           region_name=region,
                                           endpoint_type=endpoint_type,
+                                          insecure=dscv,
                                           http_log_debug=True)
 
-    def _get_object_storage_client(self, username, password, tenant_name):
+    def _get_object_storage_client(self, credentials):
+        if not CONF.service_available.swift:
+            return None
+        import swiftclient
         auth_url = CONF.identity.uri
         # add current tenant to swift operator role group.
-        keystone_admin = self._get_identity_client(
-            CONF.identity.admin_username,
-            CONF.identity.admin_password,
-            CONF.identity.admin_tenant_name)
+        admin_credentials = auth.get_default_credentials('identity_admin')
+        keystone_admin = self._get_identity_client(admin_credentials)
 
         # enable test user to operate swift by adding operator role to him.
         roles = keystone_admin.roles.list()
@@ -556,23 +574,18 @@
 
         endpoint_type = CONF.object_storage.endpoint_type
         os_options = {'endpoint_type': endpoint_type}
-        return swiftclient.Connection(auth_url, username, password,
-                                      tenant_name=tenant_name,
+        return swiftclient.Connection(auth_url, credentials.username,
+                                      credentials.password,
+                                      tenant_name=credentials.tenant_name,
                                       auth_version='2',
                                       os_options=os_options)
 
-    def _get_orchestration_client(self, username=None, password=None,
-                                  tenant_name=None):
-        if not username:
-            username = CONF.identity.admin_username
-        if not password:
-            password = CONF.identity.admin_password
-        if not tenant_name:
-            tenant_name = CONF.identity.tenant_name
+    def _get_orchestration_client(self, credentials):
+        if not CONF.service_available.heat:
+            return None
+        import heatclient.client
 
-        self._validate_credentials(username, password, tenant_name)
-
-        keystone = self._get_identity_client(username, password, tenant_name)
+        keystone = self._get_identity_client(credentials)
         region = CONF.identity.region
         endpoint_type = CONF.orchestration.endpoint_type
         token = keystone.auth_token
@@ -589,22 +602,53 @@
             return heatclient.client.Client(self.HEATCLIENT_VERSION,
                                             endpoint,
                                             token=token,
-                                            username=username,
-                                            password=password)
+                                            username=credentials.username,
+                                            password=credentials.password)
 
-    def _get_identity_client(self, username, password, tenant_name):
+    def _get_identity_client(self, credentials):
         # This identity client is not intended to check the security
         # of the identity service, so use admin credentials by default.
-        self._validate_credentials(username, password, tenant_name)
 
         auth_url = CONF.identity.uri
         dscv = CONF.identity.disable_ssl_certificate_validation
 
-        return keystoneclient.v2_0.client.Client(username=username,
-                                                 password=password,
-                                                 tenant_name=tenant_name,
-                                                 auth_url=auth_url,
-                                                 insecure=dscv)
+        return keystoneclient.v2_0.client.Client(
+            username=credentials.username,
+            password=credentials.password,
+            tenant_name=credentials.tenant_name,
+            auth_url=auth_url,
+            insecure=dscv)
+
+    def _get_baremetal_client(self):
+        # ironic client is currently intended to be used by admin users
+        if not CONF.service_available.ironic:
+            return None
+        import ironicclient.client
+        roles = self._get_roles()
+        if CONF.identity.admin_role not in roles:
+            return None
+
+        auth_url = CONF.identity.uri
+        api_version = self.IRONICCLIENT_VERSION
+        insecure = CONF.identity.disable_ssl_certificate_validation
+        service_type = CONF.baremetal.catalog_type
+        endpoint_type = CONF.baremetal.endpoint_type
+        creds = {
+            'os_username': self.credentials.username,
+            'os_password': self.credentials.password,
+            'os_tenant_name': self.credentials.tenant_name
+        }
+
+        try:
+            return ironicclient.client.get_client(
+                api_version=api_version,
+                os_auth_url=auth_url,
+                insecure=insecure,
+                os_service_type=service_type,
+                os_endpoint_type=endpoint_type,
+                **creds)
+        except keystoneclient.exceptions.EndpointNotFound:
+            return None
 
     def _get_network_client(self):
         # The intended configuration is for the network client to have
@@ -613,19 +657,42 @@
         # preferable to authenticating as a specific user because
         # working with certain resources (public routers and networks)
         # often requires admin privileges anyway.
-        username = CONF.identity.admin_username
-        password = CONF.identity.admin_password
-        tenant_name = CONF.identity.admin_tenant_name
+        if not CONF.service_available.neutron:
+            return None
+        import neutronclient.v2_0.client
 
-        self._validate_credentials(username, password, tenant_name)
+        credentials = auth.get_default_credentials('identity_admin')
 
         auth_url = CONF.identity.uri
         dscv = CONF.identity.disable_ssl_certificate_validation
         endpoint_type = CONF.network.endpoint_type
 
-        return neutronclient.v2_0.client.Client(username=username,
-                                                password=password,
-                                                tenant_name=tenant_name,
-                                                endpoint_type=endpoint_type,
-                                                auth_url=auth_url,
-                                                insecure=dscv)
+        return neutronclient.v2_0.client.Client(
+            username=credentials.username,
+            password=credentials.password,
+            tenant_name=credentials.tenant_name,
+            endpoint_type=endpoint_type,
+            auth_url=auth_url,
+            insecure=dscv)
+
+    def _get_data_processing_client(self, credentials):
+        if not CONF.service_available.sahara:
+            # Sahara isn't available
+            return None
+
+        import saharaclient.client
+
+        endpoint_type = CONF.data_processing.endpoint_type
+        catalog_type = CONF.data_processing.catalog_type
+        auth_url = CONF.identity.uri
+
+        client = saharaclient.client.Client(
+            self.SAHARACLIENT_VERSION,
+            credentials.username,
+            credentials.password,
+            project_name=credentials.tenant_name,
+            endpoint_type=endpoint_type,
+            service_type=catalog_type,
+            auth_url=auth_url)
+
+        return client
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/cmd/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/cmd/__init__.py
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
new file mode 100755
index 0000000..20ee63e
--- /dev/null
+++ b/tempest/cmd/javelin.py
@@ -0,0 +1,430 @@
+#!/usr/bin/env python
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Javelin makes resources that should survive an upgrade.
+
+Javelin is a tool for creating, verifying, and deleting a small set of
+resources in a declarative way.
+
+"""
+
+import logging
+import os
+import sys
+import unittest
+import yaml
+
+import argparse
+
+import tempest.auth
+from tempest import exceptions
+from tempest.services.compute.json import flavors_client
+from tempest.services.compute.json import servers_client
+from tempest.services.identity.json import identity_client
+from tempest.services.image.v2.json import image_client
+from tempest.services.object_storage import container_client
+from tempest.services.object_storage import object_client
+
+OPTS = {}
+USERS = {}
+RES = {}
+
+LOG = None
+
+
+class OSClient(object):
+    _creds = None
+    identity = None
+    servers = None
+
+    def __init__(self, user, pw, tenant):
+        _creds = tempest.auth.KeystoneV2Credentials(
+            username=user,
+            password=pw,
+            tenant_name=tenant)
+        _auth = tempest.auth.KeystoneV2AuthProvider(_creds)
+        self.identity = identity_client.IdentityClientJSON(_auth)
+        self.servers = servers_client.ServersClientJSON(_auth)
+        self.objects = object_client.ObjectClient(_auth)
+        self.containers = container_client.ContainerClient(_auth)
+        self.images = image_client.ImageClientV2JSON(_auth)
+        self.flavors = flavors_client.FlavorsClientJSON(_auth)
+
+
+def load_resources(fname):
+    """Load the expected resources from a yaml flie."""
+    return yaml.load(open(fname, 'r'))
+
+
+def keystone_admin():
+    return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
+
+
+def client_for_user(name):
+    LOG.debug("Entering client_for_user")
+    if name in USERS:
+        user = USERS[name]
+        LOG.debug("Created client for user %s" % user)
+        return OSClient(user['name'], user['pass'], user['tenant'])
+    else:
+        LOG.error("%s not found in USERS: %s" % (name, USERS))
+
+###################
+#
+# TENANTS
+#
+###################
+
+
+def create_tenants(tenants):
+    """Create tenants from resource definition.
+
+    Don't create the tenants if they already exist.
+    """
+    admin = keystone_admin()
+    _, body = admin.identity.list_tenants()
+    existing = [x['name'] for x in body]
+    for tenant in tenants:
+        if tenant not in existing:
+            admin.identity.create_tenant(tenant)
+        else:
+            LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+
+##############
+#
+# USERS
+#
+##############
+
+
+def _users_for_tenant(users, tenant):
+    u_for_t = []
+    for user in users:
+        for n in user:
+            if user[n]['tenant'] == tenant:
+                u_for_t.append(user[n])
+    return u_for_t
+
+
+def _tenants_from_users(users):
+    tenants = set()
+    for user in users:
+        for n in user:
+            tenants.add(user[n]['tenant'])
+    return tenants
+
+
+def _assign_swift_role(user):
+    admin = keystone_admin()
+    resp, roles = admin.identity.list_roles()
+    role = next(r for r in roles if r['name'] == 'Member')
+    LOG.debug(USERS[user])
+    try:
+        admin.identity.assign_user_role(
+            USERS[user]['tenant_id'],
+            USERS[user]['id'],
+            role['id'])
+    except exceptions.Conflict:
+        # don't care if it's already assigned
+        pass
+
+
+def create_users(users):
+    """Create tenants from resource definition.
+
+    Don't create the tenants if they already exist.
+    """
+    global USERS
+    LOG.info("Creating users")
+    admin = keystone_admin()
+    for u in users:
+        try:
+            tenant = admin.identity.get_tenant_by_name(u['tenant'])
+        except exceptions.NotFound:
+            LOG.error("Tenant: %s - not found" % u['tenant'])
+            continue
+        try:
+            admin.identity.get_user_by_username(tenant['id'], u['name'])
+            LOG.warn("User '%s' already exists in this environment"
+                     % u['name'])
+        except exceptions.NotFound:
+            admin.identity.create_user(
+                u['name'], u['pass'], tenant['id'],
+                "%s@%s" % (u['name'], tenant['id']),
+                enabled=True)
+
+
+def collect_users(users):
+    global USERS
+    LOG.info("Creating users")
+    admin = keystone_admin()
+    for u in users:
+        tenant = admin.identity.get_tenant_by_name(u['tenant'])
+        u['tenant_id'] = tenant['id']
+        USERS[u['name']] = u
+        body = admin.identity.get_user_by_username(tenant['id'], u['name'])
+        USERS[u['name']]['id'] = body['id']
+
+
+class JavelinCheck(unittest.TestCase):
+    def __init__(self, users, resources):
+        super(JavelinCheck, self).__init__()
+        self.users = users
+        self.res = resources
+
+    def runTest(self, *args):
+        pass
+
+    def check(self):
+        self.check_users()
+        self.check_objects()
+        self.check_servers()
+
+    def check_users(self):
+        """Check that the users we expect to exist, do.
+
+        We don't use the resource list for this because we need to validate
+        that things like tenantId didn't drift across versions.
+        """
+        for name, user in self.users.iteritems():
+            client = keystone_admin()
+            _, found = client.identity.get_user(user['id'])
+            self.assertEqual(found['name'], user['name'])
+            self.assertEqual(found['tenantId'], user['tenant_id'])
+
+            # also ensure we can auth with that user, and do something
+            # on the cloud. We don't care about the results except that it
+            # remains authorized.
+            client = client_for_user(user['name'])
+            resp, body = client.servers.list_servers()
+            self.assertEqual(resp['status'], '200')
+
+    def check_objects(self):
+        """Check that the objects created are still there."""
+        for obj in self.res['objects']:
+            client = client_for_user(obj['owner'])
+            r, contents = client.objects.get_object(
+                obj['container'], obj['name'])
+            source = _file_contents(obj['file'])
+            self.assertEqual(contents, source)
+
+    def check_servers(self):
+        """Check that the servers are still up and running."""
+        for server in self.res['servers']:
+            client = client_for_user(server['owner'])
+            found = _get_server_by_name(client, server['name'])
+            self.assertIsNotNone(
+                found,
+                "Couldn't find expected server %s" % server['name'])
+
+            r, found = client.servers.get_server(found['id'])
+            # get the ipv4 address
+            addr = found['addresses']['private'][0]['addr']
+            self.assertEqual(os.system("ping -c 1 " + addr), 0,
+                             "Server %s is not pingable at %s" % (
+                                 server['name'], addr))
+
+
+#######################
+#
+# OBJECTS
+#
+#######################
+
+
+def _file_contents(fname):
+    with open(fname, 'r') as f:
+        return f.read()
+
+
+def create_objects(objects):
+    LOG.info("Creating objects")
+    for obj in objects:
+        LOG.debug("Object %s" % obj)
+        _assign_swift_role(obj['owner'])
+        client = client_for_user(obj['owner'])
+        client.containers.create_container(obj['container'])
+        client.objects.create_object(
+            obj['container'], obj['name'],
+            _file_contents(obj['file']))
+
+#######################
+#
+# IMAGES
+#
+#######################
+
+
+def create_images(images):
+    for image in images:
+        client = client_for_user(image['owner'])
+
+        # only upload a new image if the name isn't there
+        r, body = client.images.image_list()
+        names = [x['name'] for x in body]
+        if image['name'] in names:
+            continue
+
+        # special handling for 3 part image
+        extras = {}
+        if image['format'] == 'ami':
+            r, aki = client.images.create_image(
+                'javelin_' + image['aki'], 'aki', 'aki')
+            client.images.store_image(aki.get('id'), open(image['aki'], 'r'))
+            extras['kernel_id'] = aki.get('id')
+
+            r, ari = client.images.create_image(
+                'javelin_' + image['ari'], 'ari', 'ari')
+            client.images.store_image(ari.get('id'), open(image['ari'], 'r'))
+            extras['ramdisk_id'] = ari.get('id')
+
+        r, body = client.images.create_image(
+            image['name'], image['format'], image['format'], **extras)
+        image_id = body.get('id')
+        client.images.store_image(image_id, open(image['file'], 'r'))
+
+
+#######################
+#
+# SERVERS
+#
+#######################
+
+def _get_server_by_name(client, name):
+    r, body = client.servers.list_servers()
+    for server in body['servers']:
+        if name == server['name']:
+            return server
+    return None
+
+
+def _get_image_by_name(client, name):
+    r, body = client.images.image_list()
+    for image in body:
+        if name == image['name']:
+            return image
+    return None
+
+
+def _get_flavor_by_name(client, name):
+    r, body = client.flavors.list_flavors()
+    for flavor in body:
+        if name == flavor['name']:
+            return flavor
+    return None
+
+
+def create_servers(servers):
+    for server in servers:
+        client = client_for_user(server['owner'])
+
+        if _get_server_by_name(client, server['name']):
+            continue
+
+        image_id = _get_image_by_name(client, server['image'])['id']
+        flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
+        client.servers.create_server(server['name'], image_id, flavor_id)
+
+
+#######################
+#
+# MAIN LOGIC
+#
+#######################
+
+def create_resources():
+    LOG.info("Creating Resources")
+    # first create keystone level resources, and we need to be admin
+    # for those.
+    create_tenants(RES['tenants'])
+    create_users(RES['users'])
+    collect_users(RES['users'])
+
+    # next create resources in a well known order
+    create_objects(RES['objects'])
+    create_images(RES['images'])
+    create_servers(RES['servers'])
+
+
+def get_options():
+    global OPTS
+    parser = argparse.ArgumentParser(
+        description='Create and validate a fixed set of OpenStack resources')
+    parser.add_argument('-m', '--mode',
+                        metavar='<create|check|destroy>',
+                        required=True,
+                        help=('One of (create, check, destroy)'))
+    parser.add_argument('-r', '--resources',
+                        required=True,
+                        metavar='resourcefile.yaml',
+                        help='Resources definition yaml file')
+    # auth bits, letting us also just source the devstack openrc
+    parser.add_argument('--os-username',
+                        metavar='<auth-user-name>',
+                        default=os.environ.get('OS_USERNAME'),
+                        help=('Defaults to env[OS_USERNAME].'))
+    parser.add_argument('--os-password',
+                        metavar='<auth-password>',
+                        default=os.environ.get('OS_PASSWORD'),
+                        help=('Defaults to env[OS_PASSWORD].'))
+    parser.add_argument('--os-tenant-name',
+                        metavar='<auth-tenant-name>',
+                        default=os.environ.get('OS_TENANT_NAME'),
+                        help=('Defaults to env[OS_TENANT_NAME].'))
+
+    OPTS = parser.parse_args()
+    if OPTS.mode not in ('create', 'check', 'destroy'):
+        print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
+        parser.print_help()
+        sys.exit(1)
+
+
+def setup_logging(debug=True):
+    global LOG
+    LOG = logging.getLogger(__name__)
+    if debug:
+        LOG.setLevel(logging.DEBUG)
+    else:
+        LOG.setLevel(logging.INFO)
+
+    ch = logging.StreamHandler(sys.stdout)
+    ch.setLevel(logging.DEBUG)
+    formatter = logging.Formatter(
+        datefmt='%Y-%m-%d %H:%M:%S',
+        fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
+    ch.setFormatter(formatter)
+    LOG.addHandler(ch)
+
+
+def main():
+    global RES
+    get_options()
+    setup_logging()
+    RES = load_resources(OPTS.resources)
+
+    if OPTS.mode == 'create':
+        create_resources()
+    elif OPTS.mode == 'check':
+        collect_users(RES['users'])
+        checker = JavelinCheck(USERS, RES)
+        checker.check()
+    elif OPTS.mode == 'destroy':
+        LOG.warn("Destroy mode not yet implemented")
+    else:
+        LOG.error('Unknown mode %s' % OPTS.mode)
+        return 1
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
new file mode 100644
index 0000000..f7cb8a9
--- /dev/null
+++ b/tempest/cmd/resources.yaml
@@ -0,0 +1,51 @@
+# This is a yaml description for the most basic definitions
+# of what should exist across the resource boundary. Perhaps
+# one day this will grow into a Heat resource template, but as
+# Heat isn't a known working element in the upgrades, we do
+# this much simpler thing for now.
+
+tenants:
+  - javelin
+  - discuss
+
+users:
+  - name: javelin
+    pass: gungnir
+    tenant: javelin
+  - name: javelin2
+    pass: gungnir2
+    tenant: discuss
+
+secgroups:
+  - angon:
+    owner: javelin
+    rules:
+      - 'icmp -1 -1 0.0.0.0/0'
+      - 'tcp 22 22 0.0.0.0/0'
+
+# resources that we want to create
+images:
+  - name: javelin_cirros
+    owner: javelin
+    file: cirros-0.3.2-x86_64-blank.img
+    format: ami
+    aki: cirros-0.3.2-x86_64-vmlinuz
+    ari: cirros-0.3.2-x86_64-initrd
+volumes:
+  - assegai:
+    - owner: javelin
+    - gb: 1
+servers:
+  - name: peltast
+    owner: javelin
+    flavor: m1.small
+    image: javelin_cirros
+  - name: hoplite
+    owner: javelin
+    flavor: m1.medium
+    image: javelin_cirros
+objects:
+  - container: jc1
+    name: javelin1
+    owner: javelin
+    file: /etc/hosts
diff --git a/tempest/stress/run_stress.py b/tempest/cmd/run_stress.py
similarity index 98%
rename from tempest/stress/run_stress.py
rename to tempest/cmd/run_stress.py
index c7c17c0..f773996 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -70,7 +70,29 @@
     return tests
 
 
-def main(ns):
+parser = argparse.ArgumentParser(description='Run stress tests')
+parser.add_argument('-d', '--duration', default=300, type=int,
+                    help="Duration of test in secs")
+parser.add_argument('-s', '--serial', action='store_true',
+                    help="Trigger running tests serially")
+parser.add_argument('-S', '--stop', action='store_true',
+                    default=False, help="Stop on first error")
+parser.add_argument('-n', '--number', type=int,
+                    help="How often an action is executed for each process")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('-a', '--all', action='store_true',
+                   help="Execute all stress tests")
+parser.add_argument('-T', '--type',
+                    help="Filters tests of a certain type (e.g. gate)")
+parser.add_argument('-i', '--call-inherited', action='store_true',
+                    default=False,
+                    help="Call also inherited function with stress attribute")
+group.add_argument('-t', "--tests", nargs='?',
+                   help="Name of the file with test description")
+
+
+def main():
+    ns = parser.parse_args()
     result = 0
     if not ns.all:
         tests = json.load(open(ns.tests, 'r'))
@@ -97,29 +119,9 @@
     return result
 
 
-parser = argparse.ArgumentParser(description='Run stress tests')
-parser.add_argument('-d', '--duration', default=300, type=int,
-                    help="Duration of test in secs")
-parser.add_argument('-s', '--serial', action='store_true',
-                    help="Trigger running tests serially")
-parser.add_argument('-S', '--stop', action='store_true',
-                    default=False, help="Stop on first error")
-parser.add_argument('-n', '--number', type=int,
-                    help="How often an action is executed for each process")
-group = parser.add_mutually_exclusive_group(required=True)
-group.add_argument('-a', '--all', action='store_true',
-                   help="Execute all stress tests")
-parser.add_argument('-T', '--type',
-                    help="Filters tests of a certain type (e.g. gate)")
-parser.add_argument('-i', '--call-inherited', action='store_true',
-                    default=False,
-                    help="Call also inherited function with stress attribute")
-group.add_argument('-t', "--tests", nargs='?',
-                   help="Name of the file with test description")
-
 if __name__ == "__main__":
     try:
-        sys.exit(main(parser.parse_args()))
+        sys.exit(main())
     except Exception:
         LOG.exception("Failure in the stress test framework")
         sys.exit(1)
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
new file mode 100755
index 0000000..3bf05e1
--- /dev/null
+++ b/tempest/cmd/verify_tempest_config.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import argparse
+import json
+import os
+import sys
+import urlparse
+
+import httplib2
+from six.moves import configparser
+
+from tempest import clients
+from tempest import config
+
+
+CONF = config.CONF
+RAW_HTTP = httplib2.Http()
+CONF_FILE = None
+OUTFILE = sys.stdout
+
+
+def _get_config_file():
+    default_config_dir = os.path.join(os.path.abspath(
+        os.path.dirname(os.path.dirname(__file__))), "etc")
+    default_config_file = "tempest.conf"
+
+    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
+    conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
+    path = os.path.join(conf_dir, conf_file)
+    fd = open(path, 'rw')
+    return fd
+
+
+def change_option(option, group, value):
+    config_parse = configparser.SafeConfigParser()
+    config_parse.optionxform = str
+    config_parse.readfp(CONF_FILE)
+    if not config_parse.has_section(group):
+        config_parse.add_section(group)
+    config_parse.set(group, option, str(value))
+    global OUTFILE
+    config_parse.write(OUTFILE)
+
+
+def print_and_or_update(option, group, value, update):
+    print('Config option %s in group %s should be changed to: %s'
+          % (option, group, value))
+    if update:
+        change_option(option, group, value)
+
+
+def verify_glance_api_versions(os, update):
+    # Check glance api versions
+    __, versions = os.image_client.get_versions()
+    if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
+                                             versions):
+        print_and_or_update('api_v1', 'image_feature_enabled',
+                            not CONF.image_feature_enabled.api_v1, update)
+    if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'image_feature_enabled',
+                            not CONF.image_feature_enabled.api_v2, update)
+
+
+def _get_unversioned_endpoint(base_url):
+    endpoint_parts = urlparse.urlparse(base_url)
+    endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
+    return endpoint
+
+
+def _get_api_versions(os, service):
+    client_dict = {
+        'nova': os.servers_client,
+        'keystone': os.identity_client,
+        'cinder': os.volumes_client,
+    }
+    client_dict[service].skip_path()
+    endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
+    __, body = RAW_HTTP.request(endpoint, 'GET')
+    client_dict[service].reset_path()
+    body = json.loads(body)
+    if service == 'keystone':
+        versions = map(lambda x: x['id'], body['versions']['values'])
+    else:
+        versions = map(lambda x: x['id'], body['versions'])
+    return versions
+
+
+def verify_keystone_api_versions(os, update):
+    # Check keystone api versions
+    versions = _get_api_versions(os, 'keystone')
+    if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'identity_feature_enabled',
+                            not CONF.identity_feature_enabled.api_v2, update)
+    if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+        print_and_or_update('api_v3', 'identity_feature_enabled',
+                            not CONF.identity_feature_enabled.api_v3, update)
+
+
+def verify_nova_api_versions(os, update):
+    versions = _get_api_versions(os, 'nova')
+    if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
+        print_and_or_update('api_v3', 'compute_feature_enabled',
+                            not CONF.compute_feature_enabled.api_v3, update)
+
+
+def verify_cinder_api_versions(os, update):
+    # Check cinder api versions
+    versions = _get_api_versions(os, 'cinder')
+    if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+        print_and_or_update('api_v1', 'volume_feature_enabled',
+                            not CONF.volume_feature_enabled.api_v1, update)
+    if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'volume_feature_enabled',
+                            not CONF.volume_feature_enabled.api_v2, update)
+
+
+def get_extension_client(os, service):
+    extensions_client = {
+        'nova': os.extensions_client,
+        'nova_v3': os.extensions_v3_client,
+        'cinder': os.volumes_extension_client,
+        'neutron': os.network_client,
+        'swift': os.account_client,
+    }
+    if service not in extensions_client:
+        print('No tempest extensions client for %s' % service)
+        exit(1)
+    return extensions_client[service]
+
+
+def get_enabled_extensions(service):
+    extensions_options = {
+        'nova': CONF.compute_feature_enabled.api_extensions,
+        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
+        'cinder': CONF.volume_feature_enabled.api_extensions,
+        'neutron': CONF.network_feature_enabled.api_extensions,
+        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
+    }
+    if service not in extensions_options:
+        print('No supported extensions list option for %s' % service)
+        exit(1)
+    return extensions_options[service]
+
+
+def verify_extensions(os, service, results):
+    extensions_client = get_extension_client(os, service)
+    __, resp = extensions_client.list_extensions()
+    if isinstance(resp, dict):
+        # For both Nova and Neutron we use the alias name rather than the
+        # 'name' field because the alias is considered to be the canonical
+        # name.
+        if service in ['nova', 'nova_v3', 'neutron']:
+            extensions = map(lambda x: x['alias'], resp['extensions'])
+        elif service == 'swift':
+            # Remove Swift general information from extensions list
+            resp.pop('swift')
+            extensions = resp.keys()
+        else:
+            extensions = map(lambda x: x['name'], resp['extensions'])
+
+    else:
+        extensions = map(lambda x: x['name'], resp)
+    if not results.get(service):
+        results[service] = {}
+    extensions_opt = get_enabled_extensions(service)
+    if extensions_opt[0] == 'all':
+        results[service]['extensions'] = extensions
+        return results
+    # Verify that all configured extensions are actually enabled
+    for extension in extensions_opt:
+        results[service][extension] = extension in extensions
+    # Verify that there aren't additional extensions enabled that aren't
+    # specified in the config list
+    for extension in extensions:
+        if extension not in extensions_opt:
+            results[service][extension] = False
+    return results
+
+
+def display_results(results, update, replace):
+    update_dict = {
+        'swift': 'object-storage-feature-enabled',
+        'nova': 'compute-feature-enabled',
+        'nova_v3': 'compute-feature-enabled',
+        'cinder': 'volume-feature-enabled',
+        'neutron': 'network-feature-enabled',
+    }
+    for service in results:
+        # If all extensions are specified as being enabled there is no way to
+        # verify this so we just assume this to be true
+        if results[service].get('extensions'):
+            if replace:
+                output_list = results[service].get('extensions')
+            else:
+                output_list = ['all']
+        else:
+            extension_list = get_enabled_extensions(service)
+            output_list = []
+            for extension in results[service]:
+                if not results[service][extension]:
+                    if extension in extension_list:
+                        print("%s extension: %s should not be included in the "
+                              "list of enabled extensions" % (service,
+                                                              extension))
+                    else:
+                        print("%s extension: %s should be included in the list"
+                              " of enabled extensions" % (service, extension))
+                        output_list.append(extension)
+                else:
+                    output_list.append(extension)
+        if update:
+            # Sort List
+            output_list.sort()
+            # Convert list to a string
+            output_string = ', '.join(output_list)
+            if service == 'swift':
+                change_option('discoverable_apis', update_dict[service],
+                              output_string)
+            elif service == 'nova_v3':
+                change_option('api_v3_extensions', update_dict[service],
+                              output_string)
+            else:
+                change_option('api_extensions', update_dict[service],
+                              output_string)
+
+
+def check_service_availability(os, update):
+    services = []
+    avail_services = []
+    codename_match = {
+        'volume': 'cinder',
+        'network': 'neutron',
+        'image': 'glance',
+        'object_storage': 'swift',
+        'compute': 'nova',
+        'orchestration': 'heat',
+        'metering': 'ceilometer',
+        'telemetry': 'ceilometer',
+        'data_processing': 'sahara',
+        'baremetal': 'ironic',
+        'identity': 'keystone',
+        'queuing': 'marconi',
+        'database': 'trove'
+    }
+    # Get catalog list for endpoints to use for validation
+    __, endpoints = os.endpoints_client.list_endpoints()
+    for endpoint in endpoints:
+        __, service = os.service_client.get_service(endpoint['service_id'])
+        services.append(service['type'])
+    # Pull all catalog types from config file and compare against endpoint list
+    for cfgname in dir(CONF._config):
+        cfg = getattr(CONF, cfgname)
+        catalog_type = getattr(cfg, 'catalog_type', None)
+        if not catalog_type:
+            continue
+        else:
+            if cfgname == 'identity':
+                # Keystone is a required service for tempest
+                continue
+            if catalog_type not in services:
+                if getattr(CONF.service_available, codename_match[cfgname]):
+                    print('Endpoint type %s not found; either disable '
+                          'service %s or fix the catalog_type in the config '
+                          'file' % (catalog_type, codename_match[cfgname]))
+                    if update:
+                        change_option(codename_match[cfgname],
+                                      'service_available', False)
+            else:
+                if not getattr(CONF.service_available,
+                               codename_match[cfgname]):
+                    print('Endpoint type %s is available, service %s should be'
+                          ' set as available in the config file.' % (
+                          catalog_type, codename_match[cfgname]))
+                    if update:
+                        change_option(codename_match[cfgname],
+                                      'service_available', True)
+                else:
+                    avail_services.append(codename_match[cfgname])
+    return avail_services
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-u', '--update', action='store_true',
+                        help='Update the config file with results from api '
+                             'queries. This assumes whatever is set in the '
+                             'config file is incorrect. In the case of '
+                             'endpoint checks, where either the catalog type '
+                             'or the service available option could be '
+                             'wrong, the service available option is assumed '
+                             'to be incorrect and is changed')
+    parser.add_argument('-o', '--output',
+                        help="Output file to write an updated config file to. "
+                             "This has to be a separate file from the "
+                             "original config file. If one isn't specified "
+                             "with -u the new config file will be printed to "
+                             "STDOUT")
+    parser.add_argument('-r', '--replace-ext', action='store_true',
+                        help="If specified the all option will be replaced "
+                             "with a full list of extensions")
+    args = parser.parse_args()
+    return args
+
+
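+# A hypothetical invocation (using the flags defined in parse_args() above;
+# the output file name is illustrative):
+#
+#     verify_tempest_config.py -u -r -o tempest.conf.new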
+def main():
+    print('Running config verification...')
+    opts = parse_args()
+    update = opts.update
+    replace = opts.replace_ext
+    global CONF_FILE
+    global OUTFILE
+    if update:
+        CONF_FILE = _get_config_file()
+        if opts.output:
+            OUTFILE = open(opts.output, 'w+')
+    os = clients.ComputeAdminManager(interface='json')
+    services = check_service_availability(os, update)
+    results = {}
+    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
+        if service == 'nova_v3' and 'nova' not in services:
+            continue
+        elif service not in services:
+            continue
+        results = verify_extensions(os, service, results)
+    verify_keystone_api_versions(os, update)
+    verify_glance_api_versions(os, update)
+    verify_nova_api_versions(os, update)
+    verify_cinder_api_versions(os, update)
+    display_results(results, update, replace)
+    if CONF_FILE:
+        CONF_FILE.close()
+    OUTFILE.close()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index c31a038..6720847 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -28,15 +28,13 @@
     args = shlex.split(cmd)
     subprocess_args = {'stdout': subprocess.PIPE,
                        'stderr': subprocess.STDOUT}
-    try:
-        proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
-                                **subprocess_args)
-        return proc.communicate()[0]
-        if proc.returncode != 0:
-            LOG.error(cmd + "returned with: " +
-                      proc.returncode + "exit status")
-    except subprocess.CalledProcessError as e:
-        LOG.error("command output:\n%s" % e.output)
+    proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
+                            **subprocess_args)
+    stdout = proc.communicate()[0]
+    if proc.returncode != 0:
+        LOG.error(("Command {0} returned with exit status {1},"
+                   "output {2}").format(cmd, proc.returncode, stdout))
+    return stdout
 
 
 def ip_addr_raw():
@@ -77,3 +75,22 @@
 
 def ovs_db_dump():
     return sudo_cmd_call("ovsdb-client dump")
+
+
+def copy_file_to_host(file_from, dest, host, username, pkey):
+    dest = "%s@%s:%s" % (username, host, dest)
+    cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
+          "-o StrictHostKeyChecking=no " \
+          "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
+                                              'file1': file_from,
+                                              'dest': dest}
+    args = shlex.split(cmd)
+    subprocess_args = {'stdout': subprocess.PIPE,
+                       'stderr': subprocess.STDOUT}
+    proc = subprocess.Popen(args, **subprocess_args)
+    stdout, stderr = proc.communicate()
+    if proc.returncode != 0:
+        LOG.error(("Command {0} returned with exit status {1},"
+                  "output {2}, error {3}").format(cmd, proc.returncode,
+                                                  stdout, stderr))
+    return stdout
diff --git a/tempest/common/debug.py b/tempest/common/debug.py
index 6a496c2..228be7a 100644
--- a/tempest/common/debug.py
+++ b/tempest/common/debug.py
@@ -20,7 +20,7 @@
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
 
-tables = ['filter', 'nat', 'mangle']
+TABLES = ['filter', 'nat', 'mangle']
 
 
 def log_ip_ns():
@@ -28,14 +28,14 @@
         return
     LOG.info("Host Addr:\n" + commands.ip_addr_raw())
     LOG.info("Host Route:\n" + commands.ip_route_raw())
-    for table in ['filter', 'nat', 'mangle']:
+    for table in TABLES:
         LOG.info('Host %s table:\n%s', table, commands.iptables_raw(table))
     ns_list = commands.ip_ns_list()
     LOG.info("Host ns list" + str(ns_list))
     for ns in ns_list:
         LOG.info("ns(%s) Addr:\n%s", ns, commands.ip_ns_addr(ns))
         LOG.info("ns(%s) Route:\n%s", ns, commands.ip_ns_route(ns))
-        for table in ['filter', 'nat', 'mangle']:
+        for table in TABLES:
             LOG.info('ns(%s) table(%s):\n%s', ns, table,
                      commands.iptables_ns(ns, table))
 
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 95d50e2..57b98f7 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -87,12 +87,6 @@
         "additionalProperties": False,
     }
 
-    def __new__(cls, *args, **kwargs):
-        if not cls._instance:
-            cls._instance = super(BasicGeneratorSet, cls).__new__(cls, *args,
-                                                                  **kwargs)
-        return cls._instance
-
     def __init__(self):
         self.types_dict = {}
         for m in dir(self):
@@ -129,7 +123,7 @@
                 raise Exception("non-integer list types not supported")
         result = []
         if schema_type not in self.types_dict:
-            raise Exception("generator (%s) doesn't support type: %s"
+            raise TypeError("generator (%s) doesn't support type: %s"
                             % (self.__class__.__name__, schema_type))
         for generator in self.types_dict[schema_type]:
             ret = generator(schema)
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index c54a8e8..b4618ed 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -14,9 +14,7 @@
 
 import netaddr
 
-import keystoneclient.v2_0.client as keystoneclient
-import neutronclient.v2_0.client as neutronclient
-
+from tempest import auth
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
@@ -42,24 +40,6 @@
         self.identity_admin_client, self.network_admin_client = (
             self._get_admin_clients())
 
-    def _get_official_admin_clients(self):
-        username = CONF.identity.admin_username
-        password = CONF.identity.admin_password
-        tenant_name = CONF.identity.admin_tenant_name
-        auth_url = CONF.identity.uri
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        identity_client = keystoneclient.Client(username=username,
-                                                password=password,
-                                                tenant_name=tenant_name,
-                                                auth_url=auth_url,
-                                                insecure=dscv)
-        network_client = neutronclient.Client(username=username,
-                                              password=password,
-                                              tenant_name=tenant_name,
-                                              auth_url=auth_url,
-                                              insecure=dscv)
-        return identity_client, network_client
-
     def _get_admin_clients(self):
         """
         Returns a tuple with instances of the following admin clients (in this
@@ -69,11 +49,11 @@
         """
         if self.tempest_client:
             os = clients.AdminManager(interface=self.interface)
-            admin_clients = (os.identity_client,
-                             os.network_client,)
         else:
-            admin_clients = self._get_official_admin_clients()
-        return admin_clients
+            os = clients.OfficialClientManager(
+                auth.get_default_credentials('identity_admin')
+            )
+        return os.identity_client, os.network_client
 
     def _create_tenant(self, name, description):
         if self.tempest_client:
@@ -185,22 +165,19 @@
                 self._assign_user_role(tenant['id'], user['id'], role['id'])
             else:
                 self._assign_user_role(tenant.id, user.id, role.id)
-        return user, tenant
+        return self._get_credentials(user, tenant)
 
-    def _get_cred_names(self, user, tenant):
+    def _get_credentials(self, user, tenant):
         if self.tempest_client:
-            username = user.get('name')
-            tenant_name = tenant.get('name')
+            user_get = user.get
+            tenant_get = tenant.get
         else:
-            username = user.name
-            tenant_name = tenant.name
-        return username, tenant_name
-
-    def _get_tenant_id(self, tenant):
-        if self.tempest_client:
-            return tenant.get('id')
-        else:
-            return tenant.id
+            user_get = user.__dict__.get
+            tenant_get = tenant.__dict__.get
+        return auth.get_credentials(
+            username=user_get('name'), user_id=user_get('id'),
+            tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
+            password=self.password)
 
     def _create_network_resources(self, tenant_id):
         network = None
@@ -314,24 +291,6 @@
             body = {'subnet_id': subnet_id}
             self.network_admin_client.add_interface_router(router_id, body)
 
-    def get_primary_tenant(self):
-        return self.isolated_creds.get('primary')[1]
-
-    def get_primary_user(self):
-        return self.isolated_creds.get('primary')[0]
-
-    def get_alt_tenant(self):
-        return self.isolated_creds.get('alt')[1]
-
-    def get_alt_user(self):
-        return self.isolated_creds.get('alt')[0]
-
-    def get_admin_tenant(self):
-        return self.isolated_creds.get('admin')[1]
-
-    def get_admin_user(self):
-        return self.isolated_creds.get('admin')[0]
-
     def get_primary_network(self):
         return self.isolated_net_resources.get('primary')[0]
 
@@ -359,62 +318,33 @@
     def get_alt_router(self):
         return self.isolated_net_resources.get('alt')[2]
 
-    def get_primary_creds(self):
-        if self.isolated_creds.get('primary'):
-            user, tenant = self.isolated_creds['primary']
-            username, tenant_name = self._get_cred_names(user, tenant)
+    def get_credentials(self, credential_type):
+        if self.isolated_creds.get(credential_type):
+            credentials = self.isolated_creds[credential_type]
         else:
-            user, tenant = self._create_creds()
-            username, tenant_name = self._get_cred_names(user, tenant)
-            self.isolated_creds['primary'] = (user, tenant)
-            LOG.info("Acquired isolated creds:\n user: %s, tenant: %s"
-                     % (username, tenant_name))
+            is_admin = (credential_type == 'admin')
+            credentials = self._create_creds(admin=is_admin)
+            self.isolated_creds[credential_type] = credentials
+            # Maintained until tests are ported
+            LOG.info("Acquired isolated creds:\n credentials: %s"
+                     % credentials)
             if CONF.service_available.neutron:
                 network, subnet, router = self._create_network_resources(
-                    self._get_tenant_id(tenant))
-                self.isolated_net_resources['primary'] = (
+                    credentials.tenant_id)
+                self.isolated_net_resources[credential_type] = (
                     network, subnet, router,)
                 LOG.info("Created isolated network resources for : \n"
-                         + " user: %s, tenant: %s" % (username, tenant_name))
-        return username, tenant_name, self.password
+                         + " credentials: %s" % credentials)
+        return credentials
+
+    def get_primary_creds(self):
+        return self.get_credentials('primary')
 
     def get_admin_creds(self):
-        if self.isolated_creds.get('admin'):
-            user, tenant = self.isolated_creds['admin']
-            username, tenant_name = self._get_cred_names(user, tenant)
-        else:
-            user, tenant = self._create_creds(admin=True)
-            username, tenant_name = self._get_cred_names(user, tenant)
-            self.isolated_creds['admin'] = (user, tenant)
-            LOG.info("Acquired admin isolated creds:\n user: %s, tenant: %s"
-                     % (username, tenant_name))
-            if CONF.service_available.neutron:
-                network, subnet, router = self._create_network_resources(
-                    self._get_tenant_id(tenant))
-                self.isolated_net_resources['admin'] = (
-                    network, subnet, router,)
-                LOG.info("Created isolated network resources for : \n"
-                         + " user: %s, tenant: %s" % (username, tenant_name))
-        return username, tenant_name, self.password
+        return self.get_credentials('admin')
 
     def get_alt_creds(self):
-        if self.isolated_creds.get('alt'):
-            user, tenant = self.isolated_creds['alt']
-            username, tenant_name = self._get_cred_names(user, tenant)
-        else:
-            user, tenant = self._create_creds()
-            username, tenant_name = self._get_cred_names(user, tenant)
-            self.isolated_creds['alt'] = (user, tenant)
-            LOG.info("Acquired alt isolated creds:\n user: %s, tenant: %s"
-                     % (username, tenant_name))
-            if CONF.service_available.neutron:
-                network, subnet, router = self._create_network_resources(
-                    self._get_tenant_id(tenant))
-                self.isolated_net_resources['alt'] = (
-                    network, subnet, router,)
-                LOG.info("Created isolated network resources for : \n"
-                         + " user: %s, tenant: %s" % (username, tenant_name))
-        return username, tenant_name, self.password
+        return self.get_credentials('alt')
 
     def _clear_isolated_router(self, router_id, router_name):
         net_client = self.network_admin_client
@@ -423,7 +353,6 @@
         except exceptions.NotFound:
             LOG.warn('router with name: %s not found for delete' %
                      router_name)
-            pass
 
     def _clear_isolated_subnet(self, subnet_id, subnet_name):
         net_client = self.network_admin_client
@@ -432,7 +361,6 @@
         except exceptions.NotFound:
             LOG.warn('subnet with name: %s not found for delete' %
                      subnet_name)
-            pass
 
     def _clear_isolated_network(self, network_id, network_name):
         net_client = self.network_admin_client
@@ -441,7 +369,6 @@
         except exceptions.NotFound:
             LOG.warn('network with name: %s not found for delete' %
                      network_name)
-            pass
 
     def _cleanup_ports(self, network_id):
         # TODO(mlavalle) This method will be removed once patch
@@ -487,7 +414,6 @@
                 except exceptions.NotFound:
                     LOG.warn('router with name: %s not found for delete' %
                              router['name'])
-                    pass
                 self._clear_isolated_router(router['id'], router['name'])
             if (not self.network_resources or
                 self.network_resources.get('network')):
@@ -505,29 +431,14 @@
         if not self.isolated_creds:
             return
         self._clear_isolated_net_resources()
-        for cred in self.isolated_creds:
-            user, tenant = self.isolated_creds.get(cred)
+        for creds in self.isolated_creds.itervalues():
             try:
-                if self.tempest_client:
-                    self._delete_user(user['id'])
-                else:
-                    self._delete_user(user.id)
+                self._delete_user(creds.user_id)
             except exceptions.NotFound:
-                if self.tempest_client:
-                    name = user['name']
-                else:
-                    name = user.name
-                LOG.warn("user with name: %s not found for delete" % name)
-                pass
+                LOG.warn("user with name: %s not found for delete" %
+                         creds.username)
             try:
-                if self.tempest_client:
-                    self._delete_tenant(tenant['id'])
-                else:
-                    self._delete_tenant(tenant.id)
+                self._delete_tenant(creds.tenant_id)
             except exceptions.NotFound:
-                if self.tempest_client:
-                    name = tenant['name']
-                else:
-                    name = tenant.name
-                LOG.warn("tenant with name: %s not found for delete" % name)
-                pass
+                LOG.warn("tenant with name: %s not found for delete" %
+                         creds.tenant_name)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 934b861..3c527f5 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -15,7 +15,6 @@
 #    under the License.
 
 import collections
-import inspect
 import json
 from lxml import etree
 import re
@@ -24,10 +23,11 @@
 import jsonschema
 
 from tempest.common import http
+from tempest.common.utils import misc as misc_utils
+from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
@@ -140,15 +140,23 @@
 
     @property
     def user(self):
-        return self.auth_provider.credentials.get('username', None)
+        return self.auth_provider.credentials.username
+
+    @property
+    def user_id(self):
+        return self.auth_provider.credentials.user_id
 
     @property
     def tenant_name(self):
-        return self.auth_provider.credentials.get('tenant_name', None)
+        return self.auth_provider.credentials.tenant_name
+
+    @property
+    def tenant_id(self):
+        return self.auth_provider.credentials.tenant_id
 
     @property
     def password(self):
-        return self.auth_provider.credentials.get('password', None)
+        return self.auth_provider.credentials.password
 
     @property
     def base_url(self):
@@ -197,26 +205,26 @@
                 details = pattern.format(read_code, expected_code)
                 raise exceptions.InvalidHttpSuccessCode(details)
 
-    def post(self, url, body, headers=None):
-        return self.request('POST', url, headers, body)
+    def post(self, url, body, headers=None, extra_headers=False):
+        return self.request('POST', url, extra_headers, headers, body)
 
-    def get(self, url, headers=None):
-        return self.request('GET', url, headers)
+    def get(self, url, headers=None, extra_headers=False):
+        return self.request('GET', url, extra_headers, headers)
 
-    def delete(self, url, headers=None, body=None):
-        return self.request('DELETE', url, headers, body)
+    def delete(self, url, headers=None, body=None, extra_headers=False):
+        return self.request('DELETE', url, extra_headers, headers, body)
 
-    def patch(self, url, body, headers=None):
-        return self.request('PATCH', url, headers, body)
+    def patch(self, url, body, headers=None, extra_headers=False):
+        return self.request('PATCH', url, extra_headers, headers, body)
 
-    def put(self, url, body, headers=None):
-        return self.request('PUT', url, headers, body)
+    def put(self, url, body, headers=None, extra_headers=False):
+        return self.request('PUT', url, extra_headers, headers, body)
 
-    def head(self, url, headers=None):
-        return self.request('HEAD', url, headers)
+    def head(self, url, headers=None, extra_headers=False):
+        return self.request('HEAD', url, extra_headers, headers)
 
-    def copy(self, url, headers=None):
-        return self.request('COPY', url, headers)
+    def copy(self, url, headers=None, extra_headers=False):
+        return self.request('COPY', url, extra_headers, headers)
 
     def get_versions(self):
         resp, body = self.get('')
@@ -224,81 +232,66 @@
         versions = map(lambda x: x['id'], body)
         return resp, versions
 
-    def _find_caller(self):
-        """Find the caller class and test name.
-
-        Because we know that the interesting things that call us are
-        test_* methods, and various kinds of setUp / tearDown, we
-        can look through the call stack to find appropriate methods,
-        and the class we were in when those were called.
-        """
-        caller_name = None
-        names = []
-        frame = inspect.currentframe()
-        is_cleanup = False
-        # Start climbing the ladder until we hit a good method
-        while True:
-            try:
-                frame = frame.f_back
-                name = frame.f_code.co_name
-                names.append(name)
-                if re.search("^(test_|setUp|tearDown)", name):
-                    cname = ""
-                    if 'self' in frame.f_locals:
-                        cname = frame.f_locals['self'].__class__.__name__
-                    if 'cls' in frame.f_locals:
-                        cname = frame.f_locals['cls'].__name__
-                    caller_name = cname + ":" + name
-                    break
-                elif re.search("^_run_cleanup", name):
-                    is_cleanup = True
-                else:
-                    cname = ""
-                    if 'self' in frame.f_locals:
-                        cname = frame.f_locals['self'].__class__.__name__
-                    if 'cls' in frame.f_locals:
-                        cname = frame.f_locals['cls'].__name__
-
-                    # the fact that we are running cleanups is indicated pretty
-                    # deep in the stack, so if we see that we want to just
-                    # start looking for a real class name, and declare victory
-                    # once we do.
-                    if is_cleanup and cname:
-                        if not re.search("^RunTest", cname):
-                            caller_name = cname + ":_run_cleanups"
-                            break
-            except Exception:
-                break
-        # prevents frame leaks
-        del frame
-        if caller_name is None:
-            self.LOG.debug("Sane call name not found in %s" % names)
-        return caller_name
-
     def _get_request_id(self, resp):
         for i in ('x-openstack-request-id', 'x-compute-request-id'):
             if i in resp:
                 return resp[i]
         return ""
 
-    def _log_request(self, method, req_url, resp, secs=""):
+    def _log_request_start(self, method, req_url, req_headers={},
+                           req_body=None):
+        caller_name = misc_utils.find_test_caller()
+        trace_regex = CONF.debug.trace_requests
+        if trace_regex and re.search(trace_regex, caller_name):
+            self.LOG.debug('Starting Request (%s): %s %s' %
+                           (caller_name, method, req_url))
+
+    def _log_request(self, method, req_url, resp,
+                     secs="", req_headers={},
+                     req_body=None, resp_body=None):
         # if we have the request id, put it in the right part of the log
         extra = dict(request_id=self._get_request_id(resp))
         # NOTE(sdague): while we still have 6 callers to this function
         # we're going to just provide work around on who is actually
         # providing timings by gracefully adding no content if they don't.
         # Once we're down to 1 caller, clean this up.
+        caller_name = misc_utils.find_test_caller()
         if secs:
             secs = " %.3fs" % secs
         self.LOG.info(
             'Request (%s): %s %s %s%s' % (
-                self._find_caller(),
+                caller_name,
                 resp['status'],
                 method,
                 req_url,
                 secs),
             extra=extra)
 
+        # We intentionally duplicate the info content here so that in a
+        # parallel run the detailed trace can still be matched to its request
+        trace_regex = CONF.debug.trace_requests
+        if trace_regex and re.search(trace_regex, caller_name):
+            if 'X-Auth-Token' in req_headers:
+                req_headers['X-Auth-Token'] = '<omitted>'
+            log_fmt = """Request (%s): %s %s %s%s
+    Request - Headers: %s
+        Body: %s
+    Response - Headers: %s
+        Body: %s"""
+
+            self.LOG.debug(
+                log_fmt % (
+                    caller_name,
+                    resp['status'],
+                    method,
+                    req_url,
+                    secs,
+                    str(req_headers),
+                    str(req_body)[:2048],
+                    str(resp),
+                    str(resp_body)[:2048]),
+                extra=extra)
+
     def _parse_resp(self, body):
         if self._get_type() is "json":
             body = json.loads(body)
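
The tracing added above is gated on the trace_requests regex from the [debug]
config group, matched against the discovered ClassName:method caller name. A
minimal standalone sketch of that gating (the caller names and patterns below
are illustrative, not taken from a real run):

    import re

    def should_trace(trace_regex, caller_name):
        # An empty regex disables tracing; otherwise trace whenever the
        # discovered caller name matches the configured pattern.
        if not trace_regex or not caller_name:
            return False
        return re.search(trace_regex, caller_name) is not None

    assert should_trace('.*', 'ServersTestJSON:test_create_server')
    assert should_trace('ServersTestJSON:.*', 'ServersTestJSON:setUpClass')
    assert not should_trace('', 'ServersTestJSON:test_create_server')
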
@@ -343,7 +336,7 @@
             # Parse one-item-like xmls (user, role, etc)
             return common.xml_to_json(element)
 
-    def response_checker(self, method, url, headers, body, resp, resp_body):
+    def response_checker(self, method, resp, resp_body):
         if (resp.status in set((204, 205, 304)) or resp.status < 200 or
                 method.upper() == 'HEAD') and resp_body:
             raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
@@ -379,23 +372,35 @@
 
         # Do the actual request, and time it
         start = time.time()
+        self._log_request_start(method, req_url)
         resp, resp_body = self.http_obj.request(
             req_url, method, headers=req_headers, body=req_body)
         end = time.time()
-        self._log_request(method, req_url, resp, secs=(end - start))
+        self._log_request(method, req_url, resp, secs=(end - start),
+                          req_headers=req_headers, req_body=req_body,
+                          resp_body=resp_body)
+
         # Verify HTTP response codes
-        self.response_checker(method, url, req_headers, req_body, resp,
-                              resp_body)
+        self.response_checker(method, resp, resp_body)
 
         return resp, resp_body
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
+        # if extra_headers is True, the default headers are merged into
+        # the headers passed by the caller
         retry = 0
 
         if headers is None:
             # NOTE(vponomaryov): if some client do not need headers,
             # it should explicitly pass empty dict
             headers = self.get_headers()
+        elif extra_headers:
+            try:
+                headers = headers.copy()
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
 
         resp, resp_body = self._request(method, url,
                                         headers=headers, body=body)
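
With the new extra_headers flag, per-call headers can be merged with the
client's defaults instead of replacing them. A hedged usage sketch (the client
instance, URL and header names are illustrative):

    # headers=None                      -> the client's default headers
    # headers={...}                     -> only the supplied headers
    # headers={...}, extra_headers=True -> supplied headers plus the defaults
    resp, body = client.request('POST', 'servers',
                                headers={'X-Custom-Header': 'value'},
                                extra_headers=True,
                                body='{"server": {}}')
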
@@ -452,7 +457,7 @@
             raise exceptions.InvalidContentType(str(resp.status))
 
         if resp.status == 401 or resp.status == 403:
-            raise exceptions.Unauthorized()
+            raise exceptions.Unauthorized(resp_body)
 
         if resp.status == 404:
             raise exceptions.NotFound(resp_body)
@@ -549,16 +554,18 @@
         # code if it exists is something that we expect. This is explicitly
         # declared in the V3 API and so we should be able to export this in
         # the response schema. For now we'll ignore it.
-        if str(resp.status).startswith('2'):
+        if resp.status in HTTP_SUCCESS:
             response_code = schema['status_code']
             if resp.status not in response_code:
                 msg = ("The status code(%s) is different than the expected "
                        "one(%s)") % (resp.status, response_code)
                 raise exceptions.InvalidHttpSuccessCode(msg)
-            response_schema = schema.get('response_body')
-            if response_schema:
+
+            # Check the body of a response
+            body_schema = schema.get('response_body')
+            if body_schema:
                 try:
-                    jsonschema.validate(body, response_schema)
+                    jsonschema.validate(body, body_schema)
                 except jsonschema.ValidationError as ex:
                     msg = ("HTTP response body is invalid (%s)") % ex
                     raise exceptions.InvalidHTTPResponseBody(msg)
@@ -567,6 +574,15 @@
                     msg = ("HTTP response body should not exist (%s)") % body
                     raise exceptions.InvalidHTTPResponseBody(msg)
 
+            # Check the header of a response
+            header_schema = schema.get('response_header')
+            if header_schema:
+                try:
+                    jsonschema.validate(resp, header_schema)
+                except jsonschema.ValidationError as ex:
+                    msg = ("HTTP response header is invalid (%s)") % ex
+                    raise exceptions.InvalidHTTPResponseHeader(msg)
+
 
 class NegativeRestClient(RestClient):
     """
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 00e5e0d..95b6833 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -112,3 +112,8 @@
     def turn_nic_on(self, nic):
         cmd = "sudo /bin/ip link set {nic} up".format(nic=nic)
         return self.exec_command(cmd)
+
+    def get_pids(self, pr_name):
+        # Get pid(s) of a process/program
+        cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
+        return self.exec_command(cmd).split('\n')
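
get_pids() returns one entry per matching ps line (plus an empty string from
the trailing newline), so callers typically filter the result. A hypothetical
usage sketch, assuming 'remote' is a connected RemoteClient:

    pids = remote.get_pids('dhclient')
    pids = [p for p in pids if p]  # drop the empty trailing entry
    assert len(pids) > 0
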
diff --git a/tempest/common/utils/misc.py b/tempest/common/utils/misc.py
index a0b0c0a..0d78273 100644
--- a/tempest/common/utils/misc.py
+++ b/tempest/common/utils/misc.py
@@ -13,6 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import inspect
+import re
+
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
 
 def singleton(cls):
     """Simple wrapper for classes that should only have a single instance."""
@@ -23,3 +30,58 @@
             instances[cls] = cls()
         return instances[cls]
     return getinstance
+
+
+def find_test_caller():
+    """Find the caller class and test name.
+
+    Because we know that the interesting things that call us are
+    test_* methods, and various kinds of setUp / tearDown, we
+    can look through the call stack to find appropriate methods,
+    and the class we were in when those were called.
+    """
+    caller_name = None
+    names = []
+    frame = inspect.currentframe()
+    is_cleanup = False
+    # Start climbing the ladder until we hit a good method
+    while True:
+        try:
+            frame = frame.f_back
+            name = frame.f_code.co_name
+            names.append(name)
+            if re.search("^(test_|setUp|tearDown)", name):
+                cname = ""
+                if 'self' in frame.f_locals:
+                    cname = frame.f_locals['self'].__class__.__name__
+                if 'cls' in frame.f_locals:
+                    cname = frame.f_locals['cls'].__name__
+                caller_name = cname + ":" + name
+                break
+            elif re.search("^_run_cleanup", name):
+                is_cleanup = True
+            elif name == 'main':
+                caller_name = 'main'
+                break
+            else:
+                cname = ""
+                if 'self' in frame.f_locals:
+                    cname = frame.f_locals['self'].__class__.__name__
+                if 'cls' in frame.f_locals:
+                    cname = frame.f_locals['cls'].__name__
+
+                # the fact that we are running cleanups is indicated pretty
+                # deep in the stack, so if we see that we want to just
+                # start looking for a real class name, and declare victory
+                # once we do.
+                if is_cleanup and cname:
+                    if not re.search("^RunTest", cname):
+                        caller_name = cname + ":_run_cleanups"
+                        break
+        except Exception:
+            break
+    # prevents frame leaks
+    del frame
+    if caller_name is None:
+        LOG.debug("Sane call name not found in %s" % names)
+    return caller_name
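
find_test_caller() climbs the stack and reports the nearest test_*,
setUp/tearDown, cleanup or main frame as 'ClassName:method', or None when
nothing suitable is found. A hedged sketch of what a caller can expect (class
and method names are illustrative):

    from tempest.common.utils import misc as misc_utils

    class ServersTestJSON(object):
        def test_create_server(self):
            # Inside a test method this would typically return
            # 'ServersTestJSON:test_create_server'.
            return misc_utils.find_test_caller()
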
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 8e6b9fb..d8474a0 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -13,6 +13,7 @@
 
 import time
 
+from tempest.common.utils import misc as misc_utils
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
@@ -21,6 +22,16 @@
 LOG = logging.getLogger(__name__)
 
 
+def _console_dump(client, server_id):
+    try:
+        resp, output = client.get_console_output(server_id, None)
+        LOG.debug("Console Output for Server %s:\n%s" % (
+            server_id, output))
+    except exceptions.NotFound:
+        LOG.debug("Server %s: doesn't have a console" % server_id)
+        pass
+
+
 # NOTE(afazekas): This function needs to know a token and a subject.
 def wait_for_server_status(client, server_id, status, ready_wait=True,
                            extra_timeout=0, raise_on_error=True):
@@ -70,7 +81,9 @@
                      '/'.join((old_status, str(old_task_state))),
                      '/'.join((server_status, str(task_state))),
                      time.time() - start_time)
+
         if (server_status == 'ERROR') and raise_on_error:
+            _console_dump(client, server_id)
             raise exceptions.BuildErrorException(server_id=server_id)
 
         timed_out = int(time.time()) - start_time >= timeout
@@ -86,6 +99,11 @@
                         'timeout': timeout})
             message += ' Current status: %s.' % server_status
             message += ' Current task state: %s.' % task_state
+
+            caller = misc_utils.find_test_caller()
+            if caller:
+                message = '(%s) %s' % (caller, message)
+            _console_dump(client, server_id)
             raise exceptions.TimeoutException(message)
         old_status = server_status
         old_task_state = task_state
@@ -119,4 +137,7 @@
                         'status': status,
                         'timeout': client.build_timeout})
             message += ' Current status: %s.' % image['status']
+            caller = misc_utils.find_test_caller()
+            if caller:
+                message = '(%s) %s' % (caller, message)
             raise exceptions.TimeoutException(message)
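
Timeout messages from the waiters now carry the discovered caller as a prefix.
An illustrative sketch of how the final message is composed (values are made
up):

    message = ('Server abc123 failed to reach ACTIVE status and task state '
               '"None" within the required time (300 s).')
    caller = 'ServersTestJSON:test_create_server'  # from find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    # -> '(ServersTestJSON:test_create_server) Server abc123 failed to ...'
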
diff --git a/tempest/services/compute/xml/common.py b/tempest/common/xml_utils.py
similarity index 100%
rename from tempest/services/compute/xml/common.py
rename to tempest/common/xml_utils.py
diff --git a/tempest/config.py b/tempest/config.py
index b0945bb..6b17885 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -72,6 +72,10 @@
                default=None,
                help="API key to use when authenticating.",
                secret=True),
+    cfg.StrOpt('domain_name',
+               default=None,
+               help="Domain name for authentication (Keystone V3)."
+                    "The same domain applies to user and project"),
     cfg.StrOpt('alt_username',
                default=None,
                help="Username of alternate user to use for Nova API "
@@ -84,6 +88,10 @@
                default=None,
                help="API key to use when authenticating as alternate user.",
                secret=True),
+    cfg.StrOpt('alt_domain_name',
+               default=None,
+               help="Alternate domain name for authentication (Keystone V3)."
+                    "The same domain applies to user and project"),
     cfg.StrOpt('admin_username',
                default=None,
                help="Administrative Username to use for "
@@ -96,6 +104,10 @@
                default=None,
                help="API key to use when authenticating as admin.",
                secret=True),
+    cfg.StrOpt('admin_domain_name',
+               default=None,
+               help="Admin domain name for authentication (Keystone V3)."
+                    "The same domain applies to user and project"),
 ]
 
 identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
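
For Keystone V3 the new domain options sit alongside the existing credentials;
a hedged tempest.conf fragment (all values are placeholders):

    [identity]
    auth_version = v3
    username = demo
    password = secret
    tenant_name = demo
    domain_name = Default
    alt_domain_name = Default
    admin_domain_name = Default
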
@@ -125,11 +137,12 @@
                      "better parallel execution, but also requires that "
                      "OpenStack Identity API admin credentials are known."),
     cfg.StrOpt('image_ref',
-               default="{$IMAGE_ID}",
-               help="Valid secondary image reference to be used in tests."),
+               help="Valid primary image reference to be used in tests. "
+                    "This is a required option"),
     cfg.StrOpt('image_ref_alt',
-               default="{$IMAGE_ID_ALT}",
-               help="Valid secondary image reference to be used in tests."),
+               help="Valid secondary image reference to be used in tests. "
+                    "This is a required option, but if only one image is "
+                    "available duplicate the value of image_ref above"),
     cfg.StrOpt('flavor_ref',
                default="1",
                help="Valid primary flavor to use in tests."),
@@ -151,7 +164,7 @@
                help="Password used to authenticate to an instance using "
                     "the alternate image."),
     cfg.IntOpt('build_interval',
-               default=10,
+               default=1,
                help="Time in seconds between build status checks."),
     cfg.IntOpt('build_timeout',
                default=300,
@@ -159,6 +172,19 @@
     cfg.BoolOpt('run_ssh',
                 default=False,
                 help="Should the tests ssh to instances?"),
+    cfg.StrOpt('ssh_auth_method',
+               default='keypair',
+               help="Auth method used for authenticate to the instance. "
+                    "Valid choices are: keypair, configured, adminpass. "
+                    "keypair: start the servers with an ssh keypair. "
+                    "configured: use the configured user and password. "
+                    "adminpass: use the injected adminPass. "
+                    "disabled: avoid using ssh when it is an option."),
+    cfg.StrOpt('ssh_connect_method',
+               default='fixed',
+               help="How to connect to the instance? "
+                    "fixed: using the first ip belongs the fixed network "
+                    "floating: creating and using a floating ip"),
     cfg.StrOpt('ssh_user',
                default='root',
                help="User name used to authenticate to an instance."),
@@ -189,7 +215,7 @@
                help="IP version used for SSH connections."),
     cfg.BoolOpt('use_floatingip_for_ssh',
                 default=True,
-                help="Dose the SSH uses Floating IP?"),
+                help="Does SSH use Floating IPs?"),
     cfg.StrOpt('catalog_type',
                default='compute',
                help="Catalog type of the Compute service."),
@@ -237,11 +263,13 @@
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled compute extensions with a special '
-                     'entry all which indicates every extension is enabled'),
+                     'entry all which indicates every extension is enabled. '
+                     'Each extension should be specified by its alias name'),
     cfg.ListOpt('api_v3_extensions',
                 default=['all'],
                 help='A list of enabled v3 extensions with a special entry all'
-                     ' which indicates every extension is enabled'),
+                     ' which indicates every extension is enabled. '
+                     'Each extension should be specified by its alias name'),
     cfg.BoolOpt('change_password',
                 default=False,
                 help="Does the test environment support changing the admin "
@@ -270,7 +298,15 @@
     cfg.BoolOpt('vnc_console',
                 default=False,
                 help='Enable VNC console. This configuration value should '
-                     'be same as [nova.vnc]->vnc_enabled in nova.conf')
+                     'be same as [nova.vnc]->vnc_enabled in nova.conf'),
+    cfg.BoolOpt('spice_console',
+                default=False,
+                help='Enable Spice console. This configuration value should '
+                     'be same as [nova.spice]->enabled in nova.conf'),
+    cfg.BoolOpt('rdp_console',
+                default=False,
+                help='Enable RDP console. This configuration value should '
+                     'be same as [nova.rdp]->enabled in nova.conf')
 ]
 
 
@@ -289,6 +325,10 @@
                default=None,
                help="API key to use when authenticating as admin.",
                secret=True),
+    cfg.StrOpt('domain_name',
+               default=None,
+               help="Domain name for authentication as admin (Keystone V3)."
+                    "The same domain applies to user and project"),
 ]
 
 image_group = cfg.OptGroup(name='image',
@@ -374,9 +414,13 @@
                help="Timeout in seconds to wait for network operation to "
                     "complete."),
     cfg.IntOpt('build_interval',
-               default=10,
+               default=1,
                help="Time in seconds between network operation status "
                     "checks."),
+    cfg.ListOpt('dns_servers',
+                default=["8.8.8.8", "8.8.4.4"],
+                help="List of dns servers whichs hould be used"
+                     " for subnet creation")
 ]
 
 network_feature_group = cfg.OptGroup(name='network-feature-enabled',
@@ -399,6 +443,10 @@
     cfg.StrOpt('catalog_type',
                default='queuing',
                help='Catalog type of the Queuing service.'),
+    cfg.IntOpt('max_queues_per_page',
+               default=20,
+               help='The maximum number of queue records per page when '
+                    'listing queues'),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
@@ -406,7 +454,7 @@
 
 VolumeGroup = [
     cfg.IntOpt('build_interval',
-               default=10,
+               default=1,
                help='Time in seconds between volume availability checks.'),
     cfg.IntOpt('build_timeout',
                default=300,
@@ -441,6 +489,9 @@
     cfg.StrOpt('disk_format',
                default='raw',
                help='Disk format to use when copying a volume to image'),
+    cfg.IntOpt('volume_size',
+               default=1,
+               help='Default size in GB for volumes created by volumes tests'),
 ]
 
 volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -453,6 +504,9 @@
     cfg.BoolOpt('backup',
                 default=True,
                 help='Runs Cinder volumes backup test'),
+    cfg.BoolOpt('snapshot',
+                default=True,
+                help='Runs Cinder volume snapshot test'),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled volume extensions with a special '
@@ -523,6 +577,9 @@
     cfg.StrOpt('db_flavor_ref',
                default="1",
                help="Valid primary flavor to use in database tests."),
+    cfg.StrOpt('db_current_version',
+               default="v1.0",
+               help="Current database version to use in database tests."),
 ]
 
 orchestration_group = cfg.OptGroup(name='orchestration',
@@ -553,7 +610,7 @@
                default=1,
                help="Time in seconds between build status checks."),
     cfg.IntOpt('build_timeout',
-               default=600,
+               default=1200,
                help="Timeout in seconds to wait for a stack to build."),
     cfg.StrOpt('instance_type',
                default='m1.micro',
@@ -569,6 +626,9 @@
     cfg.IntOpt('max_template_size',
                default=524288,
                help="Value must match heat configuration of the same name."),
+    cfg.IntOpt('max_resources_per_stack',
+               default=1000,
+               help="Value must match heat configuration of the same name."),
 ]
 
 
@@ -584,6 +644,10 @@
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the telemetry service."),
+    cfg.BoolOpt('too_slow_to_test',
+                default=True,
+                help="This variable is used as flag to enable "
+                     "notification tests")
 ]
 
 
@@ -788,6 +852,26 @@
     cfg.BoolOpt('enable',
                 default=True,
                 help="Enable diagnostic commands"),
+    cfg.StrOpt('trace_requests',
+               default='',
+               help="""A regex to determine which requests should be traced.
+
+This is a regex to match the caller for rest client requests to be able to
+selectively trace calls out of specific classes and methods. It largely
+exists for test development, and is not expected to be used in a real deploy
+of tempest. This will be matched against the discovered ClassName:method
+in the test environment.
+
+Expected values for this field are:
+
+ * ClassName:test_method_name - traces one test_method
+ * ClassName:setUp(Class) - traces specific setup functions
+ * ClassName:tearDown(Class) - traces specific teardown functions
+ * ClassName:_run_cleanups - traces the cleanup functions
+
+If nothing is specified, this feature is not enabled. To trace everything
+specify .* as the regex.
+""")
 ]
 
 input_scenario_group = cfg.OptGroup(name="input-scenario",
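
The help text above describes the caller-matching regex; a hedged example of
enabling tracing for a single test class in tempest.conf (the class name is
illustrative):

    [debug]
    trace_requests = ServersTestJSON:.*
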
@@ -818,13 +902,29 @@
 BaremetalGroup = [
     cfg.StrOpt('catalog_type',
                default='baremetal',
-               help="Catalog type of the baremetal provisioning service."),
+               help="Catalog type of the baremetal provisioning service"),
+    cfg.BoolOpt('driver_enabled',
+                default=False,
+                help="Whether the Ironic nova-compute driver is enabled"),
     cfg.StrOpt('endpoint_type',
                default='publicURL',
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the baremetal provisioning "
-                    "service."),
+                    "service"),
+    cfg.IntOpt('active_timeout',
+               default=300,
+               help="Timeout for Ironic node to completely provision"),
+    cfg.IntOpt('association_timeout',
+               default=10,
+               help="Timeout for association of Nova instance and Ironic "
+                    "node"),
+    cfg.IntOpt('power_timeout',
+               default=20,
+               help="Timeout for Ironic power transitions."),
+    cfg.IntOpt('unprovision_timeout',
+               default=20,
+               help="Timeout for unprovisioning an Ironic node.")
 ]
 
 cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options")
@@ -938,6 +1038,13 @@
             self.compute_admin.username = self.identity.admin_username
             self.compute_admin.password = self.identity.admin_password
             self.compute_admin.tenant_name = self.identity.admin_tenant_name
+        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+                             group='identity')
+        cfg.CONF.set_default('alt_domain_name',
+                             self.identity.admin_domain_name,
+                             group='identity')
+        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+                             group='compute-admin')
 
     def __init__(self, parse_conf=True):
         """Initialize a configuration from a conf directory and conf file."""
@@ -973,8 +1080,21 @@
 class TempestConfigProxy(object):
     _config = None
 
+    _extra_log_defaults = [
+        'keystoneclient.session=INFO',
+        'paramiko.transport=INFO',
+        'requests.packages.urllib3.connectionpool=WARN'
+    ]
+
+    def _fix_log_levels(self):
+        """Tweak the oslo log defaults."""
+        for opt in logging.log_opts:
+            if opt.dest == 'default_log_levels':
+                opt.default.extend(self._extra_log_defaults)
+
     def __getattr__(self, attr):
         if not self._config:
+            self._fix_log_levels()
             self._config = TempestConfigPrivate()
 
         return getattr(self._config, attr)
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
new file mode 100644
index 0000000..4eb1cea
--- /dev/null
+++ b/tempest/exceptions.py
@@ -0,0 +1,213 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+
+class TempestException(Exception):
+    """
+    Base Tempest Exception
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+    """
+    message = "An unknown exception occurred"
+
+    def __init__(self, *args, **kwargs):
+        super(TempestException, self).__init__()
+        try:
+            self._error_string = self.message % kwargs
+        except Exception:
+            # at least get the core message out if something happened
+            self._error_string = self.message
+        if len(args) > 0:
+            # If there is a non-kwarg parameter, assume it's the error
+            # message or reason description and tack it on to the end
+            # of the exception message
+            # Convert all arguments into their string representations...
+            args = ["%s" % arg for arg in args]
+            self._error_string = (self._error_string +
+                                  "\nDetails: %s" % '\n'.join(args))
+
+    def __str__(self):
+        return self._error_string
+
+
+class RestClientException(TempestException,
+                          testtools.TestCase.failureException):
+    pass
+
+
+class RFCViolation(RestClientException):
+    message = "RFC Violation"
+
+
+class InvalidConfiguration(TempestException):
+    message = "Invalid Configuration"
+
+
+class InvalidCredentials(TempestException):
+    message = "Invalid Credentials"
+
+
+class InvalidHttpSuccessCode(RestClientException):
+    message = "The success code is different than the expected one"
+
+
+class NotFound(RestClientException):
+    message = "Object not found"
+
+
+class Unauthorized(RestClientException):
+    message = 'Unauthorized'
+
+
+class InvalidServiceTag(RestClientException):
+    message = "Invalid service tag"
+
+
+class TimeoutException(TempestException):
+    message = "Request timed out"
+
+
+class BuildErrorException(TempestException):
+    message = "Server %(server_id)s failed to build and is in ERROR status"
+
+
+class ImageKilledException(TempestException):
+    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
+
+
+class AddImageException(TempestException):
+    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
+
+
+class EC2RegisterImageException(TempestException):
+    message = ("Image %(image_id)s failed to become 'available' "
+               "in the allotted time")
+
+
+class VolumeBuildErrorException(TempestException):
+    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+
+
+class SnapshotBuildErrorException(TempestException):
+    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
+
+
+class VolumeBackupException(TempestException):
+    message = "Volume backup %(backup_id)s failed and is in ERROR status"
+
+
+class StackBuildErrorException(TempestException):
+    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
+               "due to '%(stack_status_reason)s'")
+
+
+class StackResourceBuildErrorException(TempestException):
+    message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
+               "in %(resource_status)s status due to "
+               "'%(resource_status_reason)s'")
+
+
+class BadRequest(RestClientException):
+    message = "Bad request"
+
+
+class UnprocessableEntity(RestClientException):
+    message = "Unprocessable entity"
+
+
+class AuthenticationFailure(RestClientException):
+    message = ("Authentication with user %(user)s and password "
+               "%(password)s failed auth using tenant %(tenant)s.")
+
+
+class EndpointNotFound(TempestException):
+    message = "Endpoint not found"
+
+
+class RateLimitExceeded(TempestException):
+    message = "Rate limit exceeded"
+
+
+class OverLimit(TempestException):
+    message = "Quota exceeded"
+
+
+class ServerFault(TempestException):
+    message = "Got server fault"
+
+
+class ImageFault(TempestException):
+    message = "Got image fault"
+
+
+class IdentityError(TempestException):
+    message = "Got identity error"
+
+
+class Conflict(RestClientException):
+    message = "An object with that identifier already exists"
+
+
+class SSHTimeout(TempestException):
+    message = ("Connection to the %(host)s via SSH timed out.\n"
+               "User: %(user)s, Password: %(password)s")
+
+
+class SSHExecCommandFailed(TempestException):
+    """Raised when remotely executed command returns nonzero status."""
+    message = ("Command '%(command)s', exit status: %(exit_status)d, "
+               "Error:\n%(strerror)s")
+
+
+class ServerUnreachable(TempestException):
+    message = "The server is not reachable via the configured network"
+
+
+class TearDownException(TempestException):
+    message = "%(num)d cleanUp operation failed"
+
+
+class ResponseWithNonEmptyBody(RFCViolation):
+    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
+               "MUST NOT have a body")
+
+
+class ResponseWithEntity(RFCViolation):
+    message = ("RFC Violation! Response with 205 HTTP Status Code "
+               "MUST NOT have an entity")
+
+
+class InvalidHTTPResponseBody(RestClientException):
+    message = "HTTP response body is invalid json or xml"
+
+
+class InvalidHTTPResponseHeader(RestClientException):
+    message = "HTTP response header is invalid"
+
+
+class InvalidContentType(RestClientException):
+    message = "Invalid content type provided"
+
+
+class UnexpectedResponseCode(RestClientException):
+    message = "Unexpected response code received"
+
+
+class InvalidStructure(TempestException):
+    message = "Invalid structure of table with details"
diff --git a/tempest/exceptions/README.rst b/tempest/exceptions/README.rst
deleted file mode 100644
index dbe42b2..0000000
--- a/tempest/exceptions/README.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Tempest Field Guide to Exceptions
-=================================
-
-
-What are these exceptions?
---------------------------
-
-These exceptions are used by Tempest for covering OpenStack specific exceptional
-cases.
-
-How to add new exceptions?
---------------------------
-
-Each exception-template for inheritance purposes should be added into 'base'
-submodule.
-All other exceptions can be added in two ways:
-- in main module
-- in submodule
-But only in one of the ways. Need to make sure, that new exception is not
-present already.
-
-How to use exceptions?
-----------------------
-
-Any exceptions from this module or its submodules should be used in appropriate
-places to handle exceptional cases.
-Classes from 'base' module should be used only for inheritance.
diff --git a/tempest/exceptions/__init__.py b/tempest/exceptions/__init__.py
deleted file mode 100644
index d313def..0000000
--- a/tempest/exceptions/__init__.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.exceptions import base
-
-
-class InvalidConfiguration(base.TempestException):
-    message = "Invalid Configuration"
-
-
-class InvalidCredentials(base.TempestException):
-    message = "Invalid Credentials"
-
-
-class InvalidHttpSuccessCode(base.RestClientException):
-    message = "The success code is different than the expected one"
-
-
-class NotFound(base.RestClientException):
-    message = "Object not found"
-
-
-class Unauthorized(base.RestClientException):
-    message = 'Unauthorized'
-
-
-class InvalidServiceTag(base.RestClientException):
-    message = "Invalid service tag"
-
-
-class TimeoutException(base.TempestException):
-    message = "Request timed out"
-
-
-class BuildErrorException(base.TempestException):
-    message = "Server %(server_id)s failed to build and is in ERROR status"
-
-
-class ImageKilledException(base.TempestException):
-    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
-
-
-class AddImageException(base.TempestException):
-    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
-
-
-class EC2RegisterImageException(base.TempestException):
-    message = ("Image %(image_id)s failed to become 'available' "
-               "in the allotted time")
-
-
-class VolumeBuildErrorException(base.TempestException):
-    message = "Volume %(volume_id)s failed to build and is in ERROR status"
-
-
-class SnapshotBuildErrorException(base.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
-class VolumeBackupException(base.TempestException):
-    message = "Volume backup %(backup_id)s failed and is in ERROR status"
-
-
-class StackBuildErrorException(base.TempestException):
-    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
-               "due to '%(stack_status_reason)s'")
-
-
-class BadRequest(base.RestClientException):
-    message = "Bad request"
-
-
-class UnprocessableEntity(base.RestClientException):
-    message = "Unprocessable entity"
-
-
-class AuthenticationFailure(base.RestClientException):
-    message = ("Authentication with user %(user)s and password "
-               "%(password)s failed auth using tenant %(tenant)s.")
-
-
-class EndpointNotFound(base.TempestException):
-    message = "Endpoint not found"
-
-
-class RateLimitExceeded(base.TempestException):
-    message = "Rate limit exceeded"
-
-
-class OverLimit(base.TempestException):
-    message = "Quota exceeded"
-
-
-class ServerFault(base.TempestException):
-    message = "Got server fault"
-
-
-class ImageFault(base.TempestException):
-    message = "Got image fault"
-
-
-class IdentityError(base.TempestException):
-    message = "Got identity error"
-
-
-class Conflict(base.RestClientException):
-    message = "An object with that identifier already exists"
-
-
-class SSHTimeout(base.TempestException):
-    message = ("Connection to the %(host)s via SSH timed out.\n"
-               "User: %(user)s, Password: %(password)s")
-
-
-class SSHExecCommandFailed(base.TempestException):
-    """Raised when remotely executed command returns nonzero status."""
-    message = ("Command '%(command)s', exit status: %(exit_status)d, "
-               "Error:\n%(strerror)s")
-
-
-class ServerUnreachable(base.TempestException):
-    message = "The server is not reachable via the configured network"
-
-
-class TearDownException(base.TempestException):
-    message = "%(num)d cleanUp operation failed"
-
-
-class ResponseWithNonEmptyBody(base.RFCViolation):
-    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
-               "MUST NOT have a body")
-
-
-class ResponseWithEntity(base.RFCViolation):
-    message = ("RFC Violation! Response with 205 HTTP Status Code "
-               "MUST NOT have an entity")
-
-
-class InvalidHTTPResponseBody(base.RestClientException):
-    message = "HTTP response body is invalid json or xml"
-
-
-class InvalidContentType(base.RestClientException):
-    message = "Invalid content type provided"
-
-
-class UnexpectedResponseCode(base.RestClientException):
-    message = "Unexpected response code received"
-
-
-class InvalidStructure(base.TempestException):
-    message = "Invalid structure of table with details"
diff --git a/tempest/exceptions/base.py b/tempest/exceptions/base.py
deleted file mode 100644
index b8e470e..0000000
--- a/tempest/exceptions/base.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-
-
-class TempestException(Exception):
-    """
-    Base Tempest Exception
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-    """
-    message = "An unknown exception occurred"
-
-    def __init__(self, *args, **kwargs):
-        super(TempestException, self).__init__()
-        try:
-            self._error_string = self.message % kwargs
-        except Exception:
-            # at least get the core message out if something happened
-            self._error_string = self.message
-        if len(args) > 0:
-            # If there is a non-kwarg parameter, assume it's the error
-            # message or reason description and tack it on to the end
-            # of the exception message
-            # Convert all arguments into their string representations...
-            args = ["%s" % arg for arg in args]
-            self._error_string = (self._error_string +
-                                  "\nDetails: %s" % '\n'.join(args))
-
-    def __str__(self):
-        return self._error_string
-
-
-class RestClientException(TempestException,
-                          testtools.TestCase.failureException):
-    pass
-
-
-class RFCViolation(RestClientException):
-    message = "RFC Violation"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 7f39905..183d422 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -12,17 +12,20 @@
 #   License for the specific language governing permissions and limitations
 #   under the License.
 
+import os
 import re
 
+import pep8
+
 
 PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
                   'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
-                  'marconi']
+                  'marconi', 'sahara']
 
 PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
 TEST_DEFINITION = re.compile(r'^\s*def test.*')
 SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
-SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
 VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
 
 
@@ -47,7 +50,7 @@
     T104: Scenario tests require a services decorator
     """
 
-    if 'tempest/scenario' in filename:
+    if 'tempest/scenario/test_' in filename:
         if TEST_DEFINITION.match(physical_line):
             if not SCENARIO_DECORATOR.match(previous_logical):
                 return (physical_line.find('def'),
@@ -55,6 +58,10 @@
 
 
 def no_setupclass_for_unit_tests(physical_line, filename):
+
+    if pep8.noqa(physical_line):
+        return
+
     if 'tempest/tests' in filename:
         if SETUPCLASS_DEFINITION.match(physical_line):
             return (physical_line.find('def'),
@@ -75,8 +82,32 @@
             return 0, "T106: Don't put vi configuration in source files"
 
 
+def service_tags_not_in_module_path(physical_line, filename):
+    """Check that a service tag isn't in the module path
+
+    A service tag should only be added if the service name isn't already in
+    the module path.
+
+    T107
+    """
+    # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
+    # created for services like heat which would cause false positives for
+    # those tests, so just exclude the scenario tests.
+    if 'tempest/scenario' not in filename:
+        matches = SCENARIO_DECORATOR.match(physical_line)
+        if matches:
+            services = matches.group(1).split(',')
+            for service in services:
+                service_name = service.strip().strip("'")
+                modulepath = os.path.split(filename)[0]
+                if service_name in modulepath:
+                    return (physical_line.find(service_name),
+                            "T107: service tag should not be in path")
+
+
 def factory(register):
     register(import_no_clients_in_api)
     register(scenario_tests_need_service_tags)
     register(no_setupclass_for_unit_tests)
     register(no_vi_headers)
+    register(service_tags_not_in_module_path)
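
The T107 check parses the service names out of an @...services(...) decorator
and rejects any that already appear in the module path (scenario tests
excluded). A hedged illustration of what it would and would not flag, using
the decorator form found elsewhere in tempest (paths and tags are made up):

    # In tempest/api/compute/servers/test_servers.py:

    @test.services('compute')   # flagged: 'compute' is already in the path
    def test_list_servers(self):
        pass

    @test.services('image')     # allowed: 'image' is not in the path
    def test_rebuild_server(self):
        pass
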
diff --git a/tempest/manager.py b/tempest/manager.py
index 63235db..fb2842f 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -29,7 +29,7 @@
     and a client object for a test case to use in performing actions.
     """
 
-    def __init__(self, username=None, password=None, tenant_name=None):
+    def __init__(self, credentials=None):
         """
         We allow overriding of the credentials used within the various
         client classes managed by the Manager object. Left as None, the
@@ -38,29 +38,18 @@
         :param credentials: Override of the credentials
         """
         self.auth_version = CONF.identity.auth_version
-        # FIXME(andreaf) Change Manager __init__ to accept a credentials dict
-        if username is None or password is None:
-            # Tenant None is a valid use case
-            self.credentials = self.get_default_credentials()
+        if credentials is None:
+            self.credentials = auth.get_default_credentials('user')
         else:
-            self.credentials = dict(username=username, password=password,
-                                    tenant_name=tenant_name)
-        if self.auth_version == 'v3':
-            self.credentials['domain_name'] = 'Default'
+            self.credentials = credentials
+        # Check if passed or default credentials are valid
+        if not self.credentials.is_valid():
+            raise exceptions.InvalidCredentials()
         # Creates an auth provider for the credentials
         self.auth_provider = self.get_auth_provider(self.credentials)
         # FIXME(andreaf) unused
         self.client_attr_names = []
 
-    # we do this everywhere, have it be part of the super class
-    def _validate_credentials(self, username, password, tenant_name):
-        if None in (username, password, tenant_name):
-            msg = ("Missing required credentials. "
-                   "username: %(u)s, password: %(p)s, "
-                   "tenant_name: %(t)s" %
-                   {'u': username, 'p': password, 't': tenant_name})
-            raise exceptions.InvalidConfiguration(msg)
-
     @classmethod
     def get_auth_provider_class(cls, auth_version):
         if auth_version == 'v2':
@@ -68,13 +57,6 @@
         else:
             return auth.KeystoneV3AuthProvider
 
-    def get_default_credentials(self):
-        return dict(
-            username=CONF.identity.username,
-            password=CONF.identity.password,
-            tenant_name=CONF.identity.tenant_name
-        )
-
     def get_auth_provider(self, credentials):
         if credentials is None:
             raise exceptions.InvalidCredentials(
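
Manager now takes a single credentials object rather than discrete
username/password/tenant arguments. A hedged sketch of the two call styles
this enables:

    from tempest import auth
    from tempest import manager

    # Default: credentials built from the configured 'user' section.
    mgr = manager.Manager()

    # Explicit: pass a pre-built credentials object.
    admin_creds = auth.get_default_credentials('identity_admin')
    admin_mgr = manager.Manager(credentials=admin_creds)
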
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f06a850..7703d4d 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,21 +16,27 @@
 
 import logging
 import os
+import re
 import six
 import subprocess
+import time
 
+from heatclient import exc as heat_exceptions
 import netaddr
 from neutronclient.common import exceptions as exc
 from novaclient import exceptions as nova_exceptions
 
 from tempest.api.network import common as net_common
+from tempest import auth
 from tempest import clients
+from tempest.common import debug
 from tempest.common import isolated_creds
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log
+from tempest.openstack.common import timeutils
 import tempest.test
 
 CONF = config.CONF
@@ -65,42 +71,42 @@
             cls.__name__, tempest_client=False,
             network_resources=cls.network_resources)
 
-        username, password, tenant_name = cls.credentials()
-
         cls.manager = clients.OfficialClientManager(
-            username, password, tenant_name)
+            credentials=cls.credentials())
         cls.compute_client = cls.manager.compute_client
         cls.image_client = cls.manager.image_client
+        cls.baremetal_client = cls.manager.baremetal_client
         cls.identity_client = cls.manager.identity_client
         cls.network_client = cls.manager.network_client
         cls.volume_client = cls.manager.volume_client
         cls.object_storage_client = cls.manager.object_storage_client
         cls.orchestration_client = cls.manager.orchestration_client
+        cls.data_processing_client = cls.manager.data_processing_client
         cls.resource_keys = {}
         cls.os_resources = []
 
     @classmethod
-    def _get_credentials(cls, get_creds, prefix):
+    def _get_credentials(cls, get_creds, ctype):
         if CONF.compute.allow_tenant_isolation:
-            username, tenant_name, password = get_creds()
+            creds = get_creds()
         else:
-            username = getattr(CONF.identity, prefix + 'username')
-            password = getattr(CONF.identity, prefix + 'password')
-            tenant_name = getattr(CONF.identity, prefix + 'tenant_name')
-        return username, password, tenant_name
+            creds = auth.get_default_credentials(ctype)
+        return creds
 
     @classmethod
     def credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_primary_creds, '')
+        return cls._get_credentials(cls.isolated_creds.get_primary_creds,
+                                    'user')
 
     @classmethod
     def alt_credentials(cls):
-        return cls._get_credentials(cls.isolated_creds.get_alt_creds, 'alt_')
+        return cls._get_credentials(cls.isolated_creds.get_alt_creds,
+                                    'alt_user')
 
     @classmethod
     def admin_credentials(cls):
         return cls._get_credentials(cls.isolated_creds.get_admin_creds,
-                                    'admin_')
+                                    'identity_admin')
 
     @staticmethod
     def cleanup_resource(resource, test_name):
@@ -113,8 +119,10 @@
             resource.delete()
         except Exception as e:
             # If the resource is already missing, mission accomplished.
-            # add status code as workaround for bug 1247568
-            if (e.__class__.__name__ == 'NotFound' or
+            # - Status code tolerated as a workaround for bug 1247568
+            # - HTTPNotFound tolerated as this is currently raised when
+            # attempting to delete an already-deleted heat stack.
+            if (e.__class__.__name__ in ('NotFound', 'HTTPNotFound') or
                     (hasattr(e, 'status_code') and e.status_code == 404)):
                 return
             raise
@@ -278,12 +286,11 @@
         for ruleset in rulesets:
             sg_rule = client.security_group_rules.create(secgroup_id,
                                                          **ruleset)
-            self.set_resource(sg_rule.id, sg_rule)
             rules.append(sg_rule)
         return rules
 
     def create_server(self, client=None, name=None, image=None, flavor=None,
-                      create_kwargs={}):
+                      wait=True, create_kwargs={}):
         if client is None:
             client = self.compute_client
         if name is None:
@@ -318,7 +325,8 @@
         server = client.servers.create(name, image, flavor, **create_kwargs)
         self.assertEqual(server.name, name)
         self.set_resource(name, server)
-        self.status_timeout(client.servers, server.id, 'ACTIVE')
+        if wait:
+            self.status_timeout(client.servers, server.id, 'ACTIVE')
         # The instance retrieved on creation is missing network
         # details, necessitating retrieval after it becomes active to
         # ensure correct details.
@@ -403,7 +411,7 @@
             'name': name,
             'container_format': fmt,
             'disk_format': fmt,
-            'is_public': 'True',
+            'is_public': 'False',
         }
         params.update(properties)
         image = self.image_client.images.create(**params)
@@ -439,6 +447,155 @@
         LOG.debug("image:%s" % self.image)
 
 
+# power/provision states as of icehouse
+class BaremetalPowerStates(object):
+    """Possible power states of an Ironic node."""
+    POWER_ON = 'power on'
+    POWER_OFF = 'power off'
+    REBOOT = 'rebooting'
+    SUSPEND = 'suspended'
+
+
+class BaremetalProvisionStates(object):
+    """Possible provision states of an Ironic node."""
+    NOSTATE = None
+    INIT = 'initializing'
+    ACTIVE = 'active'
+    BUILDING = 'building'
+    DEPLOYWAIT = 'wait call-back'
+    DEPLOYING = 'deploying'
+    DEPLOYFAIL = 'deploy failed'
+    DEPLOYDONE = 'deploy complete'
+    DELETING = 'deleting'
+    DELETED = 'deleted'
+    ERROR = 'error'
+
+
+class BaremetalScenarioTest(OfficialClientTest):
+    @classmethod
+    def setUpClass(cls):
+        super(BaremetalScenarioTest, cls).setUpClass()
+
+        if (not CONF.service_available.ironic or
+           not CONF.baremetal.driver_enabled):
+            msg = 'Ironic not available or Ironic compute driver not enabled'
+            raise cls.skipException(msg)
+
+        # use an admin client manager for baremetal client
+        admin_creds = cls.admin_credentials()
+        manager = clients.OfficialClientManager(credentials=admin_creds)
+        cls.baremetal_client = manager.baremetal_client
+
+        # allow any issues obtaining the node list to raise early
+        cls.baremetal_client.node.list()
+
+    def _node_state_timeout(self, node_id, state_attr,
+                            target_states, timeout=10, interval=1):
+        if not isinstance(target_states, list):
+            target_states = [target_states]
+
+        def check_state():
+            node = self.get_node(node_id=node_id)
+            if getattr(node, state_attr) in target_states:
+                return True
+            return False
+
+        if not tempest.test.call_until_true(
+            check_state, timeout, interval):
+            msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
+                   (node_id, state_attr, target_states))
+            raise exceptions.TimeoutException(msg)
+
+    def wait_provisioning_state(self, node_id, state, timeout):
+        self._node_state_timeout(
+            node_id=node_id, state_attr='provision_state',
+            target_states=state, timeout=timeout)
+
+    def wait_power_state(self, node_id, state):
+        self._node_state_timeout(
+            node_id=node_id, state_attr='power_state',
+            target_states=state, timeout=CONF.baremetal.power_timeout)
+
+    def wait_node(self, instance_id):
+        """Waits for a node to be associated with instance_id."""
+        from ironicclient import exc as ironic_exceptions
+
+        def _get_node():
+            node = None
+            try:
+                node = self.get_node(instance_id=instance_id)
+            except ironic_exceptions.HTTPNotFound:
+                pass
+            return node is not None
+
+        if not tempest.test.call_until_true(
+            _get_node, CONF.baremetal.association_timeout, 1):
+            msg = ('Timed out waiting to get Ironic node by instance id %s'
+                   % instance_id)
+            raise exceptions.TimeoutException(msg)
+
+    def get_node(self, node_id=None, instance_id=None):
+        if node_id:
+            return self.baremetal_client.node.get(node_id)
+        elif instance_id:
+            return self.baremetal_client.node.get_by_instance_uuid(instance_id)
+
+    def get_ports(self, node_id):
+        ports = []
+        for port in self.baremetal_client.node.list_ports(node_id):
+            ports.append(self.baremetal_client.port.get(port.uuid))
+        return ports
+
+    def add_keypair(self):
+        self.keypair = self.create_keypair()
+
+    def verify_connectivity(self, ip=None):
+        if ip:
+            dest = self.get_remote_client(ip)
+        else:
+            dest = self.get_remote_client(self.instance)
+        dest.validate_authentication()
+
+    def boot_instance(self):
+        create_kwargs = {
+            'key_name': self.keypair.id
+        }
+        self.instance = self.create_server(
+            wait=False, create_kwargs=create_kwargs)
+
+        self.set_resource('instance', self.instance)
+
+        self.wait_node(self.instance.id)
+        self.node = self.get_node(instance_id=self.instance.id)
+
+        self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_ON)
+
+        self.wait_provisioning_state(
+            self.node.uuid,
+            [BaremetalProvisionStates.DEPLOYWAIT,
+             BaremetalProvisionStates.ACTIVE],
+            timeout=15)
+
+        self.wait_provisioning_state(self.node.uuid,
+                                     BaremetalProvisionStates.ACTIVE,
+                                     timeout=CONF.baremetal.active_timeout)
+
+        self.status_timeout(
+            self.compute_client.servers, self.instance.id, 'ACTIVE')
+
+        self.node = self.get_node(instance_id=self.instance.id)
+        self.instance = self.compute_client.servers.get(self.instance.id)
+
+    def terminate_instance(self):
+        self.instance.delete()
+        self.remove_resource('instance')
+        self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
+        self.wait_provisioning_state(
+            self.node.uuid,
+            BaremetalProvisionStates.NOSTATE,
+            timeout=CONF.baremetal.unprovision_timeout)
+
+
 class NetworkScenarioTest(OfficialClientTest):
     """
     Base class for network scenario tests
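
A hedged sketch of how a scenario test built on the new BaremetalScenarioTest
class would drive the helpers added above (the test class, its assertion and
the provision_state attribute are illustrative assumptions):

    class TestBaremetalBoot(BaremetalScenarioTest):

        def test_boot_and_delete(self):
            self.add_keypair()
            self.boot_instance()       # waits for power/provision states
            self.assertEqual(BaremetalProvisionStates.ACTIVE,
                             self.node.provision_state)
            self.terminate_instance()  # waits for POWER_OFF / NOSTATE
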
@@ -462,13 +619,7 @@
     @classmethod
     def setUpClass(cls):
         super(NetworkScenarioTest, cls).setUpClass()
-        if CONF.compute.allow_tenant_isolation:
-            cls.tenant_id = cls.isolated_creds.get_primary_tenant().id
-        else:
-            cls.tenant_id = cls.manager._get_identity_client(
-                CONF.identity.username,
-                CONF.identity.password,
-                CONF.identity.tenant_name).tenant_id
+        cls.tenant_id = cls.manager.identity_client.tenant_id
 
     def _create_network(self, tenant_id, namestart='network-smoke-'):
         name = data_utils.rand_name(namestart)
@@ -704,6 +855,51 @@
                                                   private_key)
             linux_client.validate_authentication()
 
+    def _check_public_network_connectivity(self, ip_address, username,
+                                           private_key, should_connect=True,
+                                           msg=None, servers=None):
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        LOG.debug('checking network connections to IP %s with user: %s' %
+                  (ip_address, username))
+        try:
+            self._check_vm_connectivity(ip_address,
+                                        username,
+                                        private_key,
+                                        should_connect=should_connect)
+        except Exception:
+            ex_msg = 'Public network connectivity check failed'
+            if msg:
+                ex_msg += ": " + msg
+            LOG.exception(ex_msg)
+            self._log_console_output(servers)
+            debug.log_net_debug()
+            raise
+
+    def _check_tenant_network_connectivity(self, server,
+                                           username,
+                                           private_key,
+                                           should_connect=True,
+                                           servers_for_debug=None):
+        if not CONF.network.tenant_networks_reachable:
+            msg = 'Tenant networks not configured to be reachable.'
+            LOG.info(msg)
+            return
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            for net_name, ip_addresses in server.networks.iteritems():
+                for ip_address in ip_addresses:
+                    self._check_vm_connectivity(ip_address,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+        except Exception:
+            LOG.exception('Tenant network connectivity check failed')
+            self._log_console_output(servers_for_debug)
+            debug.log_net_debug()
+            raise
+
     def _check_remote_connectivity(self, source, dest, should_succeed=True):
         """
         check ping server via source ssh connection
@@ -845,7 +1041,6 @@
             client=client,
             **sg_rule['security_group_rule']
         )
-        self.set_resource(sg_rule.id, sg_rule)
         self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
         self.assertEqual(secgroup.id, sg_rule.security_group_id)
 
@@ -974,10 +1169,10 @@
 
     @classmethod
     def credentials(cls):
-        username = CONF.identity.admin_username
-        password = CONF.identity.admin_password
-        tenant_name = CONF.identity.tenant_name
-        return username, password, tenant_name
+        admin_creds = auth.get_default_credentials('identity_admin')
+        creds = auth.get_default_credentials('user')
+        admin_creds.tenant_name = creds.tenant_name
+        return admin_creds
 
     def _load_template(self, base_file, file_name):
         filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
@@ -995,3 +1190,98 @@
         for net in networks['networks']:
             if net['name'] == CONF.compute.fixed_network_name:
                 return net
+
+    @staticmethod
+    def _stack_output(stack, output_key):
+        """Return a stack output value for a given key."""
+        return next((o['output_value'] for o in stack.outputs
+                    if o['output_key'] == output_key), None)
+
+    def _ping_ip_address(self, ip_address, should_succeed=True):
+        cmd = ['ping', '-c1', '-w1', ip_address]
+
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.wait()
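+            # A zero return code means the ping succeeded; compare against
+            # the expected outcome so callers can also wait for an address
+            # to become unreachable.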
+            return (proc.returncode == 0) == should_succeed
+
+        return tempest.test.call_until_true(
+            ping, CONF.orchestration.build_timeout, 1)
+
+    def _wait_for_resource_status(self, stack_identifier, resource_name,
+                                  status, failure_pattern='^.*_FAILED$'):
+        """Waits for a Resource to reach a given status."""
+        fail_regexp = re.compile(failure_pattern)
+        build_timeout = CONF.orchestration.build_timeout
+        build_interval = CONF.orchestration.build_interval
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            try:
+                res = self.client.resources.get(
+                    stack_identifier, resource_name)
+            except heat_exceptions.HTTPNotFound:
+                # ignore this, as the resource may not have
+                # been created yet
+                pass
+            else:
+                if res.resource_status == status:
+                    return
+                if fail_regexp.search(res.resource_status):
+                    raise exceptions.StackResourceBuildErrorException(
+                        resource_name=res.resource_name,
+                        stack_identifier=stack_identifier,
+                        resource_status=res.resource_status,
+                        resource_status_reason=res.resource_status_reason)
+            time.sleep(build_interval)
+
+        message = ('Resource %s failed to reach %s status within '
+                   'the required time (%s s).' %
+                   (resource_name, status, build_timeout))
+        raise exceptions.TimeoutException(message)
+
+    def _wait_for_stack_status(self, stack_identifier, status,
+                               failure_pattern='^.*_FAILED$'):
+        """
+        Waits for a Stack to reach a given status.
+
+        Note this compares the full $action_$status, e.g.
+        CREATE_COMPLETE, not just COMPLETE, which is exposed
+        via the status property of Stack in heatclient.
+        """
+        fail_regexp = re.compile(failure_pattern)
+        build_timeout = CONF.orchestration.build_timeout
+        build_interval = CONF.orchestration.build_interval
+
+        start = timeutils.utcnow()
+        while timeutils.delta_seconds(start,
+                                      timeutils.utcnow()) < build_timeout:
+            try:
+                stack = self.client.stacks.get(stack_identifier)
+            except heat_exceptions.HTTPNotFound:
+                # ignore this, as the stack may not have
+                # been created yet
+                pass
+            else:
+                if stack.stack_status == status:
+                    return
+                if fail_regexp.search(stack.stack_status):
+                    raise exceptions.StackBuildErrorException(
+                        stack_identifier=stack_identifier,
+                        stack_status=stack.stack_status,
+                        stack_status_reason=stack.stack_status_reason)
+            time.sleep(build_interval)
+
+        message = ('Stack %s failed to reach %s status within '
+                   'the required time (%s s).' %
+                   (stack_identifier, status, build_timeout))
+        raise exceptions.TimeoutException(message)
+
+    def _stack_delete(self, stack_identifier):
+        try:
+            self.client.stacks.delete(stack_identifier)
+        except heat_exceptions.HTTPNotFound:
+            pass
diff --git a/tempest/scenario/orchestration/cfn_init_signal.yaml b/tempest/scenario/orchestration/cfn_init_signal.yaml
new file mode 100644
index 0000000..c95aabf
--- /dev/null
+++ b/tempest/scenario/orchestration/cfn_init_signal.yaml
@@ -0,0 +1,82 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which uses a wait condition to confirm that a minimal
+  cfn-init and cfn-signal have worked
+Parameters:
+  key_name:
+    Type: String
+  flavor:
+    Type: String
+  image:
+    Type: String
+  network:
+    Type: String
+  timeout:
+    Type: Number
+Resources:
+  CfnUser:
+    Type: AWS::IAM::User
+  SmokeSecurityGroup:
+    Type: AWS::EC2::SecurityGroup
+    Properties:
+      GroupDescription: Enable only ping and SSH access
+      SecurityGroupIngress:
+      - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
+      - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
+  SmokeKeys:
+    Type: AWS::IAM::AccessKey
+    Properties:
+      UserName: {Ref: CfnUser}
+  SmokeServer:
+    Type: OS::Nova::Server
+    Metadata:
+      AWS::CloudFormation::Init:
+        config:
+          files:
+            /tmp/smoke-status:
+              content: smoke test complete
+            /etc/cfn/cfn-credentials:
+              content:
+                Fn::Replace:
+                - SmokeKeys: {Ref: SmokeKeys}
+                  SecretAccessKey:
+                    'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
+                - |
+                  AWSAccessKeyId=SmokeKeys
+                  AWSSecretKey=SecretAccessKey
+              mode: '000400'
+              owner: root
+              group: root
+    Properties:
+      image: {Ref: image}
+      flavor: {Ref: flavor}
+      key_name: {Ref: key_name}
+      security_groups:
+      - {Ref: SmokeSecurityGroup}
+      networks:
+      - uuid: {Ref: network}
+      user_data:
+        Fn::Replace:
+        - WaitHandle: {Ref: WaitHandle}
+        - |
+          #!/bin/bash -v
+          /opt/aws/bin/cfn-init
+          /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
+              --id smoke_status "WaitHandle"
+  WaitHandle:
+    Type: AWS::CloudFormation::WaitConditionHandle
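+  # The server signals WaitHandle from its user_data; this WaitCondition
+  # blocks stack completion until that signal arrives or 'timeout' expires.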
+  WaitCondition:
+    Type: AWS::CloudFormation::WaitCondition
+    DependsOn: SmokeServer
+    Properties:
+      Handle: {Ref: WaitHandle}
+      Timeout: {Ref: timeout}
+Outputs:
+  WaitConditionStatus:
+    Description: Contents of /tmp/smoke-status on SmokeServer
+    Value:
+      Fn::GetAtt: [WaitCondition, Data]
+  SmokeServerIp:
+    Description: IP address of server
+    Value:
+      Fn::GetAtt: [SmokeServer, first_address]
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
new file mode 100644
index 0000000..36e6126
--- /dev/null
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -0,0 +1,130 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
+
+    def setUp(self):
+        super(CfnInitScenarioTest, self).setUp()
+        if not CONF.orchestration.image_ref:
+            raise self.skipException("No image available to test")
+        self.client = self.orchestration_client
+        self.template_name = 'cfn_init_signal.yaml'
+
+    def assign_keypair(self):
+        self.stack_name = self._stack_rand_name()
+        if CONF.orchestration.keypair_name:
+            self.keypair = None
+            self.keypair_name = CONF.orchestration.keypair_name
+        else:
+            self.keypair = self.create_keypair()
+            self.keypair_name = self.keypair.id
+
+    def launch_stack(self):
+        net = self._get_default_network()
+        self.parameters = {
+            'key_name': self.keypair_name,
+            'flavor': CONF.orchestration.instance_type,
+            'image': CONF.orchestration.image_ref,
+            'timeout': CONF.orchestration.build_timeout,
+            'network': net['id'],
+        }
+
+        # create the stack
+        self.template = self._load_template(__file__, self.template_name)
+        self.client.stacks.create(
+            stack_name=self.stack_name,
+            template=self.template,
+            parameters=self.parameters)
+
+        self.stack = self.client.stacks.get(self.stack_name)
+        self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
+        self.addCleanup(self._stack_delete, self.stack_identifier)
+
+    def check_stack(self):
+        sid = self.stack_identifier
+        self._wait_for_resource_status(
+            sid, 'WaitHandle', 'CREATE_COMPLETE')
+        self._wait_for_resource_status(
+            sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
+        self._wait_for_resource_status(
+            sid, 'SmokeKeys', 'CREATE_COMPLETE')
+        self._wait_for_resource_status(
+            sid, 'CfnUser', 'CREATE_COMPLETE')
+        self._wait_for_resource_status(
+            sid, 'SmokeServer', 'CREATE_COMPLETE')
+
+        server_resource = self.client.resources.get(sid, 'SmokeServer')
+        server_id = server_resource.physical_resource_id
+        server = self.compute_client.servers.get(server_id)
+        server_ip = server.networks[CONF.compute.network_for_ssh][0]
+
+        if not self._ping_ip_address(server_ip):
+            self._log_console_output(servers=[server])
+            self.fail(
+                "Timed out waiting for %s to become reachable" % server_ip)
+
+        try:
+            self._wait_for_resource_status(
+                sid, 'WaitCondition', 'CREATE_COMPLETE')
+        except (exceptions.StackResourceBuildErrorException,
+                exceptions.TimeoutException) as e:
+            raise e
+        finally:
+            # Attempt to log the server console regardless of whether the
+            # WaitCondition completed. This allows successful and failed
+            # cloud-init logs to be compared.
+            self._log_console_output(servers=[server])
+
+        self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+
+        stack = self.client.stacks.get(sid)
+
+        # This is an assert of great significance, as it means the following
+        # has happened:
+        # - cfn-init read the provided metadata and wrote out a file
+        # - a user was created and credentials written to the server
+        # - a cfn-signal was built which was signed with provided credentials
+        # - the wait condition was fulfilled and the stack has changed state
+        wait_status = json.loads(
+            self._stack_output(stack, 'WaitConditionStatus'))
+        self.assertEqual('smoke test complete', wait_status['smoke_status'])
+
+        if self.keypair:
+            # Check that the user can authenticate with the generated
+            # keypair
+            try:
+                linux_client = self.get_remote_client(
+                    server_ip, username='ec2-user')
+                linux_client.validate_authentication()
+            except (exceptions.ServerUnreachable,
+                    exceptions.SSHTimeout) as e:
+                self._log_console_output(servers=[server])
+                raise e
+
+    @test.attr(type='slow')
+    @test.services('orchestration', 'compute')
+    def test_server_cfn_init(self):
+        self.assign_keypair()
+        self.launch_stack()
+        self.check_stack()
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 8e34c16..6817c48 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -54,8 +54,8 @@
     def _get_host_name(self):
         hosts = self.compute_client.hosts.list()
         self.assertTrue(len(hosts) >= 1)
-        hostname = hosts[0].host_name
-        return hostname
+        computes = [x for x in hosts if x.service == 'compute']
+        return computes[0].host_name
 
     def _add_host(self, aggregate_name, host):
         aggregate = self.compute_client.aggregates.add_host(aggregate_name,
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
new file mode 100644
index 0000000..f197c15
--- /dev/null
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -0,0 +1,62 @@
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class BaremetalBasicOpsPXESSH(manager.BaremetalScenarioTest):
+    """
+    This smoke test exercises the pxe_ssh Ironic driver. It performs the
+    following basic set of operations:
+        * Creates a keypair
+        * Boots an instance using the keypair
+        * Monitors the associated Ironic node for power and
+          expected state transitions
+        * Validates Ironic node's port data has been properly updated
+        * Verifies SSH connectivity using created keypair via fixed IP
+        * Associates a floating ip
+        * Verifies SSH connectivity using created keypair via floating IP
+        * Deletes instance
+        * Monitors the associated Ironic node for power and
+          expected state transitions
+    """
+    def add_floating_ip(self):
+        floating_ip = self.compute_client.floating_ips.create()
+        self.instance.add_floating_ip(floating_ip)
+        return floating_ip.ip
+
+    def validate_ports(self):
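+        # Each Ironic port should be backed by a Neutron port (referenced by
+        # the 'vif_port_id' extra attribute) that is bound to this instance
+        # and shares its MAC address.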
+        for port in self.get_ports(self.node.uuid):
+            n_port_id = port.extra['vif_port_id']
+            n_port = self.network_client.show_port(n_port_id)['port']
+            self.assertEqual(n_port['device_id'], self.instance.id)
+            self.assertEqual(n_port['mac_address'], port.address)
+
+    @test.services('baremetal', 'compute', 'image', 'network')
+    def test_baremetal_server_ops(self):
+        self.add_keypair()
+        self.boot_instance()
+        self.validate_ports()
+        self.verify_connectivity()
+        floating_ip = self.add_floating_ip()
+        self.verify_connectivity(ip=floating_ip)
+        self.terminate_instance()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b7a30f8..ed5743c 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -31,7 +31,7 @@
     Test large operations.
 
     This test below:
-    * Spin up multiple instances in one nova call
+    * Spin up multiple instances in one nova call, and repeat three times
     * as a regular user
     * TODO: same thing for cinder
 
@@ -63,9 +63,20 @@
             self.set_resource(server.name, server)
         self._wait_for_server_status('ACTIVE')
 
-    @test.services('compute', 'image')
-    def test_large_ops_scenario(self):
+    def _large_ops_scenario(self):
         if CONF.scenario.large_ops_number < 1:
             return
         self.glance_image_create()
         self.nova_boot()
+
+    @test.services('compute', 'image')
+    def test_large_ops_scenario_1(self):
+        self._large_ops_scenario()
+
+    @test.services('compute', 'image')
+    def test_large_ops_scenario_2(self):
+        self._large_ops_scenario()
+
+    @test.services('compute', 'image')
+    def test_large_ops_scenario_3(self):
+        self._large_ops_scenario()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index f7a3d6f..1c24b5c 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -13,11 +13,14 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+
+import httplib
+import tempfile
 import time
-import urllib
+import urllib2
 
 from tempest.api.network import common as net_common
-from tempest.common.utils import data_utils
+from tempest.common import commands
 from tempest import config
 from tempest import exceptions
 from tempest.scenario import manager
@@ -59,24 +62,46 @@
     def setUpClass(cls):
         super(TestLoadBalancerBasic, cls).setUpClass()
         cls.check_preconditions()
-        cls.security_groups = {}
         cls.servers_keypairs = {}
         cls.members = []
         cls.floating_ips = {}
-        cls.server_ip = None
-        cls.vip_ip = None
+        cls.server_ips = {}
         cls.port1 = 80
         cls.port2 = 88
 
-    def _create_security_groups(self):
-        self.security_groups[self.tenant_id] =\
-            self._create_security_group_neutron(tenant_id=self.tenant_id)
+    def setUp(self):
+        super(TestLoadBalancerBasic, self).setUp()
+        self.server_ips = {}
+        self.server_fixed_ips = {}
+        self._create_security_group()
 
-    def _create_server(self):
-        tenant_id = self.tenant_id
-        name = data_utils.rand_name("smoke_server-")
+    def cleanup_wrapper(self, resource):
+        self.cleanup_resource(resource, self.__class__.__name__)
+
+    def _create_security_group(self):
+        self.security_group = self._create_security_group_neutron(
+            tenant_id=self.tenant_id)
+        self._create_security_group_rules_for_port(self.port1)
+        self._create_security_group_rules_for_port(self.port2)
+        self.addCleanup(self.cleanup_wrapper, self.security_group)
+
+    def _create_security_group_rules_for_port(self, port):
+        rule = {
+            'direction': 'ingress',
+            'protocol': 'tcp',
+            'port_range_min': port,
+            'port_range_max': port,
+        }
+        self._create_security_group_rule(
+            client=self.network_client,
+            secgroup=self.security_group,
+            tenant_id=self.tenant_id,
+            **rule)
+
+    def _create_server(self, name):
         keypair = self.create_keypair(name='keypair-%s' % name)
-        security_groups = [self.security_groups[tenant_id].name]
+        self.addCleanup(self.cleanup_wrapper, keypair)
+        security_groups = [self.security_group.name]
         net = self._list_networks(tenant_id=self.tenant_id)[0]
         create_kwargs = {
             'nics': [
@@ -87,51 +112,89 @@
         }
         server = self.create_server(name=name,
                                     create_kwargs=create_kwargs)
-        self.servers_keypairs[server] = keypair
+        self.addCleanup(self.cleanup_wrapper, server)
+        self.servers_keypairs[server.id] = keypair
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             public_network_id = config.network.public_network_id
             floating_ip = self._create_floating_ip(
                 server, public_network_id)
+            self.addCleanup(self.cleanup_wrapper, floating_ip)
             self.floating_ips[floating_ip] = server
-            self.server_ip = floating_ip.floating_ip_address
+            self.server_ips[server.id] = floating_ip.floating_ip_address
         else:
-            self.server_ip = server.networks[net['name']][0]
+            self.server_ips[server.id] = server.networks[net['name']][0]
+        self.server_fixed_ips[server.id] = server.networks[net['name']][0]
         self.assertTrue(self.servers_keypairs)
         return server
 
-    def _start_servers(self, server):
+    def _create_servers(self):
+        for count in range(2):
+            self._create_server(name=("server%s" % (count + 1)))
+        self.assertEqual(len(self.servers_keypairs), 2)
+
+    def _start_servers(self):
         """
+        Start two backends
+
         1. SSH to the instance
         2. Start two http backends listening on ports 80 and 88 respectively
         """
 
-        private_key = self.servers_keypairs[server].private_key
-        ssh_client = self.get_remote_client(
-            server_or_ip=self.server_ip,
-            private_key=private_key).ssh_client
-        start_server = "while true; do echo -e 'HTTP/1.0 200 OK\r\n\r\n" \
-                       "%(server)s' | sudo nc -l -p %(port)s ; done &"
-        cmd = start_server % {'server': 'server1',
-                              'port': self.port1}
-        ssh_client.exec_command(cmd)
-        cmd = start_server % {'server': 'server2',
-                              'port': self.port2}
-        ssh_client.exec_command(cmd)
+        for server_id, ip in self.server_ips.iteritems():
+            private_key = self.servers_keypairs[server_id].private_key
+            server_name = self.compute_client.servers.get(server_id).name
+            username = config.scenario.ssh_user
+            ssh_client = self.get_remote_client(
+                server_or_ip=ip,
+                private_key=private_key)
+            ssh_client.validate_authentication()
 
-    def _check_connection(self, check_ip):
-        def try_connect(ip):
+            # Write this backend's response into a file
+            resp = """HTTP/1.0 200 OK\r\nContent-Length: 8\r\n\r\n%s"""
+            with tempfile.NamedTemporaryFile() as script:
+                script.write(resp % server_name)
+                script.flush()
+                with tempfile.NamedTemporaryFile() as key:
+                    key.write(private_key)
+                    key.flush()
+                    commands.copy_file_to_host(script.name,
+                                               "~/script1",
+                                               ip,
+                                               username, key.name)
+            # Start netcat
+            start_server = """sudo nc -ll -p %(port)s -e cat """ \
+                           """~/%(script)s &"""
+            cmd = start_server % {'port': self.port1,
+                                  'script': 'script1'}
+            ssh_client.exec_command(cmd)
+            if len(self.server_ips) == 1:
+                with tempfile.NamedTemporaryFile() as script:
+                    script.write(resp % 'server2')
+                    script.flush()
+                    with tempfile.NamedTemporaryFile() as key:
+                        key.write(private_key)
+                        key.flush()
+                        commands.copy_file_to_host(script.name,
+                                                   "~/script2", ip,
+                                                   username, key.name)
+                cmd = start_server % {'port': self.port2,
+                                      'script': 'script2'}
+                ssh_client.exec_command(cmd)
+
+    def _check_connection(self, check_ip, port=80):
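+        # Poll the address until it answers with HTTP 200, giving up after
+        # the compute ping_timeout has elapsed.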
+        def try_connect(ip, port):
             try:
-                urllib.urlopen("http://{0}/".format(ip))
-                return True
+                resp = urllib2.urlopen("http://{0}:{1}/".format(ip, port))
+                if resp.getcode() == 200:
+                    return True
+                return False
             except IOError:
                 return False
         timeout = config.compute.ping_timeout
-        timer = 0
-        while not try_connect(check_ip):
-            time.sleep(1)
-            timer += 1
-            if timer >= timeout:
+        start = time.time()
+        while not try_connect(check_ip, port):
+            if (time.time() - start) > timeout:
                 message = "Timed out trying to connect to %s" % check_ip
                 raise exceptions.TimeoutException(message)
 
@@ -142,30 +205,37 @@
         self.subnet = net_common.DeletableSubnet(client=self.network_client,
                                                  **subnet)
         self.pool = super(TestLoadBalancerBasic, self)._create_pool(
-            'ROUND_ROBIN',
-            'HTTP',
-            self.subnet.id)
+            lb_method='ROUND_ROBIN',
+            protocol='HTTP',
+            subnet_id=self.subnet.id)
+        self.addCleanup(self.cleanup_wrapper, self.pool)
         self.assertTrue(self.pool)
 
-    def _create_members(self, server_ids):
+    def _create_members(self):
         """
         Create two members.
 
         In case there is only one server, create both members with the same ip
         but with different ports to listen on.
         """
-        servers = self.compute_client.servers.list()
-        for server in servers:
-            if server.id in server_ids:
-                ip = self.server_ip
-                pool_id = self.pool.id
-                if len(set(server_ids)) == 1 or len(servers) == 1:
-                    member1 = self._create_member(ip, self.port1, pool_id)
-                    member2 = self._create_member(ip, self.port2, pool_id)
-                    self.members.extend([member1, member2])
-                else:
-                    member = self._create_member(ip, self.port1, pool_id)
-                    self.members.append(member)
+
+        for server_id, ip in self.server_fixed_ips.iteritems():
+            if len(self.server_fixed_ips) == 1:
+                member1 = self._create_member(address=ip,
+                                              protocol_port=self.port1,
+                                              pool_id=self.pool.id)
+                self.addCleanup(self.cleanup_wrapper, member1)
+                member2 = self._create_member(address=ip,
+                                              protocol_port=self.port2,
+                                              pool_id=self.pool.id)
+                self.addCleanup(self.cleanup_wrapper, member2)
+                self.members.extend([member1, member2])
+            else:
+                member = self._create_member(address=ip,
+                                             protocol_port=self.port1,
+                                             pool_id=self.pool.id)
+                self.addCleanup(self.cleanup_wrapper, member)
+                self.members.append(member)
         self.assertTrue(self.members)
 
     def _assign_floating_ip_to_vip(self, vip):
@@ -173,22 +243,23 @@
         port_id = vip.port_id
         floating_ip = self._create_floating_ip(vip, public_network_id,
                                                port_id=port_id)
+        self.addCleanup(self.cleanup_wrapper, floating_ip)
         self.floating_ips.setdefault(vip.id, [])
         self.floating_ips[vip.id].append(floating_ip)
 
     def _create_load_balancer(self):
         self._create_pool()
-        self._create_members([self.servers_keypairs.keys()[0].id])
-        subnet_id = self.subnet.id
-        pool_id = self.pool.id
-        self.vip = super(TestLoadBalancerBasic, self)._create_vip('HTTP', 80,
-                                                                  subnet_id,
-                                                                  pool_id)
-        self._status_timeout(NeutronRetriever(self.network_client,
-                                              self.network_client.vip_path,
-                                              net_common.DeletableVip),
-                             self.vip.id,
-                             expected_status='ACTIVE')
+        self._create_members()
+        self.vip = self._create_vip(protocol='HTTP',
+                                    protocol_port=80,
+                                    subnet_id=self.subnet.id,
+                                    pool_id=self.pool.id)
+        self.addCleanup(self.cleanup_wrapper, self.vip)
+        self.status_timeout(NeutronRetriever(self.network_client,
+                                             self.network_client.vip_path,
+                                             net_common.DeletableVip),
+                            self.vip.id,
+                            expected_status='ACTIVE')
         if (config.network.public_network_id and not
                 config.network.tenant_networks_reachable):
             self._assign_floating_ip_to_vip(self.vip)
@@ -206,27 +277,48 @@
         """
 
         self._check_connection(self.vip_ip)
-        resp = []
-        for count in range(10):
-            resp.append(
-                urllib.urlopen(
-                    "http://{0}/".format(self.vip_ip)).read())
-        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
-        self.assertEqual(5, resp.count("server1\n"))
-        self.assertEqual(5, resp.count("server2\n"))
+        self._send_requests(self.vip_ip, set(["server1", "server2"]))
 
-    @test.skip_because(bug='1295165')
+    def _send_requests(self, vip_ip, expected, num_req=10):
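+        # With ROUND_ROBIN balancing, one request per pool member should
+        # return one response from each distinct backend, so the set of
+        # responses in every round is expected to match 'expected'.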
+        count = 0
+        while count < num_req:
+            try:
+                resp = []
+                for i in range(len(self.members)):
+                    resp.append(
+                        urllib2.urlopen(
+                            "http://{0}/".format(vip_ip)).read())
+                count += 1
+                self.assertEqual(expected,
+                                 set(resp))
+            # NOTE: There is always a slim chance of getting this exception
+            #       due to quirks of haproxy's internal behavior.
+            except httplib.BadStatusLine:
+                pass
+
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
-        self._create_security_groups()
-        server = self._create_server()
-        self._start_servers(server)
+        self._create_server('server1')
+        self._start_servers()
         self._create_load_balancer()
         self._check_load_balancing()
 
 
 class NeutronRetriever(object):
+    """
+    Helper class that allows Neutron objects returned by GET requests to be
+    handled as attribute dicts.
+
+    When the get() method is called, the returned dictionary is wrapped in
+    the corresponding DeletableResource class, which provides attribute
+    access to the dictionary values.
+
+    Usage:
+        This retriever allows status_timeout from tempest.manager to be
+        used with Neutron objects.
+    """
+
     def __init__(self, network_client, path, resource):
         self.network_client = network_client
         self.path = path
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e7e97b5..f1cd320 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -13,12 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import debug
+import testtools
+
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
-from tempest.test import services
+from tempest import test
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
@@ -83,62 +84,23 @@
                                                     public_network_id)
         self.addCleanup(self.cleanup_wrapper, self.floating_ip)
 
-    def _check_tenant_network_connectivity(self, server,
-                                           username,
-                                           private_key,
-                                           should_connect=True):
-        if not CONF.network.tenant_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for net_name, ip_addresses in server.networks.iteritems():
-                for ip_address in ip_addresses:
-                    self._check_vm_connectivity(ip_address,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
-        except Exception:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers=[server])
-            debug.log_ip_ns()
-            raise
-
-    def _check_public_network_connectivity(self, floating_ip,
-                                           username,
-                                           private_key,
-                                           should_connect=True):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            self._check_vm_connectivity(floating_ip, username, private_key,
-                                        should_connect=should_connect)
-        except Exception:
-            LOG.exception("Public network connectivity check failed")
-            debug.log_ip_ns()
-            raise
-
     def _check_network_connectivity(self, should_connect=True):
         username = CONF.compute.image_ssh_user
         private_key = self.keypair.private_key
-        self._check_tenant_network_connectivity(self.server,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
+        self._check_tenant_network_connectivity(
+            self.server, username, private_key, should_connect=should_connect,
+            servers_for_debug=[self.server])
         floating_ip = self.floating_ip.floating_ip_address
-        self._check_public_network_connectivity(floating_ip,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
+        self._check_public_network_connectivity(floating_ip, username,
+                                                private_key, should_connect,
+                                                servers=[self.server])
 
     def _wait_server_status_and_check_network_connectivity(self):
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'ACTIVE')
         self._check_network_connectivity()
 
-    @services('compute', 'network')
+    @test.services('compute', 'network')
     def test_server_connectivity_stop_start(self):
         self.server.stop()
         self.status_timeout(self.compute_client.servers, self.server.id,
@@ -147,18 +109,20 @@
         self.server.start()
         self._wait_server_status_and_check_network_connectivity()
 
-    @services('compute', 'network')
+    @test.services('compute', 'network')
     def test_server_connectivity_reboot(self):
         self.server.reboot()
         self._wait_server_status_and_check_network_connectivity()
 
-    @services('compute', 'network')
+    @test.services('compute', 'network')
     def test_server_connectivity_rebuild(self):
         image_ref_alt = CONF.compute.image_ref_alt
         self.server.rebuild(image_ref_alt)
         self._wait_server_status_and_check_network_connectivity()
 
-    @services('compute', 'network')
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
+    @test.services('compute', 'network')
     def test_server_connectivity_pause_unpause(self):
         self.server.pause()
         self.status_timeout(self.compute_client.servers, self.server.id,
@@ -167,7 +131,9 @@
         self.server.unpause()
         self._wait_server_status_and_check_network_connectivity()
 
-    @services('compute', 'network')
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
+    @test.services('compute', 'network')
     def test_server_connectivity_suspend_resume(self):
         self.server.suspend()
         self.status_timeout(self.compute_client.servers, self.server.id,
@@ -176,11 +142,10 @@
         self.server.resume()
         self._wait_server_status_and_check_network_connectivity()
 
-    @services('compute', 'network')
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize is not available.')
+    @test.services('compute', 'network')
     def test_server_connectivity_resize(self):
-        if not CONF.compute_feature_enabled.resize:
-            msg = "Skipping test - resize not available on this host"
-            raise self.skipException(msg)
         resize_flavor = CONF.compute.flavor_ref_alt
         if resize_flavor == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index d5ab3d3..c84d4b9 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -87,6 +87,8 @@
 
     @classmethod
     def setUpClass(cls):
+        # Create no network resources for these tests.
+        cls.set_network_resources()
         super(TestNetworkBasicOps, cls).setUpClass()
         for ext in ['router', 'security-group']:
             if not test.is_extension_enabled(ext, 'network'):
@@ -156,24 +158,13 @@
         return dict(server=server, keypair=keypair)
 
     def _check_tenant_network_connectivity(self):
-        if not CONF.network.tenant_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
         ssh_login = CONF.compute.image_ssh_user
-        try:
-            for server, key in self.servers.iteritems():
-                for net_name, ip_addresses in server.networks.iteritems():
-                    for ip_address in ip_addresses:
-                        self._check_vm_connectivity(ip_address, ssh_login,
-                                                    key.private_key)
-        except Exception:
-            LOG.exception('Tenant connectivity check failed')
-            self._log_console_output(servers=self.servers.keys())
-            debug.log_net_debug()
-            raise
+        for server, key in self.servers.iteritems():
+            # call the common method in the parent class
+            super(TestNetworkBasicOps, self).\
+                _check_tenant_network_connectivity(
+                    server, ssh_login, key.private_key,
+                    servers_for_debug=self.servers.keys())
 
     def _create_and_associate_floating_ips(self):
         public_network_id = CONF.network.public_network_id
@@ -184,28 +175,16 @@
 
     def _check_public_network_connectivity(self, should_connect=True,
                                            msg=None):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
         ssh_login = CONF.compute.image_ssh_user
-        LOG.debug('checking network connections')
         floating_ip, server = self.floating_ip_tuple
         ip_address = floating_ip.floating_ip_address
         private_key = None
         if should_connect:
             private_key = self.servers[server].private_key
-        try:
-            self._check_vm_connectivity(ip_address,
-                                        ssh_login,
-                                        private_key,
-                                        should_connect=should_connect)
-        except Exception:
-            ex_msg = 'Public network connectivity check failed'
-            if msg:
-                ex_msg += ": " + msg
-            LOG.exception(ex_msg)
-            self._log_console_output(servers=self.servers.keys())
-            debug.log_net_debug()
-            raise
+        # call the common method in the parent class
+        super(TestNetworkBasicOps, self)._check_public_network_connectivity(
+            ip_address, ssh_login, private_key, should_connect, msg,
+            self.servers.keys())
 
     def _disassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index b9ee040..b4e509a 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -98,17 +98,10 @@
             access point
         """
 
-        def __init__(self, tenant_id, tenant_user, tenant_pass, tenant_name):
-            self.manager = clients.OfficialClientManager(
-                tenant_user,
-                tenant_pass,
-                tenant_name
-            )
-            self.keypair = None
-            self.tenant_id = tenant_id
-            self.tenant_name = tenant_name
-            self.tenant_user = tenant_user
-            self.tenant_pass = tenant_pass
+        def __init__(self, credentials):
+            self.manager = clients.OfficialClientManager(credentials)
+            # Credentials from manager are filled with both names and IDs
+            self.creds = self.manager.credentials
             self.network = None
             self.subnet = None
             self.router = None
@@ -121,12 +114,14 @@
             self.router = router
 
         def _get_tenant_credentials(self):
-            return self.tenant_user, self.tenant_pass, self.tenant_name
+            # FIXME(andreaf) Unused method
+            return self.creds
 
     @classmethod
     def check_preconditions(cls):
         super(TestSecurityGroupsBasicOps, cls).check_preconditions()
-        if (cls.alt_tenant_id is None) or (cls.tenant_id is cls.alt_tenant_id):
+        if (cls.alt_creds is None) or \
+                (cls.tenant_id is cls.alt_creds.tenant_id):
             msg = 'No alt_tenant defined'
             cls.enabled = False
             raise cls.skipException(msg)
@@ -139,22 +134,23 @@
 
     @classmethod
     def setUpClass(cls):
+        # Create no network resources for these tests.
+        cls.set_network_resources()
         super(TestSecurityGroupsBasicOps, cls).setUpClass()
-        alt_creds = cls.alt_credentials()
-        cls.alt_tenant_id = cls.manager._get_identity_client(
-            *alt_creds
-        ).tenant_id
+        cls.alt_creds = cls.alt_credentials()
+        cls.alt_manager = clients.OfficialClientManager(cls.alt_creds)
+        # Credentials from the manager are filled with both IDs and Names
+        cls.alt_creds = cls.alt_manager.credentials
         cls.check_preconditions()
         # TODO(mnewby) Consider looking up entities as needed instead
         # of storing them as collections on the class.
         cls.floating_ips = {}
         cls.tenants = {}
-        cls.primary_tenant = cls.TenantProperties(cls.tenant_id,
-                                                  *cls.credentials())
-        cls.alt_tenant = cls.TenantProperties(cls.alt_tenant_id,
-                                              *alt_creds)
+        creds = cls.credentials()
+        cls.primary_tenant = cls.TenantProperties(creds)
+        cls.alt_tenant = cls.TenantProperties(cls.alt_creds)
         for tenant in [cls.primary_tenant, cls.alt_tenant]:
-            cls.tenants[tenant.tenant_id] = tenant
+            cls.tenants[tenant.creds.tenant_id] = tenant
         cls.floating_ip_access = not CONF.network.public_router_id
 
     def cleanup_wrapper(self, resource):
@@ -175,14 +171,14 @@
     def _create_tenant_security_groups(self, tenant):
         access_sg = self._create_empty_security_group(
             namestart='secgroup_access-',
-            tenant_id=tenant.tenant_id
+            tenant_id=tenant.creds.tenant_id
         )
         self.addCleanup(self.cleanup_wrapper, access_sg)
 
         # don't use default secgroup since it allows in-tenant traffic
         def_sg = self._create_empty_security_group(
             namestart='secgroup_general-',
-            tenant_id=tenant.tenant_id
+            tenant_id=tenant.creds.tenant_id
         )
         self.addCleanup(self.cleanup_wrapper, def_sg)
         tenant.security_groups.update(access=access_sg, default=def_sg)
@@ -239,7 +235,7 @@
             ],
             'key_name': tenant.keypair.name,
             'security_groups': security_groups,
-            'tenant_id': tenant.tenant_id
+            'tenant_id': tenant.creds.tenant_id
         }
         server = self.create_server(name=name, create_kwargs=create_kwargs)
         self.addCleanup(self.cleanup_wrapper, server)
@@ -248,7 +244,7 @@
     def _create_tenant_servers(self, tenant, num=1):
         for i in range(num):
             name = 'server-{tenant}-gen-{num}-'.format(
-                   tenant=tenant.tenant_name,
+                   tenant=tenant.creds.tenant_name,
                    num=i
             )
             name = data_utils.rand_name(name)
@@ -262,8 +258,8 @@
         workaround ip namespace
         """
         secgroups = [sg.name for sg in tenant.security_groups.values()]
-        name = 'server-{tenant}-access_point-'.format(tenant=tenant.tenant_name
-                                                      )
+        name = 'server-{tenant}-access_point-'.format(
+            tenant=tenant.creds.tenant_name)
         name = data_utils.rand_name(name)
         server = self._create_server(name, tenant,
                                      security_groups=secgroups)
@@ -277,7 +273,7 @@
         self.floating_ips.setdefault(server, floating_ip)
 
     def _create_tenant_network(self, tenant):
-        network, subnet, router = self._create_networks(tenant.tenant_id)
+        network, subnet, router = self._create_networks(tenant.creds.tenant_id)
         for r in [network, router, subnet]:
             self.addCleanup(self.cleanup_wrapper, r)
         tenant.set_network(network, subnet, router)
@@ -300,7 +296,7 @@
             tenant_id = tenant_or_id
         else:
             tenant = tenant_or_id
-            tenant_id = tenant.tenant_id
+            tenant_id = tenant.creds.tenant_id
         self._set_compute_context(tenant)
         self._create_tenant_keypairs(tenant_id)
         self._create_tenant_network(tenant)
@@ -335,8 +331,6 @@
         if should_succeed:
             msg = "Timed out waiting for %s to become reachable" % ip
         else:
-            # todo(yfried): remove this line when bug 1252620 is fixed
-            return True
             msg = "%s is reachable" % ip
         try:
             self.assertTrue(self._check_remote_connectivity(access_point, ip,
@@ -422,11 +416,15 @@
         access_point_ssh = self._connect_to_access_point(tenant)
         mac_addr = access_point_ssh.get_mac_address()
         mac_addr = mac_addr.strip().lower()
-        port_list = self.network_client.list_ports()['ports']
+        # Get the fixed_ips and mac_address fields of all ports. Select
+        # only those two columns to reduce the size of the response.
+        port_list = self.network_client.list_ports(
+            fields=['fixed_ips', 'mac_address'])['ports']
         port_detail_list = [
             (port['fixed_ips'][0]['subnet_id'],
              port['fixed_ips'][0]['ip_address'],
-             port['mac_address'].lower()) for port in port_list
+             port['mac_address'].lower())
+            for port in port_list if port['fixed_ips']
         ]
         server_ip = self._get_server_ip(tenant.access_point)
         subnet_id = tenant.subnet.id
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index c0eb6e7..5a1dc04 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
@@ -37,16 +39,12 @@
         cls.set_network_resources()
         super(TestServerAdvancedOps, cls).setUpClass()
 
-        if not CONF.compute_feature_enabled.resize:
-            msg = "Skipping test - resize not available on this host"
-            raise cls.skipException(msg)
-
-        resize_flavor = CONF.compute.flavor_ref_alt
-
-        if resize_flavor == CONF.compute.flavor_ref:
+        if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
             raise cls.skipException(msg)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize is not available.')
     @test.services('compute')
     def test_resize_server_confirm(self):
         # We create an instance for use in this test
@@ -65,6 +63,8 @@
         self.status_timeout(
             self.compute_client.servers, instance_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+                          'Suspend is not available.')
     @test.services('compute')
     def test_server_sequence_suspend_resume(self):
         # We create an instance for use in this test
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d369f12..13e00a5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -14,19 +14,17 @@
 #    under the License.
 
 from tempest.common.utils import data_utils
-from tempest.common.utils import test_utils
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
+from tempest.scenario import utils as test_utils
 from tempest import test
 
-import testscenarios
-
 CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
 
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test_utils.load_tests_input_scenario_utils
 
 
 class TestServerBasicOps(manager.OfficialClientTest):
@@ -43,13 +41,6 @@
      * Terminate the instance
     """
 
-    scenario_utils = test_utils.InputScenarioUtils()
-    scenario_flavor = scenario_utils.scenario_flavors
-    scenario_image = scenario_utils.scenario_images
-
-    scenarios = testscenarios.multiply_scenarios(scenario_image,
-                                                 scenario_flavor)
-
     def setUp(self):
         super(TestServerBasicOps, self).setUp()
         # Setup image and flavor the test instance
@@ -99,42 +90,6 @@
                                       create_kwargs=create_kwargs)
         self.set_resource('instance', instance)
 
-    def pause_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Pausing instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.pause()
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'PAUSED')
-
-    def unpause_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Unpausing instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.unpause()
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'ACTIVE')
-
-    def suspend_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Suspending instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.suspend()
-        self.status_timeout(self.compute_client.servers,
-                            instance_id, 'SUSPENDED')
-
-    def resume_server(self):
-        instance = self.get_resource('instance')
-        instance_id = instance.id
-        LOG.debug("Resuming instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.resume()
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'ACTIVE')
-
     def terminate_instance(self):
         instance = self.get_resource('instance')
         instance.delete()
@@ -149,10 +104,11 @@
             instance.add_floating_ip(floating_ip)
             # Check ssh
             try:
-                self.get_remote_client(
+                linux_client = self.get_remote_client(
                     server_or_ip=floating_ip.ip,
                     username=self.image_utils.ssh_user(self.image_ref),
-                    private_key=self.keypair.private)
+                    private_key=self.keypair.private_key)
+                linux_client.validate_authentication()
             except Exception:
                 LOG.exception('ssh to server failed')
                 self._log_console_output()
@@ -163,9 +119,5 @@
         self.add_keypair()
         self.create_security_group()
         self.boot_instance()
-        self.pause_server()
-        self.unpause_server()
-        self.suspend_server()
-        self.resume_server()
         self.verify_ssh()
         self.terminate_instance()
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 128ec17..5235871 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -50,6 +50,13 @@
     14. Check the existence of a file which created at 6. in volume2
     """
 
+    @classmethod
+    def setUpClass(cls):
+        super(TestStampPattern, cls).setUpClass()
+
+        if not CONF.volume_feature_enabled.snapshot:
+            raise cls.skipException("Cinder volume snapshots are disabled")
+
     def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
         self.status_timeout(self.volume_client.volume_snapshots,
                             volume_snapshot.id, status)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index e89ea70..faca31f 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -35,6 +35,12 @@
      * Boot an additional instance from the new snapshot based volume
      * Check written content in the instance booted from snapshot
     """
+    @classmethod
+    def setUpClass(cls):
+        super(TestVolumeBootPattern, cls).setUpClass()
+
+        if not CONF.volume_feature_enabled.snapshot:
+            raise cls.skipException("Cinder volume snapshots are disabled")
 
     def _create_volume_from_image(self):
         img_uuid = CONF.compute.image_ref
diff --git a/tempest/common/utils/test_utils.py b/tempest/scenario/utils.py
similarity index 80%
rename from tempest/common/utils/test_utils.py
rename to tempest/scenario/utils.py
index cc0d831..e2adb34 100644
--- a/tempest/common/utils/test_utils.py
+++ b/tempest/scenario/utils.py
@@ -12,15 +12,20 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest import clients
-from tempest.common.utils import misc
-from tempest import config
 
 import json
 import re
 import string
 import unicodedata
 
+import testscenarios
+import testtools
+
+from tempest import auth
+from tempest import clients
+from tempest.common.utils import misc
+from tempest import config
+
 CONF = config.CONF
 
 
@@ -35,9 +40,8 @@
         self.non_ssh_image_pattern = \
             CONF.input_scenario.non_ssh_image_regex
         # Setup clients
-        ocm = clients.OfficialClientManager(CONF.identity.username,
-                                            CONF.identity.password,
-                                            CONF.identity.tenant_name)
+        ocm = clients.OfficialClientManager(
+            auth.get_default_credentials('user'))
         self.client = ocm.compute_client
 
     def ssh_user(self, image_id):
@@ -79,7 +83,7 @@
 
     class TestInputScenario(manager.OfficialClientTest):
 
-        scenario_utils = test_utils.InputScenarioUtils()
+        scenario_utils = utils.InputScenarioUtils()
         scenario_flavor = scenario_utils.scenario_flavors
         scenario_image = scenario_utils.scenario_images
         scenarios = testscenarios.multiply_scenarios(scenario_image,
@@ -95,9 +99,8 @@
                                             digit=string.digits)
 
     def __init__(self):
-        ocm = clients.OfficialClientManager(CONF.identity.username,
-                                            CONF.identity.password,
-                                            CONF.identity.tenant_name)
+        ocm = clients.OfficialClientManager(
+            auth.get_default_credentials('user', fill_in=False))
         self.client = ocm.compute_client
         self.image_pattern = CONF.input_scenario.image_regex
         self.flavor_pattern = CONF.input_scenario.flavor_regex
@@ -134,3 +137,22 @@
                 for f in flavors if re.search(self.flavor_pattern, str(f.name))
             ]
         return self._scenario_flavors
+
+
+def load_tests_input_scenario_utils(*args):
+    """
+    Wrapper for testscenarios that builds the scenarios at test-load time,
+    so that CONF is not read via getattr at import time.
+    """
+    if getattr(args[0], 'suiteClass', None) is not None:
+        loader, standard_tests, pattern = args
+    else:
+        standard_tests, module, loader = args
+    scenario_utils = InputScenarioUtils()
+    scenario_flavor = scenario_utils.scenario_flavors
+    scenario_image = scenario_utils.scenario_images
+    for test in testtools.iterate_tests(standard_tests):
+        setattr(test, 'scenarios', testscenarios.multiply_scenarios(
+            scenario_image,
+            scenario_flavor))
+    return testscenarios.load_tests_apply_scenarios(*args)
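
# Editor's illustration (not part of the patch): a minimal sketch of the
# scenario machinery the new load_tests hook relies on. The image/flavor
# values below are made up; in Tempest the lists come from InputScenarioUtils
# and a scenario module simply assigns
#     load_tests = test_utils.load_tests_input_scenario_utils
import testscenarios

scenario_images = [('image-cirros', {'image_ref': 'cirros-uuid'})]
scenario_flavors = [('flavor-tiny', {'flavor_ref': '1'}),
                    ('flavor-small', {'flavor_ref': '2'})]

# One scenario per (image, flavor) combination; here, two scenarios.
combined = testscenarios.multiply_scenarios(scenario_images, scenario_flavors)
for name, params in combined:
    print(name, params)
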
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 5f6b513..321b08b 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -12,6 +12,7 @@
 
 import functools
 import json
+import urllib
 
 import six
 
@@ -103,16 +104,19 @@
 
         return patch
 
-    def _list_request(self, resource, permanent=False):
+    def _list_request(self, resource, permanent=False, **kwargs):
         """
         Get the list of objects of the specified type.
 
         :param resource: The name of the REST resource, e.g., 'nodes'.
+        :param **kwargs: Parameters for the request.
         :return: A tuple with the server response and deserialized JSON list
                  of objects
 
         """
         uri = self._get_uri(resource, permanent=permanent)
+        if kwargs:
+            uri += "?%s" % urllib.urlencode(kwargs)
 
         resp, body = self.get(uri)
 
@@ -195,3 +199,14 @@
 
         """
         return self._list_request(version, permanent=True)
+
+    def _put_request(self, resource, put_object):
+        """
+        Update specified object with JSON-patch.
+
+        """
+        uri = self._get_uri(resource)
+        put_body = json.dumps(put_object)
+
+        resp, body = self.put(uri, body=put_body)
+        return resp, body
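
# Editor's illustration (not part of the patch): with the **kwargs support
# added above, filter parameters end up appended to the resource URI as a
# query string via urllib.urlencode (Python 2). The address value is made up.
import urllib

kwargs = {'address': 'aa:bb:cc:dd:ee:ff'}
uri = '/ports'
if kwargs:
    uri += '?%s' % urllib.urlencode(kwargs)
print(uri)  # /ports?address=aa%3Abb%3Acc%3Add%3Aee%3Aff
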
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 3f4c509..52479b5 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -37,9 +37,24 @@
         return self._list_request('chassis')
 
     @base.handle_errors
-    def list_ports(self):
+    def list_ports(self, **kwargs):
         """List all existing ports."""
-        return self._list_request('ports')
+        return self._list_request('ports', **kwargs)
+
+    @base.handle_errors
+    def list_nodestates(self, uuid):
+        """List the states of the specified node."""
+        return self._list_request('/nodes/%s/states' % uuid)
+
+    @base.handle_errors
+    def list_ports_detail(self):
+        """List all existing ports with details."""
+        return self._list_request('/ports/detail')
+
+    @base.handle_errors
+    def list_drivers(self):
+        """List all existing drivers."""
+        return self._list_request('drivers')
 
     @base.handle_errors
     def show_node(self, uuid):
@@ -116,12 +131,20 @@
         Create a port with the specified parameters.
 
         :param node_id: The ID of the node which owns the port.
-        :param address: MAC address of the port. Default: 01:23:45:67:89:0A.
+        :param address: MAC address of the port.
+        :param extra: Meta data of the port. Default: {'foo': 'bar'}.
+        :param uuid: UUID of the port.
         :return: A tuple with the server response and the created port.
 
         """
-        port = {'address': kwargs.get('address', '01:23:45:67:89:0A'),
-                'node_uuid': node_id}
+        port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
+                'uuid': kwargs['uuid']}
+
+        if node_id is not None:
+            port['node_uuid'] = node_id
+
+        if kwargs['address'] is not None:
+            port['address'] = kwargs['address']
 
         return self._create_request('ports', 'port', port)
 
@@ -192,15 +215,27 @@
         return self._patch_request('chassis', uuid, patch)
 
     @base.handle_errors
-    def update_port(self, uuid, **kwargs):
+    def update_port(self, uuid, patch):
         """
         Update the specified port.
 
         :param uuid: The unique identifier of the port.
+        :param patch: List of dicts representing json patches.
         :return: A tuple with the server response and the updated port.
 
         """
-        port_attributes = ('address',)
-        patch = self._make_patch(port_attributes, **kwargs)
 
         return self._patch_request('ports', uuid, patch)
+
+    @base.handle_errors
+    def set_node_power_state(self, node_uuid, state):
+        """
+        Set power state of the specified node.
+
+        :param node_uuid: The unique identifier of the node.
+        :param state: Desired power state to set (on/off/reboot).
+
+        """
+        target = {'target': state}
+        return self._put_request('nodes/%s/states/power' % node_uuid,
+                                 target)
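
# Editor's illustration (not part of the patch): update_port() now takes a
# ready-made JSON-patch list, and set_node_power_state() PUTs a
# {'target': ...} body to nodes/<uuid>/states/power. The values are made up.
import json

# A JSON-patch (RFC 6902) list, as passed to update_port(uuid, patch).
patch = [{'op': 'replace', 'path': '/address', 'value': 'aa:bb:cc:dd:ee:ff'}]
print(json.dumps(patch))

# The body _put_request() serializes for set_node_power_state(uuid, 'off').
print(json.dumps({'target': 'off'}))
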
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
new file mode 100644
index 0000000..98d8896
--- /dev/null
+++ b/tempest/services/compute/json/agents_client.py
@@ -0,0 +1,61 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import agents as common_schema
+from tempest.api_schema.compute.v2 import agents as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class AgentsClientJSON(rest_client.RestClient):
+    """
+    Tests Agents API
+    """
+
+    def __init__(self, auth_provider):
+        super(AgentsClientJSON, self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_type
+
+    def list_agents(self, params=None):
+        """List all agent builds."""
+        url = 'os-agents'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(common_schema.list_agents, resp, body)
+        return resp, body['agents']
+
+    def create_agent(self, **kwargs):
+        """Create an agent build."""
+        post_body = json.dumps({'agent': kwargs})
+        resp, body = self.post('os-agents', post_body)
+        return resp, self._parse_resp(body)
+
+    def delete_agent(self, agent_id):
+        """Delete an existing agent build."""
+        resp, body = self.delete("os-agents/%s" % str(agent_id))
+        self.validate_response(schema.delete_agent, resp, body)
+        return resp, body
+
+    def update_agent(self, agent_id, **kwargs):
+        """Update an agent build."""
+        put_body = json.dumps({'para': kwargs})
+        resp, body = self.put('os-agents/%s' % str(agent_id), put_body)
+        return resp, self._parse_resp(body)
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index ccb85c4..71d6f63 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -16,6 +16,7 @@
 import json
 
 from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -40,6 +41,7 @@
         """Get details of the given aggregate."""
         resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
         body = json.loads(body)
+        self.validate_response(schema.get_aggregate, resp, body)
         return resp, body['aggregate']
 
     def create_aggregate(self, **kwargs):
@@ -48,6 +50,7 @@
         resp, body = self.post('os-aggregates', post_body)
 
         body = json.loads(body)
+        self.validate_response(v2_schema.create_aggregate, resp, body)
         return resp, body['aggregate']
 
     def update_aggregate(self, aggregate_id, name, availability_zone=None):
@@ -60,11 +63,14 @@
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
 
         body = json.loads(body)
+        self.validate_response(schema.update_aggregate, resp, body)
         return resp, body['aggregate']
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
-        return self.delete("os-aggregates/%s" % str(aggregate_id))
+        resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+        self.validate_response(v2_schema.delete_aggregate, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         try:
@@ -82,6 +88,7 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.aggregate_add_remove_host, resp, body)
         return resp, body['aggregate']
 
     def remove_host(self, aggregate_id, host):
@@ -93,6 +100,7 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.aggregate_add_remove_host, resp, body)
         return resp, body['aggregate']
 
     def set_metadata(self, aggregate_id, meta):
@@ -104,4 +112,5 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.aggregate_set_metadata, resp, body)
         return resp, body['aggregate']
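
# Editor's illustration (not part of the patch): the validate_response() calls
# added throughout these clients check each decoded response body against a
# JSON schema from tempest.api_schema (the real schema objects also carry the
# expected status codes). The standalone sketch below uses the jsonschema
# library directly with a made-up, much-simplified aggregate schema to show
# the kind of check being performed.
import jsonschema

aggregate_schema = {
    'type': 'object',
    'properties': {
        'aggregate': {
            'type': 'object',
            'properties': {'id': {'type': 'integer'},
                           'name': {'type': 'string'}},
            'required': ['id', 'name'],
        }
    },
    'required': ['aggregate'],
}

body = {'aggregate': {'id': 1, 'name': 'agg-test'}}
jsonschema.validate(body, aggregate_schema)  # raises ValidationError on mismatch
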
diff --git a/tempest/services/compute/json/availability_zone_client.py b/tempest/services/compute/json/availability_zone_client.py
index 9278d5b..1c067e8 100644
--- a/tempest/services/compute/json/availability_zone_client.py
+++ b/tempest/services/compute/json/availability_zone_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v2 import availability_zone as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,9 +32,12 @@
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list, resp, body)
         return resp, body['availabilityZoneInfo']
 
     def get_availability_zone_list_detail(self):
         resp, body = self.get('os-availability-zone/detail')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list_detail, resp,
+                               body)
         return resp, body['availabilityZoneInfo']
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +33,7 @@
         url = "os-certificates/%s" % (id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_certificate, resp, body)
         return resp, body['certificate']
 
     def create_certificate(self):
@@ -38,4 +41,5 @@
         url = "os-certificates"
         resp, body = self.post(url, None)
         body = json.loads(body)
+        self.validate_response(v2schema.create_certificate, resp, body)
         return resp, body['certificate']
diff --git a/tempest/services/compute/json/extensions_client.py b/tempest/services/compute/json/extensions_client.py
index 5ad8b98..ed2b14d 100644
--- a/tempest/services/compute/json/extensions_client.py
+++ b/tempest/services/compute/json/extensions_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v2 import extensions as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +32,7 @@
         url = 'extensions'
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_extensions, resp, body)
         return resp, body['extensions']
 
     def is_enabled(self, extension):
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
index 5fdd564..f2d5cbe 100644
--- a/tempest/services/compute/json/fixed_ips_client.py
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -39,4 +39,5 @@
         """This reserves and unreserves fixed ips."""
         url = "os-fixed-ips/%s/action" % (ip)
         resp, body = self.post(url, json.dumps(body))
+        self.validate_response(schema.fixed_ip_action, resp, body)
         return resp, body
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index bc64117..89cbe1d 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -16,7 +16,11 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import flavors as common_schema
 from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+    as schema_extra_specs
+from tempest.api_schema.compute.v2 import flavors as v2schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -36,6 +40,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(common_schema.list_flavors, resp, body)
         return resp, body['flavors']
 
     def list_flavors_with_detail(self, params=None):
@@ -45,11 +50,13 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(v2schema.list_flavors_details, resp, body)
         return resp, body['flavors']
 
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
         body = json.loads(body)
+        self.validate_response(v2schema.create_get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -73,11 +80,14 @@
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
+        self.validate_response(v2schema.create_get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
-        return self.delete("flavors/%s" % str(flavor_id))
+        resp, body = self.delete("flavors/{0}".format(flavor_id))
+        self.validate_response(v2schema.delete_flavor, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         # Did not use get_flavor_details(id) for verification as it gives
@@ -95,12 +105,16 @@
         resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
         resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -108,6 +122,8 @@
         resp, body = self.get('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
                               key))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -115,18 +131,23 @@
         resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                               (flavor_id, key), json.dumps(kwargs))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def unset_flavor_extra_spec(self, flavor_id, key):
         """Unsets extra Specs from the mentioned flavor."""
-        return self.delete('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
-                           key))
+        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
+                                 (str(flavor_id), key))
+        self.validate_response(v2schema.unset_flavor_extra_specs, resp, body)
+        return resp, body
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
         resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
         body = json.loads(body)
-        self.validate_response(schema_access.list_flavor_access, resp, body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def add_flavor_access(self, flavor_id, tenant_id):
@@ -139,6 +160,8 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def remove_flavor_access(self, flavor_id, tenant_id):
@@ -151,4 +174,6 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index 0130f27..342f946 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -16,6 +16,7 @@
 import urllib
 
 from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v2 import hosts as v2_schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -45,6 +46,7 @@
 
         resp, body = self.get("os-hosts/%s" % str(hostname))
         body = json.loads(body)
+        self.validate_response(schema.show_host_detail, resp, body)
         return resp, body['host']
 
     def update_host(self, hostname, **kwargs):
@@ -59,6 +61,7 @@
 
         resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
         body = json.loads(body)
+        self.validate_response(v2_schema.update_host, resp, body)
         return resp, body
 
     def startup_host(self, hostname):
@@ -66,6 +69,7 @@
 
         resp, body = self.get("os-hosts/%s/startup" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v2_schema.startup_host, resp, body)
         return resp, body['host']
 
     def shutdown_host(self, hostname):
@@ -73,6 +77,7 @@
 
         resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v2_schema.shutdown_host, resp, body)
         return resp, body['host']
 
     def reboot_host(self, hostname):
@@ -80,4 +85,5 @@
 
         resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v2_schema.reboot_host, resp, body)
         return resp, body['host']
diff --git a/tempest/services/compute/json/hypervisor_client.py b/tempest/services/compute/json/hypervisor_client.py
index c6b13b0..30228b3 100644
--- a/tempest/services/compute/json/hypervisor_client.py
+++ b/tempest/services/compute/json/hypervisor_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import hypervisors as common_schema
+from tempest.api_schema.compute.v2 import hypervisors as v2schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,40 +33,51 @@
         """List hypervisors information."""
         resp, body = self.get('os-hypervisors')
         body = json.loads(body)
+        self.validate_response(common_schema.common_hypervisors_detail,
+                               resp, body)
         return resp, body['hypervisors']
 
     def get_hypervisor_list_details(self):
         """Show detailed hypervisors information."""
         resp, body = self.get('os-hypervisors/detail')
         body = json.loads(body)
+        self.validate_response(common_schema.common_list_hypervisors_detail,
+                               resp, body)
         return resp, body['hypervisors']
 
     def get_hypervisor_show_details(self, hyper_id):
         """Display the details of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s' % hyper_id)
         body = json.loads(body)
+        self.validate_response(common_schema.common_show_hypervisor,
+                               resp, body)
         return resp, body['hypervisor']
 
     def get_hypervisor_servers(self, hyper_name):
         """List instances belonging to the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/servers' % hyper_name)
         body = json.loads(body)
+        self.validate_response(v2schema.hypervisors_servers, resp, body)
         return resp, body['hypervisors']
 
     def get_hypervisor_stats(self):
         """Get hypervisor statistics over all compute nodes."""
         resp, body = self.get('os-hypervisors/statistics')
         body = json.loads(body)
+        self.validate_response(common_schema.hypervisor_statistics, resp, body)
         return resp, body['hypervisor_statistics']
 
     def get_hypervisor_uptime(self, hyper_id):
         """Display the uptime of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
         body = json.loads(body)
+        self.validate_response(common_schema.hypervisor_uptime, resp, body)
         return resp, body['hypervisor']
 
     def search_hypervisor(self, hyper_name):
         """Search specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/search' % hyper_name)
         body = json.loads(body)
+        self.validate_response(common_schema.common_hypervisors_detail,
+                               resp, body)
         return resp, body['hypervisors']
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 2f128f2..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -48,6 +48,7 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
+        self.validate_response(schema.create_image, resp, body)
         return resp, body
 
     def list_images(self, params=None):
@@ -69,6 +70,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_images_details, resp, body)
         return resp, body['images']
 
     def get_image(self, image_id):
@@ -81,7 +83,9 @@
 
     def delete_image(self, image_id):
         """Deletes the provided image."""
-        return self.delete("images/%s" % str(image_id))
+        resp, body = self.delete("images/%s" % str(image_id))
+        self.validate_response(schema.delete, resp, body)
+        return resp, body
 
     def wait_for_image_status(self, image_id, status):
         """Waits for an image to reach a given status."""
@@ -91,6 +95,7 @@
         """Lists all metadata items for an image."""
         resp, body = self.get("images/%s/metadata" % str(image_id))
         body = json.loads(body)
+        self.validate_response(schema.image_metadata, resp, body)
         return resp, body['metadata']
 
     def set_image_metadata(self, image_id, meta):
@@ -98,6 +103,7 @@
         post_body = json.dumps({'metadata': meta})
         resp, body = self.put('images/%s/metadata' % str(image_id), post_body)
         body = json.loads(body)
+        self.validate_response(schema.image_metadata, resp, body)
         return resp, body['metadata']
 
     def update_image_metadata(self, image_id, meta):
@@ -105,12 +111,14 @@
         post_body = json.dumps({'metadata': meta})
         resp, body = self.post('images/%s/metadata' % str(image_id), post_body)
         body = json.loads(body)
+        self.validate_response(schema.image_metadata, resp, body)
         return resp, body['metadata']
 
     def get_image_metadata_item(self, image_id, key):
         """Returns the value for a specific image metadata key."""
         resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
         body = json.loads(body)
+        self.validate_response(schema.image_meta_item, resp, body)
         return resp, body['meta']
 
     def set_image_metadata_item(self, image_id, key, meta):
@@ -119,12 +127,14 @@
         resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.image_meta_item, resp, body)
         return resp, body['meta']
 
     def delete_image_metadata_item(self, image_id, key):
         """Deletes a single image metadata key/value pair."""
         resp, body = self.delete("images/%s/metadata/%s" %
                                  (str(image_id), key))
+        self.validate_response(schema.delete, resp, body)
         return resp, body
 
     def is_resource_deleted(self, id):
diff --git a/tempest/services/compute/json/instance_usage_audit_log_client.py b/tempest/services/compute/json/instance_usage_audit_log_client.py
index 1f6e988..4700ca7 100644
--- a/tempest/services/compute/json/instance_usage_audit_log_client.py
+++ b/tempest/services/compute/json/instance_usage_audit_log_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute.v2 import instance_usage_audit_logs \
+    as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -32,10 +34,13 @@
         url = 'os-instance_usage_audit_log'
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_instance_usage_audit_log,
+                               resp, body)
         return resp, body["instance_usage_audit_logs"]
 
     def get_instance_usage_audit_log(self, time_before):
         url = 'os-instance_usage_audit_log/%s' % time_before
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_instance_usage_audit_log, resp, body)
         return resp, body["instance_usage_audit_log"]
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 9928b94..cdac8b7 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -16,6 +16,9 @@
 import json
 import time
 
+from tempest.api_schema.compute import interfaces as common_schema
+from tempest.api_schema.compute import servers as servers_schema
+from tempest.api_schema.compute.v2 import interfaces as schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -32,6 +35,7 @@
     def list_interfaces(self, server):
         resp, body = self.get('servers/%s/os-interface' % server)
         body = json.loads(body)
+        self.validate_response(schema.list_interfaces, resp, body)
         return resp, body['interfaceAttachments']
 
     def create_interface(self, server, port_id=None, network_id=None,
@@ -58,6 +62,7 @@
     def delete_interface(self, server, port_id):
         resp, body = self.delete('servers/%s/os-interface/%s' % (server,
                                                                  port_id))
+        self.validate_response(common_schema.delete_interface, resp, body)
         return resp, body
 
     def wait_for_interface_status(self, server, port_id, status):
@@ -90,6 +95,8 @@
         })
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
+        self.validate_response(servers_schema.server_actions_common_schema,
+                               resp, body)
         return resp, body
 
     def remove_fixed_ip(self, server_id, ip_address):
@@ -101,4 +108,6 @@
         })
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
+        self.validate_response(servers_schema.server_actions_common_schema,
+                               resp, body)
         return resp, body
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 71f235d..be93789 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -53,7 +53,10 @@
         post_body = json.dumps(post_body)
         resp, body = self.post("os-keypairs", body=post_body)
         body = json.loads(body)
+        self.validate_response(schema.create_keypair, resp, body)
         return resp, body['keypair']
 
     def delete_keypair(self, key_name):
-        return self.delete("os-keypairs/%s" % str(key_name))
+        resp, body = self.delete("os-keypairs/%s" % str(key_name))
+        self.validate_response(schema.delete_keypair, resp, body)
+        return resp, body
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
new file mode 100644
index 0000000..beef5d2
--- /dev/null
+++ b/tempest/services/compute/json/migrations_client.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import migrations as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(MigrationsClientJSON, self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_type
+
+    def list_migrations(self, params=None):
+        """Lists all migrations."""
+
+        url = 'os-migrations'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.list_migrations, resp, body)
+        return resp, body['migrations']
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 9346183..7e828d8 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -48,8 +48,8 @@
         self.validate_response(schema.quota_set, resp, body)
         return resp, body['quota_set']
 
-    def update_quota_set(self, tenant_id, force=None,
-                         injected_file_content_bytes=None,
+    def update_quota_set(self, tenant_id, user_id=None,
+                         force=None, injected_file_content_bytes=None,
                          metadata_items=None, ram=None, floating_ips=None,
                          fixed_ips=None, key_pairs=None, instances=None,
                          security_group_rules=None, injected_files=None,
@@ -101,11 +101,20 @@
             post_body['security_groups'] = security_groups
 
         post_body = json.dumps({'quota_set': post_body})
-        resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body)
+
+        if user_id:
+            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+                                  (str(tenant_id), str(user_id)), post_body)
+        else:
+            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+                                  post_body)
 
         body = json.loads(body)
+        self.validate_response(schema.quota_set_update, resp, body)
         return resp, body['quota_set']
 
     def delete_quota_set(self, tenant_id):
         """Delete the tenant's quota set."""
-        return self.delete('os-quota-sets/%s' % str(tenant_id))
+        resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+        self.validate_response(schema.delete_quota, resp, body)
+        return resp, body
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 9267be7..a86f3df 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -47,6 +47,7 @@
         url = "os-security-groups/%s" % str(security_group_id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_security_group, resp, body)
         return resp, body['security_group']
 
     def create_security_group(self, name, description):
@@ -62,6 +63,7 @@
         post_body = json.dumps({'security_group': post_body})
         resp, body = self.post('os-security-groups', post_body)
         body = json.loads(body)
+        self.validate_response(schema.get_security_group, resp, body)
         return resp, body['security_group']
 
     def update_security_group(self, security_group_id, name=None,
@@ -81,11 +83,15 @@
         resp, body = self.put('os-security-groups/%s' % str(security_group_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_security_group, resp, body)
         return resp, body['security_group']
 
     def delete_security_group(self, security_group_id):
         """Deletes the provided Security Group."""
-        return self.delete('os-security-groups/%s' % str(security_group_id))
+        resp, body = self.delete(
+            'os-security-groups/%s' % str(security_group_id))
+        self.validate_response(schema.delete_security_group, resp, body)
+        return resp, body
 
     def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
                                    to_port, **kwargs):
@@ -111,11 +117,15 @@
         url = 'os-security-group-rules'
         resp, body = self.post(url, post_body)
         body = json.loads(body)
+        self.validate_response(schema.create_security_group_rule, resp, body)
         return resp, body['security_group_rule']
 
     def delete_security_group_rule(self, group_rule_id):
         """Deletes the provided Security Group rule."""
-        return self.delete('os-security-group-rules/%s' % str(group_rule_id))
+        resp, body = self.delete('os-security-group-rules/%s' %
+                                 str(group_rule_id))
+        self.validate_response(schema.delete_security_group_rule, resp, body)
+        return resp, body
 
     def list_security_group_rules(self, security_group_id):
         """List all rules for a security group."""
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index d6705db..9d3b3b6 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -18,6 +18,7 @@
 import time
 import urllib
 
+from tempest.api_schema.compute import servers as common_schema
 from tempest.api_schema.compute.v2 import servers as schema
 from tempest.common import rest_client
 from tempest.common import waiters
@@ -56,6 +57,7 @@
         max_count: Count of maximum number of instances to launch.
         disk_config: Determines if user or admin controls disk configuration.
         return_reservation_id: Enable/Disable the return of reservation id
+        block_device_mapping: Block device mapping for the server.
         """
         post_body = {
             'name': name,
@@ -68,7 +70,7 @@
                        'availability_zone', 'accessIPv4', 'accessIPv6',
                        'min_count', 'max_count', ('metadata', 'meta'),
                        ('OS-DCF:diskConfig', 'disk_config'),
-                       'return_reservation_id']:
+                       'return_reservation_id', 'block_device_mapping']:
             if isinstance(option, tuple):
                 post_param = option[0]
                 key = option[1]
@@ -125,6 +127,7 @@
         post_body = json.dumps({'server': post_body})
         resp, body = self.put("servers/%s" % str(server_id), post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_server, resp, body)
         return resp, body['server']
 
     def get_server(self, server_id):
@@ -135,7 +138,9 @@
 
     def delete_server(self, server_id):
         """Deletes the given server."""
-        return self.delete("servers/%s" % str(server_id))
+        resp, body = self.delete("servers/%s" % str(server_id))
+        self.validate_response(common_schema.delete_server, resp, body)
+        return resp, body
 
     def list_servers(self, params=None):
         """Lists all servers for a user."""
@@ -146,6 +151,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(common_schema.list_servers, resp, body)
         return resp, body
 
     def list_servers_with_detail(self, params=None):
@@ -188,6 +194,7 @@
         """Lists all addresses for a server."""
         resp, body = self.get("servers/%s/ips" % str(server_id))
         body = json.loads(body)
+        self.validate_response(schema.list_addresses, resp, body)
         return resp, body['addresses']
 
     def list_addresses_by_network(self, server_id, network_id):
@@ -195,14 +202,28 @@
         resp, body = self.get("servers/%s/ips/%s" %
                               (str(server_id), network_id))
         body = json.loads(body)
+        self.validate_response(schema.list_addresses_by_network, resp, body)
         return resp, body
 
-    def action(self, server_id, action_name, response_key, **kwargs):
+    def action(self, server_id, action_name, response_key,
+               schema=common_schema.server_actions_common_schema, **kwargs):
         post_body = json.dumps({action_name: kwargs})
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
         if response_key is not None:
-            body = json.loads(body)[response_key]
+            body = json.loads(body)
+            # If an action does not have its response schema implemented yet,
+            # callers can pass schema=None to skip response validation for
+            # that action. Once every server action has a schema implemented,
+            # this check can be removed so that all actions validate their
+            # responses.
+            # TODO(GMann): Remove the below 'if' check once all server action
+            # schemas are implemented.
+            if schema is not None:
+                self.validate_response(schema, resp, body)
+            body = body[response_key]
+        else:
+            self.validate_response(schema, resp, body)
         return resp, body
 
     def create_backup(self, server_id, backup_type, rotation, name):
@@ -221,6 +242,7 @@
         resp, body = self.get("servers/%s/os-server-password" %
                               str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.get_password, resp, body)
         return resp, body
 
     def delete_password(self, server_id):
@@ -229,8 +251,11 @@
         Note that this does not actually change the instance server
         password.
         """
-        return self.delete("servers/%s/os-server-password" %
-                           str(server_id))
+        resp, body = self.delete("servers/%s/os-server-password" %
+                                 str(server_id))
+        self.validate_response(common_schema.server_actions_delete_password,
+                               resp, body)
+        return resp, body
 
     def reboot(self, server_id, reboot_type):
         """Reboots a server."""
@@ -242,7 +267,7 @@
         if 'disk_config' in kwargs:
             kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
             del kwargs['disk_config']
-        return self.action(server_id, 'rebuild', 'server', **kwargs)
+        return self.action(server_id, 'rebuild', 'server', None, **kwargs)
 
     def resize(self, server_id, flavor_ref, **kwargs):
         """Changes the flavor of a server."""
@@ -254,19 +279,18 @@
 
     def confirm_resize(self, server_id, **kwargs):
         """Confirms the flavor change for a server."""
-        return self.action(server_id, 'confirmResize', None, **kwargs)
+        return self.action(server_id, 'confirmResize',
+                           None, schema.server_actions_confirm_resize,
+                           **kwargs)
 
     def revert_resize(self, server_id, **kwargs):
         """Reverts a server back to its original flavor."""
         return self.action(server_id, 'revertResize', None, **kwargs)
 
-    def create_image(self, server_id, name):
-        """Creates an image of the given server."""
-        return self.action(server_id, 'createImage', None, name=name)
-
     def list_server_metadata(self, server_id):
         resp, body = self.get("servers/%s/metadata" % str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.list_server_metadata, resp, body)
         return resp, body['metadata']
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -277,6 +301,7 @@
         resp, body = self.put('servers/%s/metadata' % str(server_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.set_server_metadata, resp, body)
         return resp, body['metadata']
 
     def update_server_metadata(self, server_id, meta):
@@ -284,11 +309,15 @@
         resp, body = self.post('servers/%s/metadata' % str(server_id),
                                post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.update_server_metadata,
+                               resp, body)
         return resp, body['metadata']
 
     def get_server_metadata_item(self, server_id, key):
         resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['meta']
 
     def set_server_metadata_item(self, server_id, key, meta):
@@ -296,11 +325,15 @@
         resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['meta']
 
     def delete_server_metadata_item(self, server_id, key):
         resp, body = self.delete("servers/%s/metadata/%s" %
                                  (str(server_id), key))
+        self.validate_response(common_schema.delete_server_metadata_item,
+                               resp, body)
         return resp, body
 
     def stop(self, server_id, **kwargs):
@@ -319,12 +352,15 @@
         })
         resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
                                post_body)
+        body = json.loads(body)
+        self.validate_response(schema.attach_volume, resp, body)
         return resp, body
 
     def detach_volume(self, server_id, volume_id):
         """Detaches a volume from a server instance."""
         resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
                                  (server_id, volume_id))
+        self.validate_response(schema.detach_volume, resp, body)
         return resp, body
 
     def add_security_group(self, server_id, name):
@@ -347,6 +383,8 @@
         req_body = json.dumps({'os-migrateLive': migrate_params})
 
         resp, body = self.post("servers/%s/action" % str(server_id), req_body)
+        self.validate_response(common_schema.server_actions_common_schema,
+                               resp, body)
         return resp, body
 
     def migrate_server(self, server_id, **kwargs):
@@ -395,7 +433,7 @@
 
     def get_console_output(self, server_id, length):
         return self.action(server_id, 'os-getConsoleOutput', 'output',
-                           length=length)
+                           common_schema.get_console_output, length=length)
 
     def list_virtual_interfaces(self, server_id):
         """
@@ -403,11 +441,13 @@
         """
         resp, body = self.get('/'.join(['servers', server_id,
                               'os-virtual-interfaces']))
-        return resp, json.loads(body)
+        body = json.loads(body)
+        self.validate_response(schema.list_virtual_interfaces, resp, body)
+        return resp, body
 
     def rescue_server(self, server_id, **kwargs):
         """Rescue the provided server."""
-        return self.action(server_id, 'rescue', None, **kwargs)
+        return self.action(server_id, 'rescue', 'adminPass', None, **kwargs)
 
     def unrescue_server(self, server_id):
         """Unrescue the provided server."""
@@ -451,4 +491,42 @@
     def get_vnc_console(self, server_id, console_type):
         """Get URL of VNC console."""
         return self.action(server_id, "os-getVNCConsole",
-                           "console", type=console_type)
+                           "console", common_schema.get_vnc_console,
+                           type=console_type)
+
+    def create_server_group(self, name, policies):
+        """
+        Create the server group
+        name : Name of the server-group
+        policies : List of the policies (affinity/anti-affinity)
+        """
+        post_body = {
+            'name': name,
+            'policies': policies,
+        }
+
+        post_body = json.dumps({'server_group': post_body})
+        resp, body = self.post('os-server-groups', post_body)
+
+        body = json.loads(body)
+        self.validate_response(schema.create_get_server_group, resp, body)
+        return resp, body['server_group']
+
+    def delete_server_group(self, server_group_id):
+        """Delete the given server-group."""
+        resp, body = self.delete("os-server-groups/%s" % str(server_group_id))
+        self.validate_response(schema.delete_server_group, resp, body)
+        return resp, body
+
+    def list_server_groups(self):
+        """List the server-groups."""
+        resp, body = self.get("os-server-groups")
+        body = json.loads(body)
+        return resp, body['server_groups']
+
+    def get_server_group(self, server_group_id):
+        """Get the details of given server_group."""
+        resp, body = self.get("os-server-groups/%s" % str(server_group_id))
+        body = json.loads(body)
+        self.validate_response(schema.create_get_server_group, resp, body)
+        return resp, body['server_group']
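
# Editor's illustration (not part of the patch): how the reworked action()
# helper above selects a response schema. The default is the common
# server-action schema; callers can pass an action-specific schema (e.g.
# confirm_resize) or None to skip body validation while an action's schema is
# unimplemented (e.g. rebuild). pick_schema() is a made-up stand-in that
# mirrors only that selection logic, not Tempest code.
DEFAULT = 'server_actions_common_schema'


def pick_schema(schema=DEFAULT):
    if schema is None:
        return 'validation skipped'
    return 'validate with %s' % schema

print(pick_schema())                                 # e.g. revert_resize()
print(pick_schema('server_actions_confirm_resize'))  # confirm_resize()
print(pick_schema(None))                             # rebuild(), for now
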
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index 0f7d4cb..d58ca6f 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -49,6 +49,7 @@
         post_body = json.dumps({'binary': binary, 'host': host_name})
         resp, body = self.put('os-services/enable', post_body)
         body = json.loads(body)
+        self.validate_response(schema.enable_service, resp, body)
         return resp, body['service']
 
     def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/json/tenant_usages_client.py b/tempest/services/compute/json/tenant_usages_client.py
index f3a67dd..f8adae7 100644
--- a/tempest/services/compute/json/tenant_usages_client.py
+++ b/tempest/services/compute/json/tenant_usages_client.py
@@ -16,6 +16,7 @@
 import json
 import urllib
 
+from tempest.api_schema.compute.v2 import tenant_usages as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -35,6 +36,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_tenant, resp, body)
         return resp, body['tenant_usages'][0]
 
     def get_tenant_usage(self, tenant_id, params=None):
@@ -44,4 +46,5 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_tenant, resp, body)
         return resp, body['tenant_usage']
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 17468eb..d1014af 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -61,7 +61,7 @@
         url = "os-volumes/%s" % str(volume_id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.validate_response(schema.get_volume, resp, body)
+        self.validate_response(schema.create_get_volume, resp, body)
         return resp, body['volume']
 
     def create_volume(self, size, **kwargs):
@@ -81,11 +81,14 @@
         post_body = json.dumps({'volume': post_body})
         resp, body = self.post('os-volumes', post_body)
         body = json.loads(body)
+        self.validate_response(schema.create_get_volume, resp, body)
         return resp, body['volume']
 
     def delete_volume(self, volume_id):
         """Deletes the Specified Volume."""
-        return self.delete("os-volumes/%s" % str(volume_id))
+        resp, body = self.delete("os-volumes/%s" % str(volume_id))
+        self.validate_response(schema.delete_volume, resp, body)
+        return resp, body
 
     def wait_for_volume_status(self, volume_id, status):
         """Waits for a Volume to reach a given status."""
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index 6893af2..48be54c 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -15,6 +15,8 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import agents as common_schema
+from tempest.api_schema.compute.v3 import agents as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -33,7 +35,9 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
-        return resp, self._parse_resp(body)
+        body = json.loads(body)
+        self.validate_response(common_schema.list_agents, resp, body)
+        return resp, body['agents']
 
     def create_agent(self, **kwargs):
         """Create an agent build."""
@@ -43,7 +47,9 @@
 
     def delete_agent(self, agent_id):
         """Delete an existing agent build."""
-        return self.delete('os-agents/%s' % str(agent_id))
+        resp, body = self.delete("os-agents/%s" % str(agent_id))
+        self.validate_response(schema.delete_agent, resp, body)
+        return resp, body
 
     def update_agent(self, agent_id, **kwargs):
         """Update an agent build."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 7f73622..d9b7930 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -16,6 +16,7 @@
 import json
 
 from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -40,6 +41,7 @@
         """Get details of the given aggregate."""
         resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
         body = json.loads(body)
+        self.validate_response(schema.get_aggregate, resp, body)
         return resp, body['aggregate']
 
     def create_aggregate(self, **kwargs):
@@ -48,6 +50,7 @@
         resp, body = self.post('os-aggregates', post_body)
 
         body = json.loads(body)
+        self.validate_response(v3_schema.create_aggregate, resp, body)
         return resp, body['aggregate']
 
     def update_aggregate(self, aggregate_id, name, availability_zone=None):
@@ -60,11 +63,14 @@
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
 
         body = json.loads(body)
+        self.validate_response(schema.update_aggregate, resp, body)
         return resp, body['aggregate']
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
-        return self.delete("os-aggregates/%s" % str(aggregate_id))
+        resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+        self.validate_response(v3_schema.delete_aggregate, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         try:
@@ -82,6 +88,7 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(v3_schema.aggregate_add_remove_host, resp, body)
         return resp, body['aggregate']
 
     def remove_host(self, aggregate_id, host):
@@ -93,6 +100,7 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(v3_schema.aggregate_add_remove_host, resp, body)
         return resp, body['aggregate']
 
     def set_metadata(self, aggregate_id, meta):
@@ -104,4 +112,5 @@
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.aggregate_set_metadata, resp, body)
         return resp, body['aggregate']
diff --git a/tempest/services/compute/v3/json/availability_zone_client.py b/tempest/services/compute/v3/json/availability_zone_client.py
index bad2de9..bf74e68 100644
--- a/tempest/services/compute/v3/json/availability_zone_client.py
+++ b/tempest/services/compute/v3/json/availability_zone_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v3 import availability_zone as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,9 +32,12 @@
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list, resp, body)
         return resp, body['availability_zone_info']
 
     def get_availability_zone_list_detail(self):
         resp, body = self.get('os-availability-zone/detail')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list_detail, resp,
+                               body)
         return resp, body['availability_zone_info']
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +33,7 @@
         url = "os-certificates/%s" % (id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_certificate, resp, body)
         return resp, body['certificate']
 
     def create_certificate(self):
@@ -38,4 +41,5 @@
         url = "os-certificates"
         resp, body = self.post(url, None)
         body = json.loads(body)
+        self.validate_response(v3schema.create_certificate, resp, body)
         return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/extensions_client.py b/tempest/services/compute/v3/json/extensions_client.py
index 46f17a4..13292db 100644
--- a/tempest/services/compute/v3/json/extensions_client.py
+++ b/tempest/services/compute/v3/json/extensions_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v3 import extensions as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +32,7 @@
         url = 'extensions'
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_extensions, resp, body)
         return resp, body['extensions']
 
     def is_enabled(self, extension):
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 655e279..5afab5a 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -16,7 +16,11 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import flavors as common_schema
 from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+    as schema_extra_specs
+from tempest.api_schema.compute.v3 import flavors as v3schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -36,6 +40,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(common_schema.list_flavors, resp, body)
         return resp, body['flavors']
 
     def list_flavors_with_detail(self, params=None):
@@ -45,11 +50,13 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(v3schema.list_flavors_details, resp, body)
         return resp, body['flavors']
 
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
         body = json.loads(body)
+        self.validate_response(v3schema.get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -73,11 +80,14 @@
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
+        self.validate_response(v3schema.create_flavor_details, resp, body)
         return resp, body['flavor']
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
-        return self.delete("flavors/%s" % str(flavor_id))
+        resp, body = self.delete("flavors/{0}".format(flavor_id))
+        self.validate_response(v3schema.delete_flavor, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         # Did not use get_flavor_details(id) for verification as it gives
@@ -95,12 +105,15 @@
         resp, body = self.post('flavors/%s/flavor-extra-specs' % flavor_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(v3schema.set_flavor_extra_specs, resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
         resp, body = self.get('flavors/%s/flavor-extra-specs' % flavor_id)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -108,6 +121,8 @@
         resp, body = self.get('flavors/%s/flavor-extra-specs/%s' %
                               (str(flavor_id), key))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -115,18 +130,23 @@
         resp, body = self.put('flavors/%s/flavor-extra-specs/%s' %
                               (flavor_id, key), json.dumps(kwargs))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def unset_flavor_extra_spec(self, flavor_id, key):
         """Unsets extra Specs from the mentioned flavor."""
-        return self.delete('flavors/%s/flavor-extra-specs/%s' %
-                           (str(flavor_id), key))
+        resp, body = self.delete('flavors/%s/flavor-extra-specs/%s' %
+                                 (str(flavor_id), key))
+        self.validate_response(v3schema.unset_flavor_extra_specs, resp, body)
+        return resp, body
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
         resp, body = self.get('flavors/%s/flavor-access' % flavor_id)
         body = json.loads(body)
-        self.validate_response(schema_access.list_flavor_access, resp, body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def add_flavor_access(self, flavor_id, tenant_id):
@@ -139,6 +159,8 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
 
     def remove_flavor_access(self, flavor_id, tenant_id):
@@ -151,4 +173,6 @@
         post_body = json.dumps(post_body)
         resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
         body = json.loads(body)
+        self.validate_response(schema_access.add_remove_list_flavor_access,
+                               resp, body)
         return resp, body['flavor_access']
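
The extra-spec methods above now validate both the read and the unset paths. A hypothetical round trip from a test, assuming `client` is an initialized v3 flavors client, `flavor_id` exists, and the spec key shown is only an example::

    resp, specs = client.get_flavor_extra_spec(flavor_id)
    resp, spec = client.get_flavor_extra_spec_with_key(flavor_id,
                                                       'hw:cpu_policy')
    resp, _ = client.unset_flavor_extra_spec(flavor_id, 'hw:cpu_policy')
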
diff --git a/tempest/services/compute/v3/json/hosts_client.py b/tempest/services/compute/v3/json/hosts_client.py
index bcb9d36..d2eb43d 100644
--- a/tempest/services/compute/v3/json/hosts_client.py
+++ b/tempest/services/compute/v3/json/hosts_client.py
@@ -16,6 +16,7 @@
 import urllib
 
 from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v3 import hosts as v3_schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -45,6 +46,7 @@
 
         resp, body = self.get("os-hosts/%s" % str(hostname))
         body = json.loads(body)
+        self.validate_response(schema.show_host_detail, resp, body)
         return resp, body['host']
 
     def update_host(self, hostname, **kwargs):
@@ -59,6 +61,7 @@
 
         resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
         body = json.loads(body)
+        self.validate_response(v3_schema.update_host, resp, body)
         return resp, body
 
     def startup_host(self, hostname):
@@ -66,6 +69,7 @@
 
         resp, body = self.get("os-hosts/%s/startup" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v3_schema.startup_host, resp, body)
         return resp, body['host']
 
     def shutdown_host(self, hostname):
@@ -73,6 +77,7 @@
 
         resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v3_schema.shutdown_host, resp, body)
         return resp, body['host']
 
     def reboot_host(self, hostname):
@@ -80,4 +85,5 @@
 
         resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
         body = json.loads(body)
+        self.validate_response(v3_schema.reboot_host, resp, body)
         return resp, body['host']
diff --git a/tempest/services/compute/v3/json/hypervisor_client.py b/tempest/services/compute/v3/json/hypervisor_client.py
index 30e391f..51468c9 100644
--- a/tempest/services/compute/v3/json/hypervisor_client.py
+++ b/tempest/services/compute/v3/json/hypervisor_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import hypervisors as common_schema
+from tempest.api_schema.compute.v3 import hypervisors as v3schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,40 +33,49 @@
         """List hypervisors information."""
         resp, body = self.get('os-hypervisors')
         body = json.loads(body)
+        self.validate_response(common_schema.common_hypervisors_detail,
+                               resp, body)
         return resp, body['hypervisors']
 
     def get_hypervisor_list_details(self):
         """Show detailed hypervisors information."""
         resp, body = self.get('os-hypervisors/detail')
         body = json.loads(body)
+        self.validate_response(v3schema.list_hypervisors_detail, resp, body)
         return resp, body['hypervisors']
 
     def get_hypervisor_show_details(self, hyper_id):
         """Display the details of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s' % hyper_id)
         body = json.loads(body)
+        self.validate_response(v3schema.show_hypervisor, resp, body)
         return resp, body['hypervisor']
 
     def get_hypervisor_servers(self, hyper_name):
         """List instances belonging to the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/servers' % hyper_name)
         body = json.loads(body)
+        self.validate_response(v3schema.hypervisors_servers, resp, body)
         return resp, body['hypervisor']
 
     def get_hypervisor_stats(self):
         """Get hypervisor statistics over all compute nodes."""
         resp, body = self.get('os-hypervisors/statistics')
         body = json.loads(body)
+        self.validate_response(common_schema.hypervisor_statistics, resp, body)
         return resp, body['hypervisor_statistics']
 
     def get_hypervisor_uptime(self, hyper_id):
         """Display the uptime of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
         body = json.loads(body)
+        self.validate_response(common_schema.hypervisor_uptime, resp, body)
         return resp, body['hypervisor']
 
     def search_hypervisor(self, hyper_name):
         """Search specified hypervisor."""
         resp, body = self.get('os-hypervisors/search?query=%s' % hyper_name)
         body = json.loads(body)
+        self.validate_response(common_schema.common_hypervisors_detail,
+                               resp, body)
         return resp, body['hypervisors']
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index b45426c..e66ccaa 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -16,6 +16,9 @@
 import json
 import time
 
+from tempest.api_schema.compute import interfaces as common_schema
+from tempest.api_schema.compute import servers as servers_schema
+from tempest.api_schema.compute.v3 import interfaces as schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -32,6 +35,7 @@
     def list_interfaces(self, server):
         resp, body = self.get('servers/%s/os-attach-interfaces' % server)
         body = json.loads(body)
+        self.validate_response(schema.list_interfaces, resp, body)
         return resp, body['interface_attachments']
 
     def create_interface(self, server, port_id=None, network_id=None,
@@ -59,6 +63,7 @@
         resp, body =\
             self.delete('servers/%s/os-attach-interfaces/%s' % (server,
                                                                 port_id))
+        self.validate_response(common_schema.delete_interface, resp, body)
         return resp, body
 
     def wait_for_interface_status(self, server, port_id, status):
@@ -91,6 +96,8 @@
         })
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
+        self.validate_response(servers_schema.server_actions_common_schema,
+                               resp, body)
         return resp, body
 
     def remove_fixed_ip(self, server_id, ip_address):
@@ -102,4 +109,6 @@
         })
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
+        self.validate_response(servers_schema.server_actions_common_schema,
+                               resp, body)
         return resp, body
diff --git a/tempest/services/compute/v3/json/keypairs_client.py b/tempest/services/compute/v3/json/keypairs_client.py
index d315bc4..f090d7d 100644
--- a/tempest/services/compute/v3/json/keypairs_client.py
+++ b/tempest/services/compute/v3/json/keypairs_client.py
@@ -53,7 +53,10 @@
         post_body = json.dumps(post_body)
         resp, body = self.post("keypairs", body=post_body)
         body = json.loads(body)
+        self.validate_response(schema.create_keypair, resp, body)
         return resp, body['keypair']
 
     def delete_keypair(self, key_name):
-        return self.delete("keypairs/%s" % str(key_name))
+        resp, body = self.delete("keypairs/%s" % str(key_name))
+        self.validate_response(schema.delete_keypair, resp, body)
+        return resp, body
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
new file mode 100644
index 0000000..c821567
--- /dev/null
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import migrations as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsV3ClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(MigrationsV3ClientJSON, self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_v3_type
+
+    def list_migrations(self, params=None):
+        """Lists all migrations."""
+
+        url = 'os-migrations'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.list_migrations, resp, body)
+        return resp, body['migrations']
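
The new migrations client only adds a single read call; optional filters are simply urlencoded onto the resource URL, as sketched below (Python 2 `urllib`, matching the import in this file; the filter keys are hypothetical)::

    import urllib

    params = {'status': 'migrating', 'host': 'compute-1'}  # hypothetical filters
    url = 'os-migrations'
    if params:
        url += '?%s' % urllib.urlencode(params)
    # url is now e.g. 'os-migrations?status=migrating&host=compute-1'
    # (the ordering of the query arguments follows dict iteration order)
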
diff --git a/tempest/services/compute/v3/json/quotas_client.py b/tempest/services/compute/v3/json/quotas_client.py
index a8507c4..37a8906 100644
--- a/tempest/services/compute/v3/json/quotas_client.py
+++ b/tempest/services/compute/v3/json/quotas_client.py
@@ -45,6 +45,7 @@
         url = 'os-quota-sets/%s/detail' % str(tenant_id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.quota_set_detail, resp, body)
         return resp, body['quota_set']
 
     def get_default_quota_set(self, tenant_id):
@@ -56,7 +57,7 @@
         self.validate_response(schema.quota_set, resp, body)
         return resp, body['quota_set']
 
-    def update_quota_set(self, tenant_id, force=None,
+    def update_quota_set(self, tenant_id, user_id=None, force=None,
                          metadata_items=None, ram=None, floating_ips=None,
                          fixed_ips=None, key_pairs=None, instances=None,
                          security_group_rules=None, cores=None,
@@ -97,7 +98,13 @@
             post_body['security_groups'] = security_groups
 
         post_body = json.dumps({'quota_set': post_body})
-        resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body)
+
+        if user_id:
+            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+                                  (str(tenant_id), str(user_id)), post_body)
+        else:
+            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+                                  post_body)
 
         body = json.loads(body)
         self.validate_response(schema.quota_set, resp, body)
@@ -105,4 +112,6 @@
 
     def delete_quota_set(self, tenant_id):
         """Delete the tenant's quota set."""
-        return self.delete('os-quota-sets/%s' % str(tenant_id))
+        resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+        self.validate_response(schema.delete_quota, resp, body)
+        return resp, body
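
With the new `user_id` argument, `update_quota_set` targets the per-user quota set instead of the tenant-wide one by appending a query parameter. A hypothetical call from a test, assuming `client` is an initialized v3 quotas client::

    # Tenant-wide update: PUT os-quota-sets/<tenant_id>
    resp, quota_set = client.update_quota_set(tenant_id, cores=20)
    # Per-user update:    PUT os-quota-sets/<tenant_id>?user_id=<user_id>
    resp, quota_set = client.update_quota_set(tenant_id, user_id=user_id,
                                              cores=10)
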
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 6f492d0..0ccbe7f 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -19,6 +19,7 @@
 import time
 import urllib
 
+from tempest.api_schema.compute import servers as common_schema
 from tempest.api_schema.compute.v3 import servers as schema
 from tempest.common import rest_client
 from tempest.common import waiters
@@ -54,6 +55,7 @@
         max_count: Count of maximum number of instances to launch.
         disk_config: Determines if user or admin controls disk configuration.
         return_reservation_id: Enable/Disable the return of reservation id
+        block_device_mapping: Block device mapping for the server.
         """
         post_body = {
             'name': name,
@@ -74,7 +76,9 @@
                        ('metadata', 'meta'),
                        ('os-disk-config:disk_config', 'disk_config'),
                        ('os-multiple-create:return_reservation_id',
-                        'return_reservation_id')]:
+                        'return_reservation_id'),
+                       ('os-block-device-mapping:block_device_mapping',
+                        'block_device_mapping')]:
             if isinstance(option, tuple):
                 post_param = option[0]
                 key = option[1]
@@ -125,6 +129,7 @@
         post_body = json.dumps({'server': post_body})
         resp, body = self.put("servers/%s" % str(server_id), post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_server, resp, body)
         return resp, body['server']
 
     def get_server(self, server_id):
@@ -135,7 +140,9 @@
 
     def delete_server(self, server_id):
         """Deletes the given server."""
-        return self.delete("servers/%s" % str(server_id))
+        resp, body = self.delete("servers/%s" % str(server_id))
+        self.validate_response(common_schema.delete_server, resp, body)
+        return resp, body
 
     def list_servers(self, params=None):
         """Lists all servers for a user."""
@@ -146,6 +153,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(common_schema.list_servers, resp, body)
         return resp, body
 
     def list_servers_with_detail(self, params=None):
@@ -188,6 +196,7 @@
         """Lists all addresses for a server."""
         resp, body = self.get("servers/%s/ips" % str(server_id))
         body = json.loads(body)
+        self.validate_response(schema.list_addresses, resp, body)
         return resp, body['addresses']
 
     def list_addresses_by_network(self, server_id, network_id):
@@ -195,14 +204,28 @@
         resp, body = self.get("servers/%s/ips/%s" %
                               (str(server_id), network_id))
         body = json.loads(body)
+        self.validate_response(schema.list_addresses_by_network, resp, body)
         return resp, body
 
-    def action(self, server_id, action_name, response_key, **kwargs):
+    def action(self, server_id, action_name, response_key,
+               schema=common_schema.server_actions_common_schema, **kwargs):
         post_body = json.dumps({action_name: kwargs})
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
         if response_key is not None:
-            body = json.loads(body)[response_key]
+            body = json.loads(body)
+            # Check whether the schema is 'None': callers can pass 'None' to
+            # skip response validation for server actions that do not have a
+            # schema implemented yet. Once every server action has its schema
+            # implemented, this check can be removed so that all actions
+            # validate their responses.
+            # TODO(GMann): Remove the below 'if' check once all server action
+            # schemas are implemented.
+            if schema is not None:
+                self.validate_response(schema, resp, body)
+            body = body[response_key]
+        else:
+            self.validate_response(schema, resp, body)
         return resp, body
 
     def create_backup(self, server_id, backup_type, rotation, name):
@@ -214,13 +237,15 @@
 
     def change_password(self, server_id, admin_password):
         """Changes the root password for the server."""
-        return self.action(server_id, 'change_password', None,
+        return self.action(server_id, 'change_password',
+                           None, schema.server_actions_change_password,
                            admin_password=admin_password)
 
     def get_password(self, server_id):
         resp, body = self.get("servers/%s/os-server-password" %
                               str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.get_password, resp, body)
         return resp, body
 
     def delete_password(self, server_id):
@@ -229,8 +254,11 @@
         Note that this does not actually change the instance server
         password.
         """
-        return self.delete("servers/%s/os-server-password" %
-                           str(server_id))
+        resp, body = self.delete("servers/%s/os-server-password" %
+                                 str(server_id))
+        self.validate_response(common_schema.server_actions_delete_password,
+                               resp, body)
+        return resp, body
 
     def reboot(self, server_id, reboot_type):
         """Reboots a server."""
@@ -242,7 +270,7 @@
         if 'disk_config' in kwargs:
             kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
             del kwargs['disk_config']
-        return self.action(server_id, 'rebuild', 'server', **kwargs)
+        return self.action(server_id, 'rebuild', 'server', None, **kwargs)
 
     def resize(self, server_id, flavor_ref, **kwargs):
         """Changes the flavor of a server."""
@@ -280,6 +308,7 @@
     def list_server_metadata(self, server_id):
         resp, body = self.get("servers/%s/metadata" % str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.list_server_metadata, resp, body)
         return resp, body['metadata']
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -290,6 +319,7 @@
         resp, body = self.put('servers/%s/metadata' % str(server_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.set_server_metadata, resp, body)
         return resp, body['metadata']
 
     def update_server_metadata(self, server_id, meta):
@@ -297,11 +327,14 @@
         resp, body = self.post('servers/%s/metadata' % str(server_id),
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_server_metadata, resp, body)
         return resp, body['metadata']
 
     def get_server_metadata_item(self, server_id, key):
         resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['metadata']
 
     def set_server_metadata_item(self, server_id, key, meta):
@@ -309,11 +342,15 @@
         resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['metadata']
 
     def delete_server_metadata_item(self, server_id, key):
         resp, body = self.delete("servers/%s/metadata/%s" %
                                  (str(server_id), key))
+        self.validate_response(common_schema.delete_server_metadata_item,
+                               resp, body)
         return resp, body
 
     def stop(self, server_id, **kwargs):
@@ -324,12 +361,17 @@
 
     def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
         """Attaches a volume to a server instance."""
-        return self.action(server_id, 'attach', None, volume_id=volume_id,
-                           device=device)
+        resp, body = self.action(server_id, 'attach', None,
+                                 volume_id=volume_id, device=device)
+        self.validate_response(schema.attach_detach_volume, resp, body)
+        return resp, body
 
     def detach_volume(self, server_id, volume_id):
         """Detaches a volume from a server instance."""
-        return self.action(server_id, 'detach', None, volume_id=volume_id)
+        resp, body = self.action(server_id, 'detach', None,
+                                 volume_id=volume_id)
+        self.validate_response(schema.attach_detach_volume, resp, body)
+        return resp, body
 
     def live_migrate_server(self, server_id, dest_host, use_block_migration):
         """This should be called with administrator privileges ."""
@@ -344,6 +386,8 @@
 
         resp, body = self.post("servers/%s/action" % str(server_id),
                                req_body)
+        self.validate_response(common_schema.server_actions_common_schema,
+                               resp, body)
         return resp, body
 
     def migrate_server(self, server_id, **kwargs):
@@ -391,12 +435,16 @@
         return self.action(server_id, 'shelve_offload', None, **kwargs)
 
     def get_console_output(self, server_id, length):
+        if length is None:
+            # NOTE(mriedem): -1 means optional/unlimited in the nova v3 API.
+            length = -1
         return self.action(server_id, 'get_console_output', 'output',
-                           length=length)
+                           common_schema.get_console_output, length=length)
 
     def rescue_server(self, server_id, **kwargs):
         """Rescue the provided server."""
-        return self.action(server_id, 'rescue', None, **kwargs)
+        return self.action(server_id, 'rescue', 'admin_password',
+                           None, **kwargs)
 
     def unrescue_server(self, server_id):
         """Unrescue the provided server."""
@@ -408,19 +456,19 @@
                               str(server_id))
         return resp, json.loads(body)
 
-    def list_instance_actions(self, server_id):
+    def list_server_actions(self, server_id):
         """List the provided server action."""
-        resp, body = self.get("servers/%s/os-instance-actions" %
+        resp, body = self.get("servers/%s/os-server-actions" %
                               str(server_id))
         body = json.loads(body)
-        return resp, body['instance_actions']
+        return resp, body['server_actions']
 
-    def get_instance_action(self, server_id, request_id):
+    def get_server_action(self, server_id, request_id):
         """Returns the action details of the provided server."""
-        resp, body = self.get("servers/%s/os-instance-actions/%s" %
+        resp, body = self.get("servers/%s/os-server-actions/%s" %
                               (str(server_id), str(request_id)))
         body = json.loads(body)
-        return resp, body['instance_action']
+        return resp, body['server_action']
 
     def force_delete_server(self, server_id, **kwargs):
         """Force delete a server."""
@@ -440,6 +488,7 @@
         resp, body = self.post('servers/%s/action' % str(server_id),
                                post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.get_vnc_console, resp, body)
         return resp, body['console']
 
     def reset_network(self, server_id, **kwargs):
@@ -449,3 +498,13 @@
     def inject_network_info(self, server_id, **kwargs):
         """Inject the Network Info into server"""
         return self.action(server_id, 'inject_network_info', None, **kwargs)
+
+    def get_spice_console(self, server_id, console_type):
+        """Get URL of Spice console."""
+        return self.action(server_id, "get_spice_console"
+                           "console", None, type=console_type)
+
+    def get_rdp_console(self, server_id, console_type):
+        """Get URL of RDP console."""
+        return self.action(server_id, "get_rdp_console"
+                           "console", None, type=console_type)
diff --git a/tempest/services/compute/v3/json/services_client.py b/tempest/services/compute/v3/json/services_client.py
index 88c4d16..96ff580 100644
--- a/tempest/services/compute/v3/json/services_client.py
+++ b/tempest/services/compute/v3/json/services_client.py
@@ -54,6 +54,7 @@
         })
         resp, body = self.put('os-services/enable', post_body)
         body = json.loads(body)
+        self.validate_response(schema.enable_service, resp, body)
         return resp, body['service']
 
     def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/v3/json/version_client.py b/tempest/services/compute/v3/json/version_client.py
index b560c58..568678d 100644
--- a/tempest/services/compute/v3/json/version_client.py
+++ b/tempest/services/compute/v3/json/version_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute import version as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -30,4 +31,5 @@
     def get_version(self):
         resp, body = self.get('')
         body = json.loads(body)
+        self.validate_response(schema.version, resp, body)
         return resp, body['version']
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
index 5b250ee..9c2d4aa 100644
--- a/tempest/services/compute/xml/aggregates_client.py
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -16,12 +16,9 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -34,7 +31,7 @@
         self.service = CONF.compute.catalog_type
 
     def _format_aggregate(self, g):
-        agg = xml_to_json(g)
+        agg = xml_utils.xml_to_json(g)
         aggregate = {}
         for key, value in agg.items():
             if key == 'hosts':
@@ -64,21 +61,25 @@
 
     def create_aggregate(self, name, availability_zone=None):
         """Creates a new aggregate."""
-        post_body = Element("aggregate",
-                            name=name,
-                            availability_zone=availability_zone)
+        if availability_zone is not None:
+            post_body = xml_utils.Element("aggregate", name=name,
+                                          availability_zone=availability_zone)
+        else:
+            post_body = xml_utils.Element("aggregate", name=name)
         resp, body = self.post('os-aggregates',
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
         return resp, aggregate
 
     def update_aggregate(self, aggregate_id, name, availability_zone=None):
         """Update a aggregate."""
-        put_body = Element("aggregate",
-                           name=name,
-                           availability_zone=availability_zone)
+        if availability_zone is not None:
+            put_body = xml_utils.Element("aggregate", name=name,
+                                         availability_zone=availability_zone)
+        else:
+            put_body = xml_utils.Element("aggregate", name=name)
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id),
-                              str(Document(put_body)))
+                              str(xml_utils.Document(put_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
         return resp, aggregate
 
@@ -95,30 +96,30 @@
 
     def add_host(self, aggregate_id, host):
         """Adds a host to the given aggregate."""
-        post_body = Element("add_host", host=host)
+        post_body = xml_utils.Element("add_host", host=host)
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
         return resp, aggregate
 
     def remove_host(self, aggregate_id, host):
         """Removes a host from the given aggregate."""
-        post_body = Element("remove_host", host=host)
+        post_body = xml_utils.Element("remove_host", host=host)
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
         return resp, aggregate
 
     def set_metadata(self, aggregate_id, meta):
         """Replaces the aggregate's existing metadata with new metadata."""
-        post_body = Element("set_metadata")
-        metadata = Element("metadata")
+        post_body = xml_utils.Element("set_metadata")
+        metadata = xml_utils.Element("metadata")
         post_body.append(metadata)
         for k, v in meta.items():
-            meta = Element(k)
-            meta.append(Text(v))
+            meta = xml_utils.Element(k)
+            meta.append(xml_utils.Text(v))
             metadata.append(meta)
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         aggregate = self._format_aggregate(etree.fromstring(body))
         return resp, aggregate
diff --git a/tempest/services/compute/xml/availability_zone_client.py b/tempest/services/compute/xml/availability_zone_client.py
index 4d71186..38446b8 100644
--- a/tempest/services/compute/xml/availability_zone_client.py
+++ b/tempest/services/compute/xml/availability_zone_client.py
@@ -16,8 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -31,7 +31,7 @@
         self.service = CONF.compute.catalog_type
 
     def _parse_array(self, node):
-        return [xml_to_json(x) for x in node]
+        return [xml_utils.xml_to_json(x) for x in node]
 
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
diff --git a/tempest/services/compute/xml/extensions_client.py b/tempest/services/compute/xml/extensions_client.py
index 3e8254c..d924dff 100644
--- a/tempest/services/compute/xml/extensions_client.py
+++ b/tempest/services/compute/xml/extensions_client.py
@@ -16,8 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -32,7 +32,7 @@
     def _parse_array(self, node):
         array = []
         for child in node:
-            array.append(xml_to_json(child))
+            array.append(xml_utils.xml_to_json(child))
         return array
 
     def list_extensions(self):
@@ -48,5 +48,5 @@
 
     def get_extension(self, extension_alias):
         resp, body = self.get('extensions/%s' % extension_alias)
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
diff --git a/tempest/services/compute/xml/fixed_ips_client.py b/tempest/services/compute/xml/fixed_ips_client.py
index 0475530..e14ced6 100644
--- a/tempest/services/compute/xml/fixed_ips_client.py
+++ b/tempest/services/compute/xml/fixed_ips_client.py
@@ -15,10 +15,8 @@
 
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
 
 CONF = config.CONF
 
@@ -43,7 +41,7 @@
         # accept any action key value here to permit tests to cover cases with
         # invalid actions raising badrequest.
         key, value = body.popitem()
-        xml_body = Element(key)
-        xml_body.append(Text(value))
-        resp, body = self.post(url, str(Document(xml_body)))
+        xml_body = xml_utils.Element(key)
+        xml_body.append(xml_utils.Text(value))
+        resp, body = self.post(url, str(xml_utils.Document(xml_body)))
         return resp, body
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 68a27c9..68ef323 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -18,12 +18,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -76,7 +72,7 @@
         return flavor
 
     def _parse_array(self, node):
-        return [self._format_flavor(xml_to_json(x)) for x in node]
+        return [self._format_flavor(xml_utils.xml_to_json(x)) for x in node]
 
     def _list_flavors(self, url, params):
         if params:
@@ -96,19 +92,19 @@
 
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         flavor = self._format_flavor(body)
         return resp, flavor
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
         """Creates a new flavor or instance type."""
-        flavor = Element("flavor",
-                         xmlns=XMLNS_11,
-                         ram=ram,
-                         vcpus=vcpus,
-                         disk=disk,
-                         id=flavor_id,
-                         name=name)
+        flavor = xml_utils.Element("flavor",
+                                   xmlns=xml_utils.XMLNS_11,
+                                   ram=ram,
+                                   vcpus=vcpus,
+                                   disk=disk,
+                                   id=flavor_id,
+                                   name=name)
         if kwargs.get('rxtx'):
             flavor.add_attr('rxtx_factor', kwargs.get('rxtx'))
         if kwargs.get('swap'):
@@ -121,8 +117,8 @@
                             kwargs.get('is_public'))
         flavor.add_attr('xmlns:OS-FLV-EXT-DATA', XMLNS_OS_FLV_EXT_DATA)
         flavor.add_attr('xmlns:os-flavor-access', XMLNS_OS_FLV_ACCESS)
-        resp, body = self.post('flavors', str(Document(flavor)))
-        body = xml_to_json(etree.fromstring(body))
+        resp, body = self.post('flavors', str(xml_utils.Document(flavor)))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         flavor = self._format_flavor(body)
         return resp, flavor
 
@@ -142,18 +138,18 @@
 
     def set_flavor_extra_spec(self, flavor_id, specs):
         """Sets extra Specs to the mentioned flavor."""
-        extra_specs = Element("extra_specs")
+        extra_specs = xml_utils.Element("extra_specs")
         for key in specs.keys():
             extra_specs.add_attr(key, specs[key])
         resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
-                               str(Document(extra_specs)))
-        body = xml_to_json(etree.fromstring(body))
+                               str(xml_utils.Document(extra_specs)))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs of the mentioned flavor."""
         resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -163,21 +159,21 @@
         body = {}
         element = etree.fromstring(xml_body)
         key = element.get('key')
-        body[key] = xml_to_json(element)
+        body[key] = xml_utils.xml_to_json(element)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
         """Update extra Specs details of the mentioned flavor and key."""
-        doc = Document()
+        doc = xml_utils.Document()
         for (k, v) in kwargs.items():
-            element = Element(k)
+            element = xml_utils.Element(k)
             doc.append(element)
-            value = Text(v)
+            value = xml_utils.Text(v)
             element.append(value)
 
         resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                               (flavor_id, key), str(doc))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, {key: body}
 
     def unset_flavor_extra_spec(self, flavor_id, key):
@@ -186,7 +182,7 @@
                            key))
 
     def _parse_array_access(self, node):
-        return [xml_to_json(x) for x in node]
+        return [xml_utils.xml_to_json(x) for x in node]
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
@@ -196,8 +192,8 @@
 
     def add_flavor_access(self, flavor_id, tenant_id):
         """Add flavor access for the specified tenant."""
-        doc = Document()
-        server = Element("addTenantAccess")
+        doc = xml_utils.Document()
+        server = xml_utils.Element("addTenantAccess")
         doc.append(server)
         server.add_attr("tenant", tenant_id)
         resp, body = self.post('flavors/%s/action' % str(flavor_id), str(doc))
@@ -206,8 +202,8 @@
 
     def remove_flavor_access(self, flavor_id, tenant_id):
         """Remove flavor access from the specified tenant."""
-        doc = Document()
-        server = Element("removeTenantAccess")
+        doc = xml_utils.Document()
+        server = xml_utils.Element("removeTenantAccess")
         doc.append(server)
         server.add_attr("tenant", tenant_id)
         resp, body = self.post('flavors/%s/action' % str(flavor_id), str(doc))
diff --git a/tempest/services/compute/xml/floating_ips_client.py b/tempest/services/compute/xml/floating_ips_client.py
index be54753..fa4aa07 100644
--- a/tempest/services/compute/xml/floating_ips_client.py
+++ b/tempest/services/compute/xml/floating_ips_client.py
@@ -17,12 +17,9 @@
 import urllib
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -37,11 +34,11 @@
     def _parse_array(self, node):
         array = []
         for child in node.getchildren():
-            array.append(xml_to_json(child))
+            array.append(xml_utils.xml_to_json(child))
         return array
 
     def _parse_floating_ip(self, body):
-        json = xml_to_json(body)
+        json = xml_utils.xml_to_json(body)
         return json
 
     def list_floating_ips(self, params=None):
@@ -67,9 +64,9 @@
         """Allocate a floating IP to the project."""
         url = 'os-floating-ips'
         if pool_name:
-            doc = Document()
-            pool = Element("pool")
-            pool.append(Text(pool_name))
+            doc = xml_utils.Document()
+            pool = xml_utils.Element("pool")
+            pool.append(xml_utils.Text(pool_name))
             doc.append(pool)
             resp, body = self.post(url, str(doc))
         else:
@@ -86,8 +83,8 @@
     def associate_floating_ip_to_server(self, floating_ip, server_id):
         """Associate the provided floating IP to a specific server."""
         url = "servers/%s/action" % str(server_id)
-        doc = Document()
-        server = Element("addFloatingIp")
+        doc = xml_utils.Document()
+        server = xml_utils.Element("addFloatingIp")
         doc.append(server)
         server.add_attr("address", floating_ip)
         resp, body = self.post(url, str(doc))
@@ -96,8 +93,8 @@
     def disassociate_floating_ip_from_server(self, floating_ip, server_id):
         """Disassociate the provided floating IP from a specific server."""
         url = "servers/%s/action" % str(server_id)
-        doc = Document()
-        server = Element("removeFloatingIp")
+        doc = xml_utils.Document()
+        server = xml_utils.Element("removeFloatingIp")
         doc.append(server)
         server.add_attr("address", floating_ip)
         resp, body = self.post(url, str(doc))
diff --git a/tempest/services/compute/xml/hosts_client.py b/tempest/services/compute/xml/hosts_client.py
index b74cd04..23a7dd6 100644
--- a/tempest/services/compute/xml/hosts_client.py
+++ b/tempest/services/compute/xml/hosts_client.py
@@ -16,10 +16,8 @@
 
 from lxml import etree
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -40,7 +38,7 @@
 
         resp, body = self.get(url)
         node = etree.fromstring(body)
-        body = [xml_to_json(x) for x in node.getchildren()]
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
         return resp, body
 
     def show_host_detail(self, hostname):
@@ -48,20 +46,20 @@
 
         resp, body = self.get("os-hosts/%s" % str(hostname))
         node = etree.fromstring(body)
-        body = [xml_to_json(node)]
+        body = [xml_utils.xml_to_json(node)]
         return resp, body
 
     def update_host(self, hostname, **kwargs):
         """Update a host."""
 
-        request_body = Element("updates")
+        request_body = xml_utils.Element("updates")
         if kwargs:
             for k, v in kwargs.iteritems():
-                request_body.append(Element(k, v))
+                request_body.append(xml_utils.Element(k, v))
         resp, body = self.put("os-hosts/%s" % str(hostname),
-                              str(Document(request_body)))
+                              str(xml_utils.Document(request_body)))
         node = etree.fromstring(body)
-        body = [xml_to_json(x) for x in node.getchildren()]
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
         return resp, body
 
     def startup_host(self, hostname):
@@ -69,7 +67,7 @@
 
         resp, body = self.get("os-hosts/%s/startup" % str(hostname))
         node = etree.fromstring(body)
-        body = [xml_to_json(x) for x in node.getchildren()]
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
         return resp, body
 
     def shutdown_host(self, hostname):
@@ -77,7 +75,7 @@
 
         resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
         node = etree.fromstring(body)
-        body = [xml_to_json(x) for x in node.getchildren()]
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
         return resp, body
 
     def reboot_host(self, hostname):
@@ -85,5 +83,5 @@
 
         resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
         node = etree.fromstring(body)
-        body = [xml_to_json(x) for x in node.getchildren()]
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
         return resp, body
diff --git a/tempest/services/compute/xml/hypervisor_client.py b/tempest/services/compute/xml/hypervisor_client.py
index ecd7541..1452708 100644
--- a/tempest/services/compute/xml/hypervisor_client.py
+++ b/tempest/services/compute/xml/hypervisor_client.py
@@ -16,8 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -30,7 +30,7 @@
         self.service = CONF.compute.catalog_type
 
     def _parse_array(self, node):
-        return [xml_to_json(x) for x in node]
+        return [xml_utils.xml_to_json(x) for x in node]
 
     def get_hypervisor_list(self):
         """List hypervisors information."""
@@ -47,7 +47,7 @@
     def get_hypervisor_show_details(self, hyper_id):
         """Display the details of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s' % hyper_id)
-        hypervisor = xml_to_json(etree.fromstring(body))
+        hypervisor = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, hypervisor
 
     def get_hypervisor_servers(self, hyper_name):
@@ -59,13 +59,13 @@
     def get_hypervisor_stats(self):
         """Get hypervisor statistics over all compute nodes."""
         resp, body = self.get('os-hypervisors/statistics')
-        stats = xml_to_json(etree.fromstring(body))
+        stats = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, stats
 
     def get_hypervisor_uptime(self, hyper_id):
         """Display the uptime of the specified hypervisor."""
         resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
-        uptime = xml_to_json(etree.fromstring(body))
+        uptime = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, uptime
 
     def search_hypervisor(self, hyper_name):
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 9d529be..6b15404 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -19,13 +19,9 @@
 
 from tempest.common import rest_client
 from tempest.common import waiters
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -40,24 +36,24 @@
         self.build_timeout = CONF.compute.build_timeout
 
     def _parse_server(self, node):
-        data = xml_to_json(node)
+        data = xml_utils.xml_to_json(node)
         return self._parse_links(node, data)
 
     def _parse_image(self, node):
         """Parses detailed XML image information into dictionary."""
-        data = xml_to_json(node)
+        data = xml_utils.xml_to_json(node)
 
         self._parse_links(node, data)
 
         # parse all metadata
         if 'metadata' in data:
-            tag = node.find('{%s}metadata' % XMLNS_11)
+            tag = node.find('{%s}metadata' % xml_utils.XMLNS_11)
             data['metadata'] = dict((x.get('key'), x.text)
                                     for x in tag.getchildren())
 
         # parse server information
         if 'server' in data:
-            tag = node.find('{%s}server' % XMLNS_11)
+            tag = node.find('{%s}server' % xml_utils.XMLNS_11)
             data['server'] = self._parse_server(tag)
         return data
 
@@ -67,7 +63,7 @@
         if 'link' in data:
             # remove single link element
             del data['link']
-            data['links'] = [xml_to_json(x) for x in
+            data['links'] = [xml_utils.xml_to_json(x) for x in
                              node.findall('{http://www.w3.org/2005/Atom}link')]
         return data
 
@@ -93,17 +89,17 @@
 
     def create_image(self, server_id, name, meta=None):
         """Creates an image of the original server."""
-        post_body = Element('createImage', name=name)
+        post_body = xml_utils.Element('createImage', name=name)
 
         if meta:
-            metadata = Element('metadata')
+            metadata = xml_utils.Element('metadata')
             post_body.append(metadata)
             for k, v in meta.items():
-                data = Element('meta', key=k)
-                data.append(Text(v))
+                data = xml_utils.Element('meta', key=k)
+                data.append(xml_utils.Text(v))
                 metadata.append(data)
         resp, body = self.post('servers/%s/action' % str(server_id),
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         return resp, body
 
     def list_images(self, params=None):
@@ -144,10 +140,10 @@
         waiters.wait_for_image_status(self, image_id, status)
 
     def _metadata_body(self, meta):
-        post_body = Element('metadata')
+        post_body = xml_utils.Element('metadata')
         for k, v in meta.items():
-            data = Element('meta', key=k)
-            data.append(Text(v))
+            data = xml_utils.Element('meta', key=k)
+            data.append(xml_utils.Text(v))
             post_body.append(data)
         return post_body
 
@@ -161,7 +157,7 @@
         """Sets the metadata for an image."""
         post_body = self._metadata_body(meta)
         resp, body = self.put('images/%s/metadata' % image_id,
-                              str(Document(post_body)))
+                              str(xml_utils.Document(post_body)))
         body = self._parse_key_value(etree.fromstring(body))
         return resp, body
 
@@ -169,7 +165,7 @@
         """Updates the metadata for an image."""
         post_body = self._metadata_body(meta)
         resp, body = self.post('images/%s/metadata' % str(image_id),
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         body = self._parse_key_value(etree.fromstring(body))
         return resp, body
 
@@ -183,19 +179,19 @@
     def set_image_metadata_item(self, image_id, key, meta):
         """Sets the value for a specific image metadata key."""
         for k, v in meta.items():
-            post_body = Element('meta', key=key)
-            post_body.append(Text(v))
+            post_body = xml_utils.Element('meta', key=key)
+            post_body.append(xml_utils.Text(v))
         resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
-                              str(Document(post_body)))
-        body = xml_to_json(etree.fromstring(body))
+                              str(xml_utils.Document(post_body)))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def update_image_metadata_item(self, image_id, key, meta):
         """Sets the value for a specific image metadata key."""
-        post_body = Document('meta', Text(meta), key=key)
+        post_body = xml_utils.Document('meta', xml_utils.Text(meta), key=key)
         resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
                               post_body)
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body['meta']
 
     def delete_image_metadata_item(self, image_id, key):
diff --git a/tempest/services/compute/xml/instance_usage_audit_log_client.py b/tempest/services/compute/xml/instance_usage_audit_log_client.py
index 1cd8c07..b139db1 100644
--- a/tempest/services/compute/xml/instance_usage_audit_log_client.py
+++ b/tempest/services/compute/xml/instance_usage_audit_log_client.py
@@ -16,8 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -33,11 +33,13 @@
     def list_instance_usage_audit_logs(self):
         url = 'os-instance_usage_audit_log'
         resp, body = self.get(url)
-        instance_usage_audit_logs = xml_to_json(etree.fromstring(body))
+        instance_usage_audit_logs = xml_utils.xml_to_json(
+            etree.fromstring(body))
         return resp, instance_usage_audit_logs
 
     def get_instance_usage_audit_log(self, time_before):
         url = 'os-instance_usage_audit_log/%s' % time_before
         resp, body = self.get(url)
-        instance_usage_audit_log = xml_to_json(etree.fromstring(body))
+        instance_usage_audit_log = xml_utils.xml_to_json(
+            etree.fromstring(body))
         return resp, instance_usage_audit_log
diff --git a/tempest/services/compute/xml/interfaces_client.py b/tempest/services/compute/xml/interfaces_client.py
index 8d4bfcc..e30a97c 100644
--- a/tempest/services/compute/xml/interfaces_client.py
+++ b/tempest/services/compute/xml/interfaces_client.py
@@ -18,13 +18,9 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -37,9 +33,9 @@
         self.service = CONF.compute.catalog_type
 
     def _process_xml_interface(self, node):
-        iface = xml_to_json(node)
+        iface = xml_utils.xml_to_json(node)
         # NOTE(danms): if multiple addresses per interface is ever required,
-        # xml_to_json will need to be fixed or replaced in this case
+        # xml_utils.xml_to_json will need to be fixed or replaced in this case
         iface['fixed_ips'] = [dict(iface['fixed_ips']['fixed_ip'].items())]
         return iface
 
@@ -52,21 +48,21 @@
 
     def create_interface(self, server, port_id=None, network_id=None,
                          fixed_ip=None):
-        doc = Document()
-        iface = Element('interfaceAttachment')
+        doc = xml_utils.Document()
+        iface = xml_utils.Element('interfaceAttachment')
         if port_id:
-            _port_id = Element('port_id')
-            _port_id.append(Text(port_id))
+            _port_id = xml_utils.Element('port_id')
+            _port_id.append(xml_utils.Text(port_id))
             iface.append(_port_id)
         if network_id:
-            _network_id = Element('net_id')
-            _network_id.append(Text(network_id))
+            _network_id = xml_utils.Element('net_id')
+            _network_id.append(xml_utils.Text(network_id))
             iface.append(_network_id)
         if fixed_ip:
-            _fixed_ips = Element('fixed_ips')
-            _fixed_ip = Element('fixed_ip')
-            _ip_address = Element('ip_address')
-            _ip_address.append(Text(fixed_ip))
+            _fixed_ips = xml_utils.Element('fixed_ips')
+            _fixed_ip = xml_utils.Element('fixed_ip')
+            _ip_address = xml_utils.Element('ip_address')
+            _ip_address.append(xml_utils.Text(fixed_ip))
             _fixed_ip.append(_ip_address)
             _fixed_ips.append(_fixed_ip)
             iface.append(_fixed_ips)
@@ -108,18 +104,18 @@
 
     def add_fixed_ip(self, server_id, network_id):
         """Add a fixed IP to input server instance."""
-        post_body = Element("addFixedIp",
-                            xmlns=XMLNS_11,
-                            networkId=network_id)
+        post_body = xml_utils.Element("addFixedIp",
+                                      xmlns=xml_utils.XMLNS_11,
+                                      networkId=network_id)
         resp, body = self.post('servers/%s/action' % str(server_id),
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         return resp, body
 
     def remove_fixed_ip(self, server_id, ip_address):
         """Remove input fixed IP from input server instance."""
-        post_body = Element("removeFixedIp",
-                            xmlns=XMLNS_11,
-                            address=ip_address)
+        post_body = xml_utils.Element("removeFixedIp",
+                                      xmlns=xml_utils.XMLNS_11,
+                                      address=ip_address)
         resp, body = self.post('servers/%s/action' % str(server_id),
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         return resp, body
diff --git a/tempest/services/compute/xml/keypairs_client.py b/tempest/services/compute/xml/keypairs_client.py
index fb498c0..8ff37ac 100644
--- a/tempest/services/compute/xml/keypairs_client.py
+++ b/tempest/services/compute/xml/keypairs_client.py
@@ -17,11 +17,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -36,34 +33,35 @@
     def list_keypairs(self):
         resp, body = self.get("os-keypairs")
         node = etree.fromstring(body)
-        body = [{'keypair': xml_to_json(x)} for x in node.getchildren()]
+        body = [{'keypair': xml_utils.xml_to_json(x)} for x in
+                node.getchildren()]
         return resp, body
 
     def get_keypair(self, key_name):
         resp, body = self.get("os-keypairs/%s" % str(key_name))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def create_keypair(self, name, pub_key=None):
-        doc = Document()
+        doc = xml_utils.Document()
 
-        keypair_element = Element("keypair")
+        keypair_element = xml_utils.Element("keypair")
 
         if pub_key:
-            public_key_element = Element("public_key")
-            public_key_text = Text(pub_key)
+            public_key_element = xml_utils.Element("public_key")
+            public_key_text = xml_utils.Text(pub_key)
             public_key_element.append(public_key_text)
             keypair_element.append(public_key_element)
 
-        name_element = Element("name")
-        name_text = Text(name)
+        name_element = xml_utils.Element("name")
+        name_text = xml_utils.Text(name)
         name_element.append(name_text)
         keypair_element.append(name_element)
 
         doc.append(keypair_element)
 
         resp, body = self.post("os-keypairs", body=str(doc))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def delete_keypair(self, key_name):
diff --git a/tempest/services/compute/xml/quotas_client.py b/tempest/services/compute/xml/quotas_client.py
index 911c476..5502fcc 100644
--- a/tempest/services/compute/xml/quotas_client.py
+++ b/tempest/services/compute/xml/quotas_client.py
@@ -16,11 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -51,7 +48,7 @@
         if user_id:
             url += '?user_id=%s' % str(user_id)
         resp, body = self.get(url)
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         body = self._format_quota(body)
         return resp, body
 
@@ -60,12 +57,12 @@
 
         url = 'os-quota-sets/%s/defaults' % str(tenant_id)
         resp, body = self.get(url)
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         body = self._format_quota(body)
         return resp, body
 
-    def update_quota_set(self, tenant_id, force=None,
-                         injected_file_content_bytes=None,
+    def update_quota_set(self, tenant_id, user_id=None,
+                         force=None, injected_file_content_bytes=None,
                          metadata_items=None, ram=None, floating_ips=None,
                          fixed_ips=None, key_pairs=None, instances=None,
                          security_group_rules=None, injected_files=None,
@@ -74,8 +71,8 @@
         """
         Updates the tenant's quota limits for one or more resources
         """
-        post_body = Element("quota_set",
-                            xmlns=XMLNS_11)
+        post_body = xml_utils.Element("quota_set",
+                                      xmlns=xml_utils.XMLNS_11)
 
         if force is not None:
             post_body.add_attr('force', force)
@@ -118,9 +115,15 @@
         if security_groups is not None:
             post_body.add_attr('security_groups', security_groups)
 
-        resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
-                              str(Document(post_body)))
-        body = xml_to_json(etree.fromstring(body))
+        if user_id:
+            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+                                  (str(tenant_id), str(user_id)),
+                                  str(xml_utils.Document(post_body)))
+        else:
+            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+                                  str(xml_utils.Document(post_body)))
+
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         body = self._format_quota(body)
         return resp, body
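
Besides the xml_utils namespacing, update_quota_set() gains an optional user_id argument; when it is supplied the PUT targets the per-user quota URL. A hedged usage sketch with illustrative ids and a client instance taken from tempest's usual client manager::

    def update_quotas(quotas_client):
        # Tenant-level update, same URL as before: os-quota-sets/<tenant_id>
        resp, quota = quotas_client.update_quota_set('tenant-uuid', ram=2048)

        # Per-user update: os-quota-sets/<tenant_id>?user_id=<user_id>
        resp, user_quota = quotas_client.update_quota_set(
            'tenant-uuid', user_id='user-uuid', key_pairs=10)
        return quota, user_quota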
 
diff --git a/tempest/services/compute/xml/security_groups_client.py b/tempest/services/compute/xml/security_groups_client.py
index d53e8da..9eccb90 100644
--- a/tempest/services/compute/xml/security_groups_client.py
+++ b/tempest/services/compute/xml/security_groups_client.py
@@ -17,13 +17,9 @@
 import urllib
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -38,11 +34,11 @@
     def _parse_array(self, node):
         array = []
         for child in node.getchildren():
-            array.append(xml_to_json(child))
+            array.append(xml_utils.xml_to_json(child))
         return array
 
     def _parse_body(self, body):
-        json = xml_to_json(body)
+        json = xml_utils.xml_to_json(body)
         return json
 
     def list_security_groups(self, params=None):
@@ -69,12 +65,12 @@
         name (Required): Name of security group.
         description (Required): Description of security group.
         """
-        security_group = Element("security_group", name=name)
-        des = Element("description")
-        des.append(Text(content=description))
+        security_group = xml_utils.Element("security_group", name=name)
+        des = xml_utils.Element("description")
+        des.append(xml_utils.Text(content=description))
         security_group.append(des)
         resp, body = self.post('os-security-groups',
-                               str(Document(security_group)))
+                               str(xml_utils.Document(security_group)))
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -86,18 +82,18 @@
         name: new name of security group
         description: new description of security group
         """
-        security_group = Element("security_group")
+        security_group = xml_utils.Element("security_group")
         if name:
-            sg_name = Element("name")
-            sg_name.append(Text(content=name))
+            sg_name = xml_utils.Element("name")
+            sg_name.append(xml_utils.Text(content=name))
             security_group.append(sg_name)
         if description:
-            des = Element("description")
-            des.append(Text(content=description))
+            des = xml_utils.Element("description")
+            des.append(xml_utils.Text(content=description))
             security_group.append(des)
         resp, body = self.put('os-security-groups/%s' %
                               str(security_group_id),
-                              str(Document(security_group)))
+                              str(xml_utils.Document(security_group)))
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -117,7 +113,7 @@
         cidr     : CIDR for address range.
         group_id : ID of the Source group
         """
-        group_rule = Element("security_group_rule")
+        group_rule = xml_utils.Element("security_group_rule")
 
         elements = dict()
         elements['cidr'] = kwargs.get('cidr')
@@ -129,12 +125,12 @@
 
         for k, v in elements.items():
             if v is not None:
-                element = Element(k)
-                element.append(Text(content=str(v)))
+                element = xml_utils.Element(k)
+                element.append(xml_utils.Text(content=str(v)))
                 group_rule.append(element)
 
         url = 'os-security-group-rules'
-        resp, body = self.post(url, str(Document(group_rule)))
+        resp, body = self.post(url, str(xml_utils.Document(group_rule)))
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
@@ -151,8 +147,8 @@
         secgroups = body.getchildren()
         for secgroup in secgroups:
             if secgroup.get('id') == security_group_id:
-                node = secgroup.find('{%s}rules' % XMLNS_11)
-                rules = [xml_to_json(x) for x in node.getchildren()]
+                node = secgroup.find('{%s}rules' % xml_utils.XMLNS_11)
+                rules = [xml_utils.xml_to_json(x) for x in node.getchildren()]
                 return resp, rules
         raise exceptions.NotFound('No such Security Group')
 
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 7a2a071..626e655 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -21,14 +21,10 @@
 
 from tempest.common import rest_client
 from tempest.common import waiters
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -60,12 +56,13 @@
 
 def _translate_network_xml_to_json(network):
     return [_translate_ip_xml_json(ip.attrib)
-            for ip in network.findall('{%s}ip' % XMLNS_11)]
+            for ip in network.findall('{%s}ip' % xml_utils.XMLNS_11)]
 
 
 def _translate_addresses_xml_to_json(xml_addresses):
     return dict((network.attrib['id'], _translate_network_xml_to_json(network))
-                for network in xml_addresses.findall('{%s}network' % XMLNS_11))
+                for network in xml_addresses.findall('{%s}network' %
+                                                     xml_utils.XMLNS_11))
 
 
 def _translate_server_xml_to_json(xml_dom):
@@ -97,16 +94,16 @@
                                         'version': 6}],
                    'foo_novanetwork': [{'addr': '192.168.0.4', 'version': 4}]}}
     """
-    nsmap = {'api': XMLNS_11}
+    nsmap = {'api': xml_utils.XMLNS_11}
     addresses = xml_dom.xpath('/api:server/api:addresses', namespaces=nsmap)
     if addresses:
         if len(addresses) > 1:
             raise ValueError('Expected only single `addresses` element.')
         json_addresses = _translate_addresses_xml_to_json(addresses[0])
-        json = xml_to_json(xml_dom)
+        json = xml_utils.xml_to_json(xml_dom)
         json['addresses'] = json_addresses
     else:
-        json = xml_to_json(xml_dom)
+        json = xml_utils.xml_to_json(xml_dom)
     diskConfig = ('{http://docs.openstack.org'
                   '/compute/ext/disk_config/api/v1.1}diskConfig')
     terminated_at = ('{http://docs.openstack.org/'
@@ -122,6 +119,10 @@
                 '/compute/ext/extended_status/api/v1.1}vm_state')
     task_state = ('{http://docs.openstack.org'
                   '/compute/ext/extended_status/api/v1.1}task_state')
+    if 'tenantId' in json:
+        json['tenant_id'] = json.pop('tenantId')
+    if 'userId' in json:
+        json['user_id'] = json.pop('userId')
     if diskConfig in json:
         json['OS-DCF:diskConfig'] = json.pop(diskConfig)
     if terminated_at in json:
@@ -157,7 +158,7 @@
         del json['link']
         json['links'] = []
         for linknode in node.findall('{http://www.w3.org/2005/Atom}link'):
-            json['links'].append(xml_to_json(linknode))
+            json['links'].append(xml_utils.xml_to_json(linknode))
 
     def _parse_server(self, body):
         json = _translate_server_xml_to_json(body)
@@ -165,7 +166,7 @@
         if 'metadata' in json and json['metadata']:
             # NOTE(danms): if there was metadata, we need to re-parse
             # that as a special type
-            metadata_tag = body.find('{%s}metadata' % XMLNS_11)
+            metadata_tag = body.find('{%s}metadata' % xml_utils.XMLNS_11)
             json["metadata"] = self._parse_key_value(metadata_tag)
         if 'link' in json:
             self._parse_links(body, json)
@@ -242,7 +243,13 @@
     def _parse_array(self, node):
         array = []
         for child in node.getchildren():
-            array.append(xml_to_json(child))
+            array.append(xml_utils.xml_to_json(child))
+        return array
+
+    def _parse_server_array(self, node):
+        array = []
+        for child in node.getchildren():
+            array.append(self._parse_server(child))
         return array
 
     def list_servers(self, params=None):
@@ -251,7 +258,7 @@
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
-        servers = self._parse_array(etree.fromstring(body))
+        servers = self._parse_server_array(etree.fromstring(body))
         return resp, {"servers": servers}
 
     def list_servers_with_detail(self, params=None):
@@ -260,13 +267,13 @@
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
-        servers = self._parse_array(etree.fromstring(body))
+        servers = self._parse_server_array(etree.fromstring(body))
         return resp, {"servers": servers}
 
     def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
                       accessIPv6=None, disk_config=None):
-        doc = Document()
-        server = Element("server")
+        doc = xml_utils.Document()
+        server = xml_utils.Element("server")
         doc.append(server)
 
         if name is not None:
@@ -280,15 +287,15 @@
                             "compute/ext/disk_config/api/v1.1")
             server.add_attr("OS-DCF:diskConfig", disk_config)
         if meta is not None:
-            metadata = Element("metadata")
+            metadata = xml_utils.Element("metadata")
             server.append(metadata)
             for k, v in meta:
-                meta = Element("meta", key=k)
-                meta.append(Text(v))
+                meta = xml_utils.Element("meta", key=k)
+                meta.append(xml_utils.Text(v))
                 metadata.append(meta)
 
         resp, body = self.put('servers/%s' % str(server_id), str(doc))
-        return resp, xml_to_json(etree.fromstring(body))
+        return resp, xml_utils.xml_to_json(etree.fromstring(body))
 
     def create_server(self, name, image_ref, flavor_ref, **kwargs):
         """
@@ -311,16 +318,18 @@
         min_count: Count of minimum number of instances to launch.
         max_count: Count of maximum number of instances to launch.
         disk_config: Determines if user or admin controls disk configuration.
+        block_device_mapping: Block device mapping for the server.
         """
-        server = Element("server",
-                         xmlns=XMLNS_11,
-                         imageRef=image_ref,
-                         flavorRef=flavor_ref,
-                         name=name)
+        server = xml_utils.Element("server",
+                                   xmlns=xml_utils.XMLNS_11,
+                                   imageRef=image_ref,
+                                   flavorRef=flavor_ref,
+                                   name=name)
 
         for attr in ["adminPass", "accessIPv4", "accessIPv6", "key_name",
                      "user_data", "availability_zone", "min_count",
-                     "max_count", "return_reservation_id"]:
+                     "max_count", "return_reservation_id",
+                     "block_device_mapping"]:
             if attr in kwargs:
                 server.add_attr(attr, kwargs[attr])
 
@@ -330,46 +339,46 @@
             server.add_attr('OS-DCF:diskConfig', kwargs['disk_config'])
 
         if 'security_groups' in kwargs:
-            secgroups = Element("security_groups")
+            secgroups = xml_utils.Element("security_groups")
             server.append(secgroups)
             for secgroup in kwargs['security_groups']:
-                s = Element("security_group", name=secgroup['name'])
+                s = xml_utils.Element("security_group", name=secgroup['name'])
                 secgroups.append(s)
 
         if 'networks' in kwargs:
-            networks = Element("networks")
+            networks = xml_utils.Element("networks")
             server.append(networks)
             for network in kwargs['networks']:
-                s = Element("network", uuid=network['uuid'],
-                            fixed_ip=network['fixed_ip'])
+                s = xml_utils.Element("network", uuid=network['uuid'],
+                                      fixed_ip=network['fixed_ip'])
                 networks.append(s)
 
         if 'meta' in kwargs:
-            metadata = Element("metadata")
+            metadata = xml_utils.Element("metadata")
             server.append(metadata)
             for k, v in kwargs['meta'].items():
-                meta = Element("meta", key=k)
-                meta.append(Text(v))
+                meta = xml_utils.Element("meta", key=k)
+                meta.append(xml_utils.Text(v))
                 metadata.append(meta)
 
         if 'personality' in kwargs:
-            personality = Element('personality')
+            personality = xml_utils.Element('personality')
             server.append(personality)
             for k in kwargs['personality']:
-                temp = Element('file', path=k['path'])
-                temp.append(Text(k['contents']))
+                temp = xml_utils.Element('file', path=k['path'])
+                temp.append(xml_utils.Text(k['contents']))
                 personality.append(temp)
 
         if 'sched_hints' in kwargs:
             sched_hints = kwargs.get('sched_hints')
-            hints = Element("os:scheduler_hints")
-            hints.add_attr('xmlns:os', XMLNS_11)
+            hints = xml_utils.Element("os:scheduler_hints")
+            hints.add_attr('xmlns:os', xml_utils.XMLNS_11)
             for attr in sched_hints:
-                p1 = Element(attr)
+                p1 = xml_utils.Element(attr)
                 p1.append(sched_hints[attr])
                 hints.append(p1)
             server.append(hints)
-        resp, body = self.post('servers', str(Document(server)))
+        resp, body = self.post('servers', str(xml_utils.Document(server)))
         server = self._parse_server(etree.fromstring(body))
         return resp, server
 
@@ -427,11 +436,11 @@
 
     def action(self, server_id, action_name, response_key, **kwargs):
         if 'xmlns' not in kwargs:
-            kwargs['xmlns'] = XMLNS_11
-        doc = Document((Element(action_name, **kwargs)))
+            kwargs['xmlns'] = xml_utils.XMLNS_11
+        doc = xml_utils.Document((xml_utils.Element(action_name, **kwargs)))
         resp, body = self.post("servers/%s/action" % server_id, str(doc))
         if response_key is not None:
-            body = xml_to_json(etree.fromstring(body))
+            body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def create_backup(self, server_id, backup_type, rotation, name):
@@ -447,7 +456,7 @@
 
     def get_password(self, server_id):
         resp, body = self.get("servers/%s/os-server-password" % str(server_id))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def delete_password(self, server_id):
@@ -470,24 +479,23 @@
                                      "compute/ext/disk_config/api/v1.1"
             kwargs['xmlns:atom'] = "http://www.w3.org/2005/Atom"
         if 'xmlns' not in kwargs:
-            kwargs['xmlns'] = XMLNS_11
+            kwargs['xmlns'] = xml_utils.XMLNS_11
 
         attrs = kwargs.copy()
         if 'metadata' in attrs:
             del attrs['metadata']
-        rebuild = Element("rebuild",
-                          **attrs)
+        rebuild = xml_utils.Element("rebuild", **attrs)
 
         if 'metadata' in kwargs:
-            metadata = Element("metadata")
+            metadata = xml_utils.Element("metadata")
             rebuild.append(metadata)
             for k, v in kwargs['metadata'].items():
-                meta = Element("meta", key=k)
-                meta.append(Text(v))
+                meta = xml_utils.Element("meta", key=k)
+                meta.append(xml_utils.Text(v))
                 metadata.append(meta)
 
         resp, body = self.post('servers/%s/action' % server_id,
-                               str(Document(rebuild)))
+                               str(xml_utils.Document(rebuild)))
         server = self._parse_server(etree.fromstring(body))
         return resp, server
 
@@ -525,14 +533,14 @@
     def live_migrate_server(self, server_id, dest_host, use_block_migration):
         """This should be called with administrator privileges ."""
 
-        req_body = Element("os-migrateLive",
-                           xmlns=XMLNS_11,
-                           disk_over_commit=False,
-                           block_migration=use_block_migration,
-                           host=dest_host)
+        req_body = xml_utils.Element("os-migrateLive",
+                                     xmlns=xml_utils.XMLNS_11,
+                                     disk_over_commit=False,
+                                     block_migration=use_block_migration,
+                                     host=dest_host)
 
         resp, body = self.post("servers/%s/action" % str(server_id),
-                               str(Document(req_body)))
+                               str(xml_utils.Document(req_body)))
         return resp, body
 
     def list_server_metadata(self, server_id):
@@ -541,44 +549,44 @@
         return resp, body
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
-        doc = Document()
+        doc = xml_utils.Document()
         if not no_metadata_field:
-            metadata = Element("metadata")
+            metadata = xml_utils.Element("metadata")
             doc.append(metadata)
             for k, v in meta.items():
-                meta_element = Element("meta", key=k)
-                meta_element.append(Text(v))
+                meta_element = xml_utils.Element("meta", key=k)
+                meta_element.append(xml_utils.Text(v))
                 metadata.append(meta_element)
         resp, body = self.put('servers/%s/metadata' % str(server_id), str(doc))
-        return resp, xml_to_json(etree.fromstring(body))
+        return resp, xml_utils.xml_to_json(etree.fromstring(body))
 
     def update_server_metadata(self, server_id, meta):
-        doc = Document()
-        metadata = Element("metadata")
+        doc = xml_utils.Document()
+        metadata = xml_utils.Element("metadata")
         doc.append(metadata)
         for k, v in meta.items():
-            meta_element = Element("meta", key=k)
-            meta_element.append(Text(v))
+            meta_element = xml_utils.Element("meta", key=k)
+            meta_element.append(xml_utils.Text(v))
             metadata.append(meta_element)
         resp, body = self.post("/servers/%s/metadata" % str(server_id),
                                str(doc))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def get_server_metadata_item(self, server_id, key):
         resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
         return resp, dict([(etree.fromstring(body).attrib['key'],
-                            xml_to_json(etree.fromstring(body)))])
+                            xml_utils.xml_to_json(etree.fromstring(body)))])
 
     def set_server_metadata_item(self, server_id, key, meta):
-        doc = Document()
+        doc = xml_utils.Document()
         for k, v in meta.items():
-            meta_element = Element("meta", key=k)
-            meta_element.append(Text(v))
+            meta_element = xml_utils.Element("meta", key=k)
+            meta_element.append(xml_utils.Text(v))
             doc.append(meta_element)
         resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                               str(doc))
-        return resp, xml_to_json(etree.fromstring(body))
+        return resp, xml_utils.xml_to_json(etree.fromstring(body))
 
     def delete_server_metadata_item(self, server_id, key):
         resp, body = self.delete("servers/%s/metadata/%s" %
@@ -607,10 +615,10 @@
         return self.action(server_id, 'unrescue', None)
 
     def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
-        post_body = Element("volumeAttachment", volumeId=volume_id,
-                            device=device)
+        post_body = xml_utils.Element("volumeAttachment", volumeId=volume_id,
+                                      device=device)
         resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
-                               str(Document(post_body)))
+                               str(xml_utils.Document(post_body)))
         return resp, body
 
     def detach_volume(self, server_id, volume_id):
@@ -623,7 +631,7 @@
     def get_server_diagnostics(self, server_id):
         """Get the usage data for a server."""
         resp, body = self.get("servers/%s/diagnostics" % server_id)
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def list_instance_actions(self, server_id):
@@ -636,7 +644,7 @@
         """Returns the action details of the provided server."""
         resp, body = self.get("servers/%s/os-instance-actions/%s" %
                               (server_id, request_id))
-        body = xml_to_json(etree.fromstring(body))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def force_delete_server(self, server_id, **kwargs):
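
The XML servers client changes are behavioural as well as cosmetic: tenantId/userId keys are normalized to tenant_id/user_id, list_servers() and list_servers_with_detail() now run each entry through the new _parse_server_array() (and therefore _parse_server()), and create_server() forwards a block_device_mapping attribute. A minimal sketch of the listing side, with an illustrative client instance::

    def show_server_owners(servers_client):
        resp, body = servers_client.list_servers_with_detail()
        for server in body['servers']:
            # Keys that the XML payload spells tenantId/userId now come
            # back as tenant_id/user_id, matching the JSON client.
            print(server.get('tenant_id'), server.get('user_id'))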
diff --git a/tempest/services/compute/xml/services_client.py b/tempest/services/compute/xml/services_client.py
index d7b8a60..e1e78d0 100644
--- a/tempest/services/compute/xml/services_client.py
+++ b/tempest/services/compute/xml/services_client.py
@@ -19,10 +19,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -41,7 +39,7 @@
 
         resp, body = self.get(url)
         node = etree.fromstring(body)
-        body = [xml_to_json(x) for x in node.getchildren()]
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
         return resp, body
 
     def enable_service(self, host_name, binary):
@@ -50,12 +48,13 @@
         host_name: Name of host
         binary: Service binary
         """
-        post_body = Element("service")
+        post_body = xml_utils.Element("service")
         post_body.add_attr('binary', binary)
         post_body.add_attr('host', host_name)
 
-        resp, body = self.put('os-services/enable', str(Document(post_body)))
-        body = xml_to_json(etree.fromstring(body))
+        resp, body = self.put('os-services/enable', str(
+            xml_utils.Document(post_body)))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def disable_service(self, host_name, binary):
@@ -64,10 +63,11 @@
         host_name: Name of host
         binary: Service binary
         """
-        post_body = Element("service")
+        post_body = xml_utils.Element("service")
         post_body.add_attr('binary', binary)
         post_body.add_attr('host', host_name)
 
-        resp, body = self.put('os-services/disable', str(Document(post_body)))
-        body = xml_to_json(etree.fromstring(body))
+        resp, body = self.put('os-services/disable', str(
+            xml_utils.Document(post_body)))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
diff --git a/tempest/services/compute/xml/tenant_usages_client.py b/tempest/services/compute/xml/tenant_usages_client.py
index 79f0ac9..0b19f63 100644
--- a/tempest/services/compute/xml/tenant_usages_client.py
+++ b/tempest/services/compute/xml/tenant_usages_client.py
@@ -18,8 +18,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
 
 CONF = config.CONF
 
@@ -32,7 +32,7 @@
         self.service = CONF.compute.catalog_type
 
     def _parse_array(self, node):
-        json = xml_to_json(node)
+        json = xml_utils.xml_to_json(node)
         return json
 
     def list_tenant_usages(self, params=None):
diff --git a/tempest/services/compute/xml/volumes_extensions_client.py b/tempest/services/compute/xml/volumes_extensions_client.py
index 570b715..e9c5035 100644
--- a/tempest/services/compute/xml/volumes_extensions_client.py
+++ b/tempest/services/compute/xml/volumes_extensions_client.py
@@ -19,13 +19,9 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
 
 CONF = config.CONF
 
@@ -51,7 +47,7 @@
                 vol['metadata'] = dict((meta.get('key'),
                                         meta.text) for meta in list(child))
             else:
-                vol[tag] = xml_to_json(child)
+                vol[tag] = xml_utils.xml_to_json(child)
         return vol
 
     def list_volumes(self, params=None):
@@ -96,23 +92,23 @@
         :param display_name: Optional Volume Name.
         :param metadata: An optional dictionary of values for metadata.
         """
-        volume = Element("volume",
-                         xmlns=XMLNS_11,
-                         size=size)
+        volume = xml_utils.Element("volume",
+                                   xmlns=xml_utils.XMLNS_11,
+                                   size=size)
         if display_name:
             volume.add_attr('display_name', display_name)
 
         if metadata:
-            _metadata = Element('metadata')
+            _metadata = xml_utils.Element('metadata')
             volume.append(_metadata)
             for key, value in metadata.items():
-                meta = Element('meta')
+                meta = xml_utils.Element('meta')
                 meta.add_attr('key', key)
-                meta.append(Text(value))
+                meta.append(xml_utils.Text(value))
                 _metadata.append(meta)
 
-        resp, body = self.post('os-volumes', str(Document(volume)))
-        body = xml_to_json(etree.fromstring(body))
+        resp, body = self.post('os-volumes', str(xml_utils.Document(volume)))
+        body = xml_utils.xml_to_json(etree.fromstring(body))
         return resp, body
 
     def delete_volume(self, volume_id):
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index e96b44b..c2c7fd1 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -1,17 +1,16 @@
 # Copyright (c) 2013 Mirantis Inc.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
 import json
 
@@ -32,7 +31,6 @@
 
         It returns pair: resp and parsed resource(s) body.
         """
-
         resp, body = req_fun(uri, headers={
             'Content-Type': 'application/json'
         }, *args, **kwargs)
@@ -48,7 +46,7 @@
     def get_node_group_template(self, tmpl_id):
         """Returns the details of a single node group template."""
 
-        uri = "node-group-templates/%s" % tmpl_id
+        uri = 'node-group-templates/%s' % tmpl_id
         return self._request_and_parse(self.get, uri, 'node_group_template')
 
     def create_node_group_template(self, name, plugin_name, hadoop_version,
@@ -59,7 +57,7 @@
         It supports passing additional params using kwargs and returns created
         object.
         """
-        uri = "node-group-templates"
+        uri = 'node-group-templates'
         body = kwargs.copy()
         body.update({
             'name': name,
@@ -75,7 +73,7 @@
     def delete_node_group_template(self, tmpl_id):
         """Deletes the specified node group template by id."""
 
-        uri = "node-group-templates/%s" % tmpl_id
+        uri = 'node-group-templates/%s' % tmpl_id
         return self.delete(uri)
 
     def list_plugins(self):
@@ -87,7 +85,150 @@
     def get_plugin(self, plugin_name, plugin_version=None):
         """Returns the details of a single plugin."""
 
-        uri = "plugins/%s" % plugin_name
+        uri = 'plugins/%s' % plugin_name
         if plugin_version:
             uri += '/%s' % plugin_version
         return self._request_and_parse(self.get, uri, 'plugin')
+
+    def list_cluster_templates(self):
+        """List all cluster templates for a user."""
+
+        uri = 'cluster-templates'
+        return self._request_and_parse(self.get, uri, 'cluster_templates')
+
+    def get_cluster_template(self, tmpl_id):
+        """Returns the details of a single cluster template."""
+
+        uri = 'cluster-templates/%s' % tmpl_id
+        return self._request_and_parse(self.get, uri, 'cluster_template')
+
+    def create_cluster_template(self, name, plugin_name, hadoop_version,
+                                node_groups, cluster_configs=None,
+                                **kwargs):
+        """Creates cluster template with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object.
+        """
+        uri = 'cluster-templates'
+        body = kwargs.copy()
+        body.update({
+            'name': name,
+            'plugin_name': plugin_name,
+            'hadoop_version': hadoop_version,
+            'node_groups': node_groups,
+            'cluster_configs': cluster_configs or dict(),
+        })
+        return self._request_and_parse(self.post, uri, 'cluster_template',
+                                       body=json.dumps(body))
+
+    def delete_cluster_template(self, tmpl_id):
+        """Deletes the specified cluster template by id."""
+
+        uri = 'cluster-templates/%s' % tmpl_id
+        return self.delete(uri)
+
+    def list_data_sources(self):
+        """List all data sources for a user."""
+
+        uri = 'data-sources'
+        return self._request_and_parse(self.get, uri, 'data_sources')
+
+    def get_data_source(self, source_id):
+        """Returns the details of a single data source."""
+
+        uri = 'data-sources/%s' % source_id
+        return self._request_and_parse(self.get, uri, 'data_source')
+
+    def create_data_source(self, name, data_source_type, url, **kwargs):
+        """Creates data source with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object.
+        """
+        uri = 'data-sources'
+        body = kwargs.copy()
+        body.update({
+            'name': name,
+            'type': data_source_type,
+            'url': url
+        })
+        return self._request_and_parse(self.post, uri, 'data_source',
+                                       body=json.dumps(body))
+
+    def delete_data_source(self, source_id):
+        """Deletes the specified data source by id."""
+
+        uri = 'data-sources/%s' % source_id
+        return self.delete(uri)
+
+    def list_job_binary_internals(self):
+        """List all job binary internals for a user."""
+
+        uri = 'job-binary-internals'
+        return self._request_and_parse(self.get, uri, 'binaries')
+
+    def get_job_binary_internal(self, job_binary_id):
+        """Returns the details of a single job binary internal."""
+
+        uri = 'job-binary-internals/%s' % job_binary_id
+        return self._request_and_parse(self.get, uri, 'job_binary_internal')
+
+    def create_job_binary_internal(self, name, data):
+        """Creates job binary internal with specified params."""
+
+        uri = 'job-binary-internals/%s' % name
+        return self._request_and_parse(self.put, uri, 'job_binary_internal',
+                                       data)
+
+    def delete_job_binary_internal(self, job_binary_id):
+        """Deletes the specified job binary internal by id."""
+
+        uri = 'job-binary-internals/%s' % job_binary_id
+        return self.delete(uri)
+
+    def get_job_binary_internal_data(self, job_binary_id):
+        """Returns data of a single job binary internal."""
+
+        uri = 'job-binary-internals/%s/data' % job_binary_id
+        return self.get(uri)
+
+    def list_job_binaries(self):
+        """List all job binaries for a user."""
+
+        uri = 'job-binaries'
+        return self._request_and_parse(self.get, uri, 'binaries')
+
+    def get_job_binary(self, job_binary_id):
+        """Returns the details of a single job binary."""
+
+        uri = 'job-binaries/%s' % job_binary_id
+        return self._request_and_parse(self.get, uri, 'job_binary')
+
+    def create_job_binary(self, name, url, extra=None, **kwargs):
+        """Creates job binary with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object.
+        """
+        uri = 'job-binaries'
+        body = kwargs.copy()
+        body.update({
+            'name': name,
+            'url': url,
+            'extra': extra or dict(),
+        })
+        return self._request_and_parse(self.post, uri, 'job_binary',
+                                       body=json.dumps(body))
+
+    def delete_job_binary(self, job_binary_id):
+        """Deletes the specified job binary by id."""
+
+        uri = 'job-binaries/%s' % job_binary_id
+        return self.delete(uri)
+
+    def get_job_binary_data(self, job_binary_id):
+        """Returns data of a single job binary."""
+
+        uri = 'job-binaries/%s/data' % job_binary_id
+        return self.get(uri)
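
The data processing (Sahara) client grows CRUD helpers for cluster templates, data sources and job binaries, all built on _request_and_parse(), which returns the (resp, parsed body) pair. A hedged sketch of how they compose; the names, the Swift URLs, and the assumption that each parsed body carries an 'id' key are illustrative only::

    def exercise_data_processing(client):
        resp, template = client.create_cluster_template(
            'demo-template', 'vanilla', '2.3.0',
            node_groups=[{'name': 'worker', 'count': 2,
                          'node_group_template_id': 'ngt-uuid'}])
        resp, source = client.create_data_source(
            'demo-input', 'swift', 'swift://demo-container/input')
        resp, binary = client.create_job_binary(
            'demo-binary', 'swift://demo-container/job.jar')

        # Cleanup mirrors creation (assumes each body exposes an 'id').
        client.delete_job_binary(binary['id'])
        client.delete_data_source(source['id'])
        client.delete_cluster_template(template['id'])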
diff --git a/tempest/services/database/json/versions_client.py b/tempest/services/database/json/versions_client.py
new file mode 100644
index 0000000..0269c43
--- /dev/null
+++ b/tempest/services/database/json/versions_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class DatabaseVersionsClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(DatabaseVersionsClientJSON, self).__init__(auth_provider)
+        self.skip_path()
+        self.service = CONF.database.catalog_type
+
+    def list_db_versions(self, params=None):
+        """List all versions."""
+        url = ''
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        return resp, self._parse_resp(body)
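
The new Trove versions client calls skip_path() so that list_db_versions() queries the unversioned service root. A minimal usage sketch; in practice the client is obtained from tempest's client manager rather than constructed by hand::

    from tempest.services.database.json import versions_client

    def list_trove_versions(auth_provider):
        client = versions_client.DatabaseVersionsClientJSON(auth_provider)
        resp, versions = client.list_db_versions()
        return versions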
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index 58451fb..b0cab8e 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -27,7 +27,8 @@
         self.endpoint_url = 'adminURL'
 
         # Needed for xml service client
-        self.list_tags = ["roles", "tenants", "users", "services"]
+        self.list_tags = ["roles", "tenants", "users", "services",
+                          "extensions"]
 
     def has_admin_extensions(self):
         """
@@ -49,6 +50,12 @@
         resp, body = self.post('OS-KSADM/roles', post_body)
         return resp, self._parse_resp(body)
 
+    def get_role(self, role_id):
+        """Get a role by its id."""
+        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
+        body = json.loads(body)
+        return resp, body['role']
+
     def create_tenant(self, name, **kwargs):
         """
         Create a tenant
@@ -213,7 +220,7 @@
 
     def list_services(self):
         """List Service - Returns Services."""
-        resp, body = self.get('/OS-KSADM/services/')
+        resp, body = self.get('/OS-KSADM/services')
         return resp, self._parse_resp(body)
 
     def delete_service(self, service_id):
@@ -221,6 +228,22 @@
         url = '/OS-KSADM/services/%s' % service_id
         return self.delete(url)
 
+    def update_user_password(self, user_id, new_pass):
+        """Update User Password."""
+        put_body = {
+            'password': new_pass,
+            'id': user_id
+        }
+        put_body = json.dumps({'user': put_body})
+        resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
+        return resp, self._parse_resp(body)
+
+    def list_extensions(self):
+        """List all the extensions."""
+        resp, body = self.get('/extensions')
+        body = json.loads(body)
+        return resp, body['extensions']['values']
+
 
 class TokenClientJSON(IdentityClientJSON):
 
@@ -269,13 +292,20 @@
 
         return resp, body['access']
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         if headers is None:
             # Always accept 'json', for TokenClientXML too.
             # Because XML response is not easily
             # converted to the corresponding JSON one
             headers = self.get_headers(accept_type="json")
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers(accept_type="json"))
+            except (ValueError, TypeError):
+                headers = self.get_headers(accept_type="json")
+
         resp, resp_body = self.http_obj.request(url, method,
                                                 headers=headers, body=body)
         self._log_request(method, url, resp)
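
The extra_headers handling added here only merges in the default Accept header when the caller supplied its own dict; a standalone sketch of that behaviour (merge_headers and defaults are illustrative names, not part of the client)::

    # Mirrors the headers/extra_headers branches in request() above.
    def merge_headers(headers, extra_headers, defaults):
        if headers is None:
            return dict(defaults)
        if extra_headers:
            try:
                headers.update(defaults)
            except (ValueError, TypeError):
                headers = dict(defaults)
        return headers

    defaults = {'Accept': 'application/json'}
    print merge_headers(None, False, defaults)               # just the defaults
    print merge_headers({'X-Custom': '1'}, True, defaults)   # merged dict
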
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 35d8aa0..6829333 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -163,6 +163,12 @@
         body = json.loads(body)
         return resp, body['role']
 
+    def list_roles(self):
+        """Get the list of Roles."""
+        resp, body = self.get("roles")
+        body = json.loads(body)
+        return resp, body['roles']
+
     def update_role(self, name, role_id):
         """Create a Role."""
         post_body = {
@@ -459,16 +465,20 @@
 
         self.auth_url = auth_url
 
-    def auth(self, user, password, tenant=None, user_type='id', domain=None):
+    def auth(self, user=None, password=None, tenant=None, user_type='id',
+             domain=None, token=None):
         """
         :param user: user id or name, as specified in user_type
         :param domain: the user and tenant domain
+        :param token: a token to re-scope.
 
         Accepts different combinations of credentials. Restrictions:
         - tenant and domain are only name (no id)
         - user domain and tenant domain are assumed identical
         - domain scope is not supported here
         Sample valid combinations:
+        - token
+        - token, tenant, domain
         - user_id, password
         - username, password, domain
         - username, password, tenant, domain
@@ -477,23 +487,32 @@
         creds = {
             'auth': {
                 'identity': {
-                    'methods': ['password'],
-                    'password': {
-                        'user': {
-                            'password': password,
-                        }
-                    }
+                    'methods': [],
                 }
             }
         }
-        if user_type == 'id':
-            creds['auth']['identity']['password']['user']['id'] = user
-        else:
-            creds['auth']['identity']['password']['user']['name'] = user
-        if domain is not None:
-            _domain = dict(name=domain)
-            creds['auth']['identity']['password']['user']['domain'] = _domain
+        id_obj = creds['auth']['identity']
+        if token:
+            id_obj['methods'].append('token')
+            id_obj['token'] = {
+                'id': token
+            }
+        if user and password:
+            id_obj['methods'].append('password')
+            id_obj['password'] = {
+                'user': {
+                    'password': password,
+                }
+            }
+            if user_type == 'id':
+                id_obj['password']['user']['id'] = user
+            else:
+                id_obj['password']['user']['name'] = user
+            if domain is not None:
+                _domain = dict(name=domain)
+                id_obj['password']['user']['domain'] = _domain
         if tenant is not None:
+            _domain = dict(name=domain)
             project = dict(name=tenant, domain=_domain)
             scope = dict(project=project)
             creds['auth']['scope'] = scope
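
For reference, the request body the rewritten auth() produces looks roughly as follows; the values are placeholders and only the structure is meant to be accurate::

    # Token re-scope (token, tenant, domain):
    {"auth": {"identity": {"methods": ["token"],
                           "token": {"id": "<token>"}},
              "scope": {"project": {"name": "<tenant>",
                                    "domain": {"name": "<domain>"}}}}}

    # Password authentication (user_id, password):
    {"auth": {"identity": {"methods": ["password"],
                           "password": {"user": {"id": "<user_id>",
                                                 "password": "<password>"}}}}}
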
@@ -502,13 +521,20 @@
         resp, body = self.post(self.auth_url, body=body)
         return resp, body
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         if headers is None:
             # Always accept 'json', for xml token client too.
             # Because XML response is not easily
             # converted to the corresponding JSON one
             headers = self.get_headers(accept_type="json")
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers(accept_type="json"))
+            except (ValueError, TypeError):
+                headers = self.get_headers(accept_type="json")
+
         resp, resp_body = self.http_obj.request(url, method,
                                                 headers=headers, body=body)
         self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/region_client.py
new file mode 100644
index 0000000..f95d00f
--- /dev/null
+++ b/tempest/services/identity/v3/json/region_client.py
@@ -0,0 +1,80 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class RegionClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(RegionClientJSON, self).__init__(auth_provider)
+        self.service = CONF.identity.catalog_type
+        self.endpoint_url = 'adminURL'
+        self.api_version = "v3"
+
+    def create_region(self, description, **kwargs):
+        """Create region."""
+        req_body = {
+            'description': description,
+        }
+        if kwargs.get('parent_region_id'):
+            req_body['parent_region_id'] = kwargs.get('parent_region_id')
+        req_body = json.dumps({'region': req_body})
+        if kwargs.get('unique_region_id'):
+            resp, body = self.put(
+                'regions/%s' % kwargs.get('unique_region_id'), req_body)
+        else:
+            resp, body = self.post('regions', req_body)
+        body = json.loads(body)
+        return resp, body['region']
+
+    def update_region(self, region_id, **kwargs):
+        """Updates a region."""
+        post_body = {}
+        if 'description' in kwargs:
+            post_body['description'] = kwargs.get('description')
+        if 'parent_region_id' in kwargs:
+            post_body['parent_region_id'] = kwargs.get('parent_region_id')
+        post_body = json.dumps({'region': post_body})
+        resp, body = self.patch('regions/%s' % region_id, post_body)
+        body = json.loads(body)
+        return resp, body['region']
+
+    def get_region(self, region_id):
+        """Get region."""
+        url = 'regions/%s' % region_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['region']
+
+    def list_regions(self, params=None):
+        """List regions."""
+        url = 'regions'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['regions']
+
+    def delete_region(self, region_id):
+        """Delete region."""
+        resp, body = self.delete('regions/%s' % region_id)
+        return resp, body
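
A hedged usage sketch of the new region client; regions_client is assumed to be an already-initialised RegionClientJSON and the ids/descriptions are placeholders::

    # A caller-chosen id goes through PUT /regions/<id>; otherwise the
    # client POSTs to /regions and Keystone generates the id.
    resp, region = regions_client.create_region('primary site',
                                                unique_region_id='RegionOne')
    resp, region = regions_client.update_region(region['id'],
                                                description='primary (updated)')
    resp, regions = regions_client.list_regions()
    regions_client.delete_region(region['id'])
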
diff --git a/tempest/services/identity/v3/xml/credentials_client.py b/tempest/services/identity/v3/xml/credentials_client.py
index 70f85a1..3c44188 100644
--- a/tempest/services/identity/v3/xml/credentials_client.py
+++ b/tempest/services/identity/v3/xml/credentials_client.py
@@ -18,8 +18,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index a1f9811..6490e34 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -17,8 +17,8 @@
 
 from tempest.common import http
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
@@ -46,12 +46,19 @@
         json = common.xml_to_json(body)
         return json
 
-    def request(self, method, url, headers=None, body=None, wait=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None, wait=None):
         """Overriding the existing HTTP request in super class RestClient."""
+        if extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
         dscv = CONF.identity.disable_ssl_certificate_validation
         self.http_obj = http.ClosingHttp(
             disable_ssl_certificate_validation=dscv)
         return super(EndPointClientXML, self).request(method, url,
+                                                      extra_headers,
                                                       headers=headers,
                                                       body=body)
 
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 8f42924..35295d7 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -18,9 +18,9 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
@@ -217,6 +217,12 @@
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
+    def list_roles(self):
+        """Get the list of Roles."""
+        resp, body = self.get("roles")
+        body = self._parse_roles(etree.fromstring(body))
+        return resp, body
+
     def update_role(self, name, role_id):
         """Updates a Role."""
         post_body = common.Element("role",
@@ -453,43 +459,61 @@
 
         self.auth_url = auth_url
 
-    def auth(self, user, password, tenant=None, user_type='id', domain=None):
+    def auth(self, user=None, password=None, tenant=None, user_type='id',
+             domain=None, token=None):
         """
         :param user: user id or name, as specified in user_type
+        :param domain: the user and tenant domain
+        :param token: a token to re-scope.
 
         Accepts different combinations of credentials. Restrictions:
         - tenant and domain are only name (no id)
         - user domain and tenant domain are assumed identical
+        - domain scope is not supported here
         Sample valid combinations:
+        - token
+        - token, tenant, domain
         - user_id, password
         - username, password, domain
         - username, password, tenant, domain
         Validation is left to the server side.
         """
-        if user_type == 'id':
-            _user = common.Element('user', id=user, password=password)
-        else:
-            _user = common.Element('user', name=user, password=password)
-        if domain is not None:
-            _domain = common.Element('domain', name=domain)
-            _user.append(_domain)
 
-        password = common.Element('password')
-        password.append(_user)
-
-        method = common.Element('method')
-        method.append(common.Text('password'))
         methods = common.Element('methods')
-        methods.append(method)
         identity = common.Element('identity')
+
+        if token:
+            method = common.Element('method')
+            method.append(common.Text('token'))
+            methods.append(method)
+
+            token = common.Element('token', id=token)
+            identity.append(token)
+
+        if user and password:
+            if user_type == 'id':
+                _user = common.Element('user', id=user, password=password)
+            else:
+                _user = common.Element('user', name=user, password=password)
+            if domain is not None:
+                _domain = common.Element('domain', name=domain)
+                _user.append(_domain)
+
+            password = common.Element('password')
+            password.append(_user)
+            method = common.Element('method')
+            method.append(common.Text('password'))
+            methods.append(method)
+            identity.append(password)
+
         identity.append(methods)
-        identity.append(password)
 
         auth = common.Element('auth')
         auth.append(identity)
 
         if tenant is not None:
             project = common.Element('project', name=tenant)
+            _domain = common.Element('domain', name=domain)
             project.append(_domain)
             scope = common.Element('scope')
             scope.append(project)
@@ -498,13 +522,19 @@
         resp, body = self.post(self.auth_url, body=str(common.Document(auth)))
         return resp, body
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         if headers is None:
             # Always accept 'json', for xml token client too.
             # Because XML response is not easily
             # converted to the corresponding JSON one
             headers = self.get_headers(accept_type="json")
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers(accept_type="json"))
+            except (ValueError, TypeError):
+                headers = self.get_headers(accept_type="json")
         resp, resp_body = self.http_obj.request(url, method,
                                                 headers=headers, body=body)
         self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index bf4cce7..73d831b 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -17,8 +17,8 @@
 
 from tempest.common import http
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
@@ -46,12 +46,19 @@
         json = common.xml_to_json(body)
         return json
 
-    def request(self, method, url, headers=None, body=None, wait=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None, wait=None):
         """Overriding the existing HTTP request in super class RestClient."""
+        if extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
         dscv = CONF.identity.disable_ssl_certificate_validation
         self.http_obj = http.ClosingHttp(
             disable_ssl_certificate_validation=dscv)
         return super(PolicyClientXML, self).request(method, url,
+                                                    extra_headers,
                                                     headers=headers,
                                                     body=body)
 
diff --git a/tempest/services/identity/v3/xml/region_client.py b/tempest/services/identity/v3/xml/region_client.py
new file mode 100644
index 0000000..9f9161d
--- /dev/null
+++ b/tempest/services/identity/v3/xml/region_client.py
@@ -0,0 +1,120 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import http
+from tempest.common import rest_client
+from tempest.common import xml_utils as common
+from tempest import config
+
+CONF = config.CONF
+
+XMLNS = "http://docs.openstack.org/identity/api/v3"
+
+
+class RegionClientXML(rest_client.RestClient):
+    TYPE = "xml"
+
+    def __init__(self, auth_provider):
+        super(RegionClientXML, self).__init__(auth_provider)
+        self.service = CONF.identity.catalog_type
+        self.region_url = 'adminURL'
+        self.api_version = "v3"
+
+    def _parse_array(self, node):
+        array = []
+        for child in node.getchildren():
+            tag_list = child.tag.split('}', 1)
+            if tag_list[1] == "region":
+                array.append(common.xml_to_json(child))
+        return array
+
+    def _parse_body(self, body):
+        json = common.xml_to_json(body)
+        return json
+
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None, wait=None):
+        """Overriding the existing HTTP request in super class RestClient."""
+        if extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
+        dscv = CONF.identity.disable_ssl_certificate_validation
+        self.http_obj = http.ClosingHttp(
+            disable_ssl_certificate_validation=dscv)
+        return super(RegionClientXML, self).request(method, url,
+                                                    extra_headers,
+                                                    headers=headers,
+                                                    body=body)
+
+    def create_region(self, description, **kwargs):
+        """Create region."""
+        create_region = common.Element("region",
+                                       xmlns=XMLNS,
+                                       description=description)
+        if 'parent_region_id' in kwargs:
+            create_region.append(common.Element(
+                'parent_region_id', kwargs.get('parent_region_id')))
+        if 'unique_region_id' in kwargs:
+            resp, body = self.put(
+                'regions/%s' % kwargs.get('unique_region_id'),
+                str(common.Document(create_region)))
+        else:
+            resp, body = self.post('regions',
+                                   str(common.Document(create_region)))
+        body = self._parse_body(etree.fromstring(body))
+        return resp, body
+
+    def update_region(self, region_id, **kwargs):
+        """Updates an region with given parameters.
+        """
+        description = kwargs.get('description', None)
+        update_region = common.Element("region",
+                                       xmlns=XMLNS,
+                                       description=description)
+        if 'parent_region_id' in kwargs:
+            update_region.append(common.Element('parent_region_id',
+                                 kwargs.get('parent_region_id')))
+
+        resp, body = self.patch('regions/%s' % str(region_id),
+                                str(common.Document(update_region)))
+        body = self._parse_body(etree.fromstring(body))
+        return resp, body
+
+    def get_region(self, region_id):
+        """Get Region."""
+        url = 'regions/%s' % region_id
+        resp, body = self.get(url)
+        body = self._parse_body(etree.fromstring(body))
+        return resp, body
+
+    def list_regions(self, params=None):
+        """Get the list of regions."""
+        url = 'regions'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = self._parse_array(etree.fromstring(body))
+        return resp, body
+
+    def delete_region(self, region_id):
+        """Delete region."""
+        resp, body = self.delete('regions/%s' % region_id)
+        return resp, body
diff --git a/tempest/services/identity/v3/xml/service_client.py b/tempest/services/identity/v3/xml/service_client.py
index 966d7f7..37ed892 100644
--- a/tempest/services/identity/v3/xml/service_client.py
+++ b/tempest/services/identity/v3/xml/service_client.py
@@ -16,8 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
diff --git a/tempest/services/identity/xml/identity_client.py b/tempest/services/identity/xml/identity_client.py
index c5bf310..886ce7b 100644
--- a/tempest/services/identity/xml/identity_client.py
+++ b/tempest/services/identity/xml/identity_client.py
@@ -12,8 +12,8 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+from tempest.common import xml_utils as xml
 from tempest import config
-from tempest.services.compute.xml import common as xml
 from tempest.services.identity.json import identity_client
 
 CONF = config.CONF
@@ -31,6 +31,11 @@
                                str(xml.Document(create_role)))
         return resp, self._parse_resp(body)
 
+    def get_role(self, role_id):
+        """Get a role by its id."""
+        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
+        return resp, self._parse_resp(body)
+
     def create_tenant(self, name, **kwargs):
         """
         Create a tenant
@@ -113,6 +118,20 @@
                                str(xml.Document(create_service)))
         return resp, self._parse_resp(body)
 
+    def update_user_password(self, user_id, new_pass):
+        """Update User Password."""
+        put_body = xml.Element("user",
+                               id=user_id,
+                               password=new_pass)
+        resp, body = self.put('users/%s/OS-KSADM/password' % user_id,
+                              str(xml.Document(put_body)))
+        return resp, self._parse_resp(body)
+
+    def list_extensions(self):
+        """List all the extensions."""
+        resp, body = self.get('/extensions')
+        return resp, self._parse_resp(body)
+
 
 class TokenClientXML(identity_client.TokenClientJSON):
     TYPE = "xml"
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index b3014fc..201869e 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -70,13 +70,12 @@
             "disk_format": disk_format,
         }
 
-        for option in ['visibility']:
-            if option in kwargs:
-                value = kwargs.get(option)
-                if isinstance(value, dict) or isinstance(value, tuple):
-                    params.update(value)
-                else:
-                    params[option] = value
+        for option in kwargs:
+            value = kwargs.get(option)
+            if isinstance(value, dict) or isinstance(value, tuple):
+                params.update(value)
+            else:
+                params[option] = value
 
         data = json.dumps(params)
         self._validate_schema(data)
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 27f4655..8e53b8d 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -165,33 +165,6 @@
         resp, body = self.delete(uri)
         return resp, body
 
-    def create_vpnservice(self, subnet_id, router_id, **kwargs):
-        post_body = {
-            "vpnservice": {
-                "subnet_id": subnet_id,
-                "router_id": router_id
-            }
-        }
-        for key, val in kwargs.items():
-            post_body['vpnservice'][key] = val
-        body = json.dumps(post_body)
-        uri = '%s/vpn/vpnservices' % (self.uri_prefix)
-        resp, body = self.post(uri, body)
-        body = json.loads(body)
-        return resp, body
-
-    def update_vpnservice(self, uuid, description):
-        put_body = {
-            "vpnservice": {
-                "description": description
-            }
-        }
-        body = json.dumps(put_body)
-        uri = '%s/vpn/vpnservices/%s' % (self.uri_prefix, uuid)
-        resp, body = self.put(uri, body)
-        body = json.loads(body)
-        return resp, body
-
     def list_router_interfaces(self, uuid):
         uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
         resp, body = self.get(uri)
@@ -281,14 +254,6 @@
         body = json.loads(body)
         return resp, body
 
-    def update_ikepolicy(self, uuid, **kwargs):
-        put_body = {'ikepolicy': kwargs}
-        body = json.dumps(put_body)
-        uri = '%s/vpn/ikepolicies/%s' % (self.uri_prefix, uuid)
-        resp, body = self.put(uri, body)
-        body = json.loads(body)
-        return resp, body
-
     def update_extra_routes(self, router_id, nexthop, destination):
         uri = '%s/routers/%s' % (self.uri_prefix, router_id)
         put_body = {
@@ -320,3 +285,11 @@
         resp, body = self.get(uri)
         body = json.loads(body)
         return resp, body
+
+    def add_dhcp_agent_to_network(self, agent_id, network_id):
+        post_body = {'network_id': network_id}
+        body = json.dumps(post_body)
+        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
+        resp, body = self.post(uri, body)
+        body = json.loads(body)
+        return resp, body
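
Sketch of the new DHCP-agent scheduling call; network_client is assumed to be an initialised JSON network client and the ids are placeholders::

    # POSTs {"network_id": "<net-uuid>"} to /agents/<agent-uuid>/dhcp-networks.
    resp, body = network_client.add_dhcp_agent_to_network(
        agent_id='<agent-uuid>', network_id='<net-uuid>')
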
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index e21abe1..81792c4 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -30,13 +30,17 @@
     'members': 'lb',
     'vpnservices': 'vpn',
     'ikepolicies': 'vpn',
+    'ipsecpolicies': 'vpn',
     'metering_labels': 'metering',
-    'metering_label_rules': 'metering'
+    'metering_label_rules': 'metering',
+    'firewall_rules': 'fw',
+    'firewall_policies': 'fw',
+    'firewalls': 'fw'
 }
 
 # The following list represents resource names that do not require
 # changing underscore to a hyphen
-hyphen_exceptions = ["health_monitors"]
+hyphen_exceptions = ["health_monitors", "firewall_rules", "firewall_policies"]
 
 # map from resource name to a plural name
 # needed only for those which can't be constructed as name + 's'
@@ -44,7 +48,9 @@
     'security_groups': 'security_groups',
     'security_group_rules': 'security_group_rules',
     'ikepolicy': 'ikepolicies',
-    'quotas': 'quotas'
+    'ipsecpolicy': 'ipsecpolicies',
+    'quotas': 'quotas',
+    'firewall_policy': 'firewall_policies'
 }
 
 
@@ -100,7 +106,7 @@
         def _list(**filters):
             uri = self.get_uri(plural_name)
             if filters:
-                uri += '?' + urllib.urlencode(filters)
+                uri += '?' + urllib.urlencode(filters, doseq=1)
             resp, body = self.get(uri)
             result = {plural_name: self.deserialize_list(body)}
             return resp, result
@@ -116,14 +122,14 @@
         return _delete
 
     def _shower(self, resource_name):
-        def _show(resource_id, field_list=[]):
-            # field_list is a sequence of two-element tuples, with the
-            # first element being 'fields'. An example:
-            # [('fields', 'id'), ('fields', 'name')]
+        def _show(resource_id, **fields):
+            # fields is a dict whose key is 'fields' and whose value is a
+            # list of field names. An example:
+            # {'fields': ['id', 'name']}
             plural = self.pluralize(resource_name)
             uri = '%s/%s' % (self.get_uri(plural), resource_id)
-            if field_list:
-                uri += '?' + urllib.urlencode(field_list)
+            if fields:
+                uri += '?' + urllib.urlencode(fields, doseq=1)
             resp, body = self.get(uri)
             body = self.deserialize_single(body)
             return resp, body
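
The switch to doseq=1 matters once a filter value is a list; a standalone sketch using only the Python 2 stdlib::

    import urllib

    fields = {'fields': ['id', 'name']}
    # Without doseq the whole list is encoded as one value:
    print urllib.urlencode(fields)            # fields=%5B%27id%27%2C+%27name%27%5D
    # With doseq=1 each element becomes its own query parameter:
    print urllib.urlencode(fields, doseq=1)   # fields=id&fields=name
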
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 68bc424..a7a6b2c 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -14,7 +14,7 @@
 import xml.etree.ElementTree as ET
 
 from tempest.common import rest_client
-from tempest.services.compute.xml import common
+from tempest.common import xml_utils as common
 from tempest.services.network import network_client_base as client_base
 
 
@@ -24,7 +24,7 @@
     # list of plurals used for xml serialization
     PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
                'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
-               'health_monitors', 'vips']
+               'health_monitors', 'vips', 'members', 'allowed_address_pairs']
 
     def get_rest_client(self, auth_provider):
         rc = rest_client.RestClient(auth_provider)
@@ -250,6 +250,13 @@
         body = _root_tag_fetcher_and_xml_to_json_parse(body)
         return resp, body
 
+    def add_dhcp_agent_to_network(self, agent_id, network_id):
+        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
+        network = common.Element("network_id", network_id)
+        resp, body = self.post(uri, str(common.Document(network)))
+        body = _root_tag_fetcher_and_xml_to_json_parse(body)
+        return resp, body
+
 
 def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
     body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 6e7910e..a0506f2 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -162,11 +162,17 @@
         self.service = CONF.object_storage.catalog_type
         self.format = 'json'
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         self.http_obj = http.ClosingHttp()
         if headers is None:
             headers = {}
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = {}
 
         # Authorize the request
         req_url, req_headers, req_body = self.auth_provider.auth_request(
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 49f7f49..f3f4eb6 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -29,12 +29,16 @@
 
         self.service = CONF.object_storage.catalog_type
 
-    def create_object(self, container, object_name, data, params=None):
+    def create_object(self, container, object_name, data,
+                      params=None, metadata=None):
         """Create storage object."""
 
         headers = self.get_headers()
         if not data:
             headers['content-length'] = '0'
+        if metadata:
+            for key in metadata:
+                headers[str(key)] = metadata[key]
         url = "%s/%s" % (str(container), str(object_name))
         if params:
             url += '?%s' % urllib.urlencode(params)
@@ -73,11 +77,16 @@
         resp, body = self.head(url)
         return resp, body
 
-    def get_object(self, container, object_name):
+    def get_object(self, container, object_name, metadata=None):
         """Retrieve object's data."""
 
+        headers = {}
+        if metadata:
+            for key in metadata:
+                headers[str(key)] = metadata[key]
+
         url = "{0}/{1}".format(container, object_name)
-        resp, body = self.get(url)
+        resp, body = self.get(url, headers=headers)
         return resp, body
 
     def copy_object_in_same_container(self, container, src_object_name,
@@ -146,13 +155,19 @@
         self.service = CONF.object_storage.catalog_type
         self.format = 'json'
 
-    def request(self, method, url, headers=None, body=None):
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None):
         """A simple HTTP request interface."""
         dscv = CONF.identity.disable_ssl_certificate_validation
         self.http_obj = http.ClosingHttp(
             disable_ssl_certificate_validation=dscv)
         if headers is None:
             headers = {}
+        elif extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = {}
 
         # Authorize the request
         req_url, req_headers, req_body = self.auth_provider.auth_request(
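
A hedged usage sketch of the new metadata arguments; object_client is assumed to be an initialised ObjectClient, and the header names below are only examples of what a caller might pass::

    metadata = {'X-Object-Meta-Colour': 'blue'}
    resp, _ = object_client.create_object('cont', 'obj', 'some data',
                                          metadata=metadata)
    # On GET the same dict is sent as request headers, e.g. a byte range:
    resp, body = object_client.get_object('cont', 'obj',
                                          metadata={'Range': 'bytes=0-3'})
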
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 113003c..c459f28 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -45,28 +45,32 @@
         return resp, body['stacks']
 
     def create_stack(self, name, disable_rollback=True, parameters={},
-                     timeout_mins=60, template=None, template_url=None):
+                     timeout_mins=60, template=None, template_url=None,
+                     environment=None, files=None):
         headers, body = self._prepare_update_create(
             name,
             disable_rollback,
             parameters,
             timeout_mins,
             template,
-            template_url)
+            template_url,
+            environment,
+            files)
         uri = 'stacks'
         resp, body = self.post(uri, headers=headers, body=body)
         return resp, body
 
     def update_stack(self, stack_identifier, name, disable_rollback=True,
                      parameters={}, timeout_mins=60, template=None,
-                     template_url=None):
+                     template_url=None, environment=None, files=None):
         headers, body = self._prepare_update_create(
             name,
             disable_rollback,
             parameters,
             timeout_mins,
             template,
-            template_url)
+            template_url,
+            environment, files)
 
         uri = "stacks/%s" % stack_identifier
         resp, body = self.put(uri, headers=headers, body=body)
@@ -74,13 +78,16 @@
 
     def _prepare_update_create(self, name, disable_rollback=True,
                                parameters={}, timeout_mins=60,
-                               template=None, template_url=None):
+                               template=None, template_url=None,
+                               environment=None, files=None):
         post_body = {
             "stack_name": name,
             "disable_rollback": disable_rollback,
             "parameters": parameters,
             "timeout_mins": timeout_mins,
-            "template": "HeatTemplateFormatVersion: '2012-12-12'\n"
+            "template": "HeatTemplateFormatVersion: '2012-12-12'\n",
+            "environment": environment,
+            "files": files
         }
         if template:
             post_body['template'] = template
@@ -154,7 +161,8 @@
                 if resource_status == status:
                     return
                 if fail_regexp.search(resource_status):
-                    raise exceptions.StackBuildErrorException(
+                    raise exceptions.StackResourceBuildErrorException(
+                        resource_name=resource_name,
                         stack_identifier=stack_identifier,
                         resource_status=resource_status,
                         resource_status_reason=body['resource_status_reason'])
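
A hedged sketch of how the new environment/files arguments might be used; orchestration_client is assumed to be an initialised OrchestrationClient and the template/file contents are placeholders::

    template = "HeatTemplateFormatVersion: '2012-12-12'\n"
    environment = {'parameters': {'flavor': 'm1.tiny'}}
    files = {'provider.yaml': '<contents of a provider template>'}

    resp, body = orchestration_client.create_stack('test-stack',
                                                   template=template,
                                                   environment=environment,
                                                   files=files)
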
diff --git a/tempest/services/queuing/json/queuing_client.py b/tempest/services/queuing/json/queuing_client.py
index 4a0c495..e5978f5 100644
--- a/tempest/services/queuing/json/queuing_client.py
+++ b/tempest/services/queuing/json/queuing_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.queuing.v1 import queues as queues_schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -33,6 +34,7 @@
         uri = '{0}/queues'.format(self.uri_prefix)
         resp, body = self.get(uri)
         body = json.loads(body)
+        self.validate_response(queues_schema.list_queues, resp, body)
         return resp, body
 
     def create_queue(self, queue_name):
@@ -43,16 +45,32 @@
     def get_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.get(uri)
-        body = json.loads(body)
         return resp, body
 
     def head_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp, body = self.head(uri)
-        body = json.loads(body)
         return resp, body
 
     def delete_queue(self, queue_name):
         uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
         resp = self.delete(uri)
         return resp
+
+    def get_queue_stats(self, queue_name):
+        uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
+        resp, body = self.get(uri)
+        body = json.loads(body)
+        self.validate_response(queues_schema.queue_stats, resp, body)
+        return resp, body
+
+    def get_queue_metadata(self, queue_name):
+        uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
+        resp, body = self.get(uri)
+        body = json.loads(body)
+        return resp, body
+
+    def set_queue_metadata(self, queue_name, rbody):
+        uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
+        resp, body = self.put(uri, body=json.dumps(rbody))
+        return resp, body
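
Sketch of the new metadata and stats helpers; queuing_client is assumed to be an initialised JSON queuing client and the queue name/metadata are placeholders::

    queuing_client.create_queue('demo-queue')
    queuing_client.set_queue_metadata('demo-queue', {'ttl': 60})
    resp, metadata = queuing_client.get_queue_metadata('demo-queue')
    resp, stats = queuing_client.get_queue_stats('demo-queue')
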
diff --git a/tempest/services/telemetry/telemetry_client_base.py b/tempest/services/telemetry/telemetry_client_base.py
index 610f07b..a073f54 100644
--- a/tempest/services/telemetry/telemetry_client_base.py
+++ b/tempest/services/telemetry/telemetry_client_base.py
@@ -73,7 +73,10 @@
         return resp, body
 
     def put(self, uri, body):
-        return self.rest_client.put(uri, body)
+        body = self.serialize(body)
+        resp, body = self.rest_client.put(uri, body)
+        body = self.deserialize(body)
+        return resp, body
 
     def get(self, uri):
         resp, body = self.rest_client.get(uri)
@@ -133,3 +136,15 @@
     def create_alarm(self, **kwargs):
         uri = "%s/alarms" % self.uri_prefix
         return self.post(uri, kwargs)
+
+    def update_alarm(self, alarm_id, **kwargs):
+        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+        return self.put(uri, kwargs)
+
+    def alarm_get_state(self, alarm_id):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        return self.get(uri)
+
+    def alarm_set_state(self, alarm_id, state):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        return self.put(uri, state)
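
Sketch of the new alarm helpers; telemetry_client is assumed to be an initialised telemetry client, and the alarm id and field names are placeholders::

    alarm_id = '<alarm-uuid>'
    telemetry_client.alarm_set_state(alarm_id, 'alarm')
    resp, state = telemetry_client.alarm_get_state(alarm_id)   # e.g. 'alarm'
    resp, body = telemetry_client.update_alarm(alarm_id, name='renamed-alarm')
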
diff --git a/tempest/services/telemetry/xml/telemetry_client.py b/tempest/services/telemetry/xml/telemetry_client.py
index 673f98e..3bee8bf 100644
--- a/tempest/services/telemetry/xml/telemetry_client.py
+++ b/tempest/services/telemetry/xml/telemetry_client.py
@@ -16,7 +16,7 @@
 from lxml import etree
 
 from tempest.common import rest_client
-from tempest.services.compute.xml import common
+from tempest.common import xml_utils as common
 import tempest.services.telemetry.telemetry_client_base as client
 
 
diff --git a/tempest/services/volume/json/admin/volume_quotas_client.py b/tempest/services/volume/json/admin/volume_quotas_client.py
index ea9c92e..961c7da 100644
--- a/tempest/services/volume/json/admin/volume_quotas_client.py
+++ b/tempest/services/volume/json/admin/volume_quotas_client.py
@@ -77,3 +77,7 @@
         post_body = jsonutils.dumps({'quota_set': post_body})
         resp, body = self.put('os-quota-sets/%s' % tenant_id, post_body)
         return resp, self._parse_resp(body)
+
+    def delete_quota_set(self, tenant_id):
+        """Delete the tenant's quota set."""
+        return self.delete('os-quota-sets/%s' % tenant_id)
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
new file mode 100644
index 0000000..d43c04a
--- /dev/null
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(VolumesServicesClientJSON, self).__init__(auth_provider)
+        self.service = CONF.volume.catalog_type
+
+    def list_services(self, params=None):
+        url = 'os-services'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['services']
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index c9c0582..65ecc67 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -18,6 +18,7 @@
 
 from tempest.common import rest_client
 from tempest import config
+from tempest import exceptions
 
 CONF = config.CONF
 
@@ -34,6 +35,26 @@
         self.build_interval = CONF.volume.build_interval
         self.build_timeout = CONF.volume.build_timeout
 
+    def is_resource_deleted(self, resource):
+        # To use this method, the resource must be passed in as a dictionary
+        # containing the resource id and type, e.g.:
+        # resource = {"id": resource_id,
+        #             "type": resource_type}
+        try:
+            if resource['type'] == "volume-type":
+                self.get_volume_type(resource['id'])
+            elif resource['type'] == "encryption-type":
+                resp, body = self.get_encryption_type(resource['id'])
+                assert 200 == resp.status
+                if not body:
+                    return True
+            else:
+                msg = (" resource value is either not defined or incorrect.")
+                raise exceptions.UnprocessableEntity(msg)
+        except exceptions.NotFound:
+            return True
+        return False
+
     def list_volume_types(self, params=None):
         """List all the volume_types created."""
         url = 'types'
@@ -150,3 +171,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         return resp, body['encryption']
+
+    def delete_encryption_type(self, vol_type_id):
+        """Delete the encryption type for the specified volume-type."""
+        return self.delete("/types/%s/encryption/provider" % str(vol_type_id))
diff --git a/tempest/services/volume/json/availability_zone_client.py b/tempest/services/volume/json/availability_zone_client.py
new file mode 100644
index 0000000..6839d3a
--- /dev/null
+++ b/tempest/services/volume/json/availability_zone_client.py
@@ -0,0 +1,34 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumeAvailabilityZoneClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(VolumeAvailabilityZoneClientJSON, self).__init__(
+            auth_provider)
+        self.service = CONF.volume.catalog_type
+
+    def get_availability_zone_list(self):
+        resp, body = self.get('os-availability-zone')
+        body = json.loads(body)
+        return resp, body['availabilityZoneInfo']
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index e4d2e8d..b55a037 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -67,10 +67,10 @@
         body = json.loads(body)
         return resp, body['volume']
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """
         Creates a new Volume.
-        size(Required): Size of volume in GB.
+        size: Size of volume in GB.
         Following optional keyword arguments are accepted:
         display_name: Optional Volume Name.
         metadata: A dictionary of values to be used as metadata.
@@ -78,6 +78,10 @@
         snapshot_id: When specified the volume is created from this snapshot
         imageRef: When specified the volume is created from this image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         post_body = {'size': size}
         post_body.update(kwargs)
         post_body = json.dumps({'volume': post_body})
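
With this change the size argument may be omitted and the configured default is used; a hedged usage sketch (volumes_client is assumed to be an initialised VolumesClientJSON)::

    # Falls back to CONF.volume.volume_size (bug #1293885):
    resp, volume = volumes_client.create_volume(display_name='cfg-sized-vol')
    # An explicit size still takes precedence:
    resp, volume = volumes_client.create_volume(size=2,
                                                display_name='two-gig-vol')
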
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
index 5bfa75f..df20a2a 100644
--- a/tempest/services/volume/v2/json/volumes_client.py
+++ b/tempest/services/volume/v2/json/volumes_client.py
@@ -68,10 +68,10 @@
         body = json.loads(body)
         return resp, body['volume']
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """
         Creates a new Volume.
-        size(Required): Size of volume in GB.
+        size: Size of volume in GB.
         Following optional keyword arguments are accepted:
         name: Optional Volume Name.
         metadata: A dictionary of values to be used as metadata.
@@ -79,6 +79,10 @@
         snapshot_id: When specified the volume is created from this snapshot
         imageRef: When specified the volume is created from this image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         post_body = {'size': size}
         post_body.update(kwargs)
         post_body = json.dumps({'volume': post_body})
diff --git a/tempest/services/volume/v2/xml/volumes_client.py b/tempest/services/volume/v2/xml/volumes_client.py
index 0b8f47c..1fdaf19 100644
--- a/tempest/services/volume/v2/xml/volumes_client.py
+++ b/tempest/services/volume/v2/xml/volumes_client.py
@@ -19,9 +19,9 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
@@ -117,10 +117,10 @@
         body = self._check_if_bootable(body)
         return resp, body
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """Creates a new Volume.
 
-        :param size: Size of volume in GB. (Required)
+        :param size: Size of volume in GB.
         :param name: Optional Volume Name.
         :param metadata: An optional dictionary of values for metadata.
         :param volume_type: Optional Name of volume_type for the volume
@@ -129,6 +129,10 @@
         :param imageRef: When specified the volume is created from this
                          image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         # NOTE(afazekas): it should use a volume namespace
         volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
 
diff --git a/tempest/services/volume/xml/admin/volume_hosts_client.py b/tempest/services/volume/xml/admin/volume_hosts_client.py
index e34b9f0..967c7c2 100644
--- a/tempest/services/volume/xml/admin/volume_hosts_client.py
+++ b/tempest/services/volume/xml/admin/volume_hosts_client.py
@@ -18,8 +18,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
diff --git a/tempest/services/volume/xml/admin/volume_quotas_client.py b/tempest/services/volume/xml/admin/volume_quotas_client.py
index d2eac34..a38410b 100644
--- a/tempest/services/volume/xml/admin/volume_quotas_client.py
+++ b/tempest/services/volume/xml/admin/volume_quotas_client.py
@@ -14,11 +14,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from ast import literal_eval
+import ast
 from lxml import etree
 
+from tempest.common import xml_utils as xml
 from tempest import config
-from tempest.services.compute.xml import common as xml
 from tempest.services.volume.json.admin import volume_quotas_client
 
 CONF = config.CONF
@@ -35,7 +35,7 @@
         quota = {}
         for k, v in q.items():
             try:
-                v = literal_eval(v)
+                v = ast.literal_eval(v)
             except (ValueError, SyntaxError):
                 pass
 
@@ -68,3 +68,7 @@
                               str(xml.Document(element)))
         body = xml.xml_to_json(etree.fromstring(body))
         return resp, self._format_quota(body)
+
+    def delete_quota_set(self, tenant_id):
+        """Delete the tenant's quota set."""
+        return self.delete('os-quota-sets/%s' % tenant_id)
diff --git a/tempest/services/volume/xml/admin/volume_services_client.py b/tempest/services/volume/xml/admin/volume_services_client.py
new file mode 100644
index 0000000..7bad16d
--- /dev/null
+++ b/tempest/services/volume/xml/admin/volume_services_client.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientXML(rest_client.RestClient):
+    TYPE = "xml"
+
+    def __init__(self, auth_provider):
+        super(VolumesServicesClientXML, self).__init__(auth_provider)
+        self.service = CONF.volume.catalog_type
+
+    def list_services(self, params=None):
+        url = 'os-services'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        node = etree.fromstring(body)
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
+        return resp, body
diff --git a/tempest/services/volume/xml/admin/volume_types_client.py b/tempest/services/volume/xml/admin/volume_types_client.py
index 1fa3e73..90897ee 100644
--- a/tempest/services/volume/xml/admin/volume_types_client.py
+++ b/tempest/services/volume/xml/admin/volume_types_client.py
@@ -18,9 +18,9 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
diff --git a/tempest/services/volume/xml/availability_zone_client.py b/tempest/services/volume/xml/availability_zone_client.py
new file mode 100644
index 0000000..e4a004a
--- /dev/null
+++ b/tempest/services/volume/xml/availability_zone_client.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumeAvailabilityZoneClientXML(rest_client.RestClient):
+    TYPE = "xml"
+
+    def __init__(self, auth_provider):
+        super(VolumeAvailabilityZoneClientXML, self).__init__(
+            auth_provider)
+        self.service = CONF.volume.catalog_type
+
+    def _parse_array(self, node):
+        return [xml_utils.xml_to_json(x) for x in node]
+
+    def get_availability_zone_list(self):
+        resp, body = self.get('os-availability-zone')
+        availability_zone = self._parse_array(etree.fromstring(body))
+        return resp, availability_zone
diff --git a/tempest/services/volume/xml/extensions_client.py b/tempest/services/volume/xml/extensions_client.py
index 4861733..2986fcd 100644
--- a/tempest/services/volume/xml/extensions_client.py
+++ b/tempest/services/volume/xml/extensions_client.py
@@ -16,8 +16,8 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 9ad86d2..4b1ba25 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -16,10 +16,10 @@
 from lxml import etree
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 8e886ce..9799e55 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -20,12 +20,17 @@
 from xml.sax import saxutils
 
 from tempest.common import rest_client
+from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
-from tempest.services.compute.xml import common
 
 CONF = config.CONF
 
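+# XML namespaces of the extension attributes (host, migration status,
+# tenant id) that Cinder attaches to volume elements.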
+VOLUME_NS_BASE = 'http://docs.openstack.org/volume/ext/'
+VOLUME_HOST_NS = VOLUME_NS_BASE + 'volume_host_attribute/api/v1'
+VOLUME_MIG_STATUS_NS = VOLUME_NS_BASE + 'volume_mig_status_attribute/api/v1'
+VOLUMES_TENANT_NS = VOLUME_NS_BASE + 'volume_tenant_attribute/api/v1'
+
 
 class VolumesClientXML(rest_client.RestClient):
     """
@@ -39,6 +44,23 @@
         self.build_interval = CONF.compute.build_interval
         self.build_timeout = CONF.compute.build_timeout
 
+    def _translate_attributes_to_json(self, volume):
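+        # Map the namespace-qualified XML attributes onto the JSON-style
+        # 'os-vol-*' keys so the XML client exposes the same fields as JSON.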
+        volume_host_attr = '{' + VOLUME_HOST_NS + '}host'
+        volume_mig_stat_attr = '{' + VOLUME_MIG_STATUS_NS + '}migstat'
+        volume_mig_name_attr = '{' + VOLUME_MIG_STATUS_NS + '}name_id'
+        volume_tenant_id_attr = '{' + VOLUMES_TENANT_NS + '}tenant_id'
+        if volume_host_attr in volume:
+            volume['os-vol-host-attr:host'] = volume.pop(volume_host_attr)
+        if volume_mig_stat_attr in volume:
+            volume['os-vol-mig-status-attr:migstat'] = volume.pop(
+                volume_mig_stat_attr)
+        if volume_mig_name_attr in volume:
+            volume['os-vol-mig-status-attr:name_id'] = volume.pop(
+                volume_mig_name_attr)
+        if volume_tenant_id_attr in volume:
+            volume['os-vol-tenant-attr:tenant_id'] = volume.pop(
+                volume_tenant_id_attr)
+
     def _parse_volume(self, body):
         vol = dict((attr, body.get(attr)) for attr in body.keys())
 
@@ -52,6 +74,8 @@
                                        child.getchildren())
             else:
                 vol[tag] = common.xml_to_json(child)
+        self._translate_attributes_to_json(vol)
+        self._check_if_bootable(vol)
         return vol
 
     def get_attachment_from_volume(self, volume):
@@ -90,8 +114,6 @@
         volumes = []
         if body is not None:
             volumes += [self._parse_volume(vol) for vol in list(body)]
-        for v in volumes:
-            v = self._check_if_bootable(v)
         return resp, volumes
 
     def list_volumes_with_detail(self, params=None):
@@ -106,8 +128,6 @@
         volumes = []
         if body is not None:
             volumes += [self._parse_volume(vol) for vol in list(body)]
-        for v in volumes:
-            v = self._check_if_bootable(v)
         return resp, volumes
 
     def get_volume(self, volume_id):
@@ -115,13 +135,12 @@
         url = "volumes/%s" % str(volume_id)
         resp, body = self.get(url)
         body = self._parse_volume(etree.fromstring(body))
-        body = self._check_if_bootable(body)
         return resp, body
 
-    def create_volume(self, size, **kwargs):
+    def create_volume(self, size=None, **kwargs):
         """Creates a new Volume.
 
-        :param size: Size of volume in GB. (Required)
+        :param size: Size of volume in GB.
         :param display_name: Optional Volume Name.
         :param metadata: An optional dictionary of values for metadata.
         :param volume_type: Optional Name of volume_type for the volume
@@ -130,6 +149,10 @@
         :param imageRef: When specified the volume is created from this
                          image
         """
+        # for bug #1293885:
+        # If no size specified, read volume size from CONF
+        if size is None:
+            size = CONF.volume.volume_size
         # NOTE(afazekas): it should use a volume namespace
         volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
 
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index b56f96b..0a63679 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -34,14 +34,14 @@
 In order to use this discovery you have to be in the tempest root directory
 and execute the following:
 
-	tempest/stress/run_stress.py -a -d 30
+	run-tempest-stress -a -d 30
 
 Running the sample test
 -----------------------
 
-To test installation, do the following (from the tempest/stress directory):
+To test installation, do the following:
 
-	./run_stress.py -t etc/server-create-destroy-test.json -d 30
+	run-tempest-stress -t tempest/stress/etc/server-create-destroy-test.json -d 30
 
 This sample test creates a few VMs and then destroys them.
 
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index c330165..478cd07 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -32,8 +32,6 @@
                                 stderr=subprocess.PIPE)
         proc.wait()
         success = proc.returncode == 0
-        self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
-                         "pong!" if success else "no pong :(")
         return success
 
     def tcp_connect_scan(self, addr, port):
@@ -58,11 +56,17 @@
             raise RuntimeError("Cannot connect to the ssh port.")
 
     def check_icmp_echo(self):
+        self.logger.info("%s(%s): Pinging..",
+                         self.server_id, self.floating['ip'])
+
         def func():
             return self.ping_ip_address(self.floating['ip'])
         if not tempest.test.call_until_true(func, self.check_timeout,
                                             self.check_interval):
-            raise RuntimeError("Cannot ping the machine.")
+            raise RuntimeError("%s(%s): Cannot ping the machine." %
+                               (self.server_id, self.floating['ip']))
+        self.logger.info("%s(%s): pong :)",
+                         self.server_id, self.floating['ip'])
 
     def _create_vm(self):
         self.name = name = data_utils.rand_name("instance")
@@ -170,6 +174,8 @@
             self._create_vm()
         if self.reboot:
             self.manager.servers_client.reboot(self.server_id, 'HARD')
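+            # A HARD reboot is asynchronous; wait until the server is ACTIVE
+            # again before running the connectivity checks.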
+            self.manager.servers_client.wait_for_server_status(self.server_id,
+                                                               'ACTIVE')
 
         self.run_core()
 
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+    def _create_keypair(self):
+        keyname = data_utils.rand_name("key")
+        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+        assert(resp.status == 200)
+
+    def _delete_keypair(self):
+        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+        assert(resp.status == 202)
+
+    def _create_vm(self):
+        self.name = name = data_utils.rand_name("instance")
+        servers_client = self.manager.servers_client
+        self.logger.info("creating %s" % name)
+        vm_args = self.vm_extra_args.copy()
+        vm_args['security_groups'] = [self.sec_grp]
+        vm_args['key_name'] = self.key['name']
+        resp, server = servers_client.create_server(name, self.image,
+                                                    self.flavor,
+                                                    **vm_args)
+        self.server_id = server['id']
+        assert(resp.status == 202)
+        self.manager.servers_client.wait_for_server_status(self.server_id,
+                                                           'ACTIVE')
+
+    def _destroy_vm(self):
+        self.logger.info("deleting server: %s" % self.server_id)
+        resp, _ = self.manager.servers_client.delete_server(self.server_id)
+        assert(resp.status == 204)  # It cannot be 204 if I had to wait..
+        self.manager.servers_client.wait_for_server_termination(self.server_id)
+        self.logger.info("deleted server: %s" % self.server_id)
+
+    def _create_sec_group(self):
+        sec_grp_cli = self.manager.security_groups_client
+        s_name = data_utils.rand_name('sec_grp-')
+        s_description = data_utils.rand_name('desc-')
+        _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+                                                            s_description)
+        create_rule = sec_grp_cli.create_security_group_rule
+        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+        create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+    def _destroy_sec_grp(self):
+        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+    def _create_floating_ip(self):
+        floating_cli = self.manager.floating_ips_client
+        _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+    def _destroy_floating_ip(self):
+        cli = self.manager.floating_ips_client
+        cli.delete_floating_ip(self.floating['id'])
+        cli.wait_for_resource_deletion(self.floating['id'])
+        self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+    def _create_volume(self):
+        name = data_utils.rand_name("volume")
+        self.logger.info("creating volume: %s" % name)
+        volumes_client = self.manager.volumes_client
+        resp, self.volume = volumes_client.create_volume(size=1,
+                                                         display_name=name)
+        assert(resp.status == 200)
+        volumes_client.wait_for_volume_status(self.volume['id'],
+                                              'available')
+        self.logger.info("created volume: %s" % self.volume['id'])
+
+    def _delete_volume(self):
+        self.logger.info("deleting volume: %s" % self.volume['id'])
+        volumes_client = self.manager.volumes_client
+        resp, _ = volumes_client.delete_volume(self.volume['id'])
+        assert(resp.status == 202)
+        volumes_client.wait_for_resource_deletion(self.volume['id'])
+        self.logger.info("deleted volume: %s" % self.volume['id'])
+
+    def _wait_disassociate(self):
+        cli = self.manager.floating_ips_client
+
+        def func():
+            _, floating = cli.get_floating_ip_details(self.floating['id'])
+            return floating['instance_id'] is None
+
+        if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+                                            CONF.compute.build_interval):
+            raise RuntimeError("IP disassociate timeout!")
+
+    def new_server_ops(self):
+        self._create_vm()
+        cli = self.manager.floating_ips_client
+        cli.associate_floating_ip_to_server(self.floating['ip'],
+                                            self.server_id)
+        if self.ssh_test_before_attach and self.enable_ssh_verify:
+            self.logger.info("Scanning for block devices via ssh on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+
+    def setUp(self, **kwargs):
+        """Note able configuration combinations:
+            Closest options to the test_stamp_pattern:
+                new_server = True
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = False
+            Just attaching:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+            Mostly API load by repeated attachment:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = False
+                ssh_test_before_attach = False
+            Minimal Nova load, but cinder load not decreased:
+                new_server = False
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+        """
+        self.image = CONF.compute.image_ref
+        self.flavor = CONF.compute.flavor_ref
+        self.vm_extra_args = kwargs.get('vm_extra_args', {})
+        self.floating_pool = kwargs.get('floating_pool', None)
+        self.new_volume = kwargs.get('new_volume', True)
+        self.new_server = kwargs.get('new_server', False)
+        self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+        self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+                                                 False)
+        self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+        self.detach_match_count = kwargs.get('detach_match_count', 1)
+        self.attach_match_count = kwargs.get('attach_match_count', 2)
+        self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+        self._create_floating_ip()
+        self._create_sec_group()
+        self._create_keypair()
+        private_key = self.key['private_key']
+        username = CONF.compute.image_ssh_user
+        self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+                                                        username,
+                                                        pkey=private_key)
+        if not self.new_volume:
+            self._create_volume()
+        if not self.new_server:
+            self.new_server_ops()
+
+    # Now we just check if the number of partitions increased or decreased.
+    def part_wait(self, num_match):
+        def _part_state():
+            self.partitions = self.remote_client.get_partitions().split('\n')
+            matching = 0
+            for part_line in self.partitions[1:]:
+                if self.part_line_re.match(part_line):
+                    matching += 1
+            return matching == num_match
+        if tempest.test.call_until_true(_part_state,
+                                        CONF.compute.build_timeout,
+                                        CONF.compute.build_interval):
+            return
+        else:
+            raise RuntimeError("Unexpected partitions: %s" %
+                               str(self.partitions))
+
+    def run(self):
+        if self.new_server:
+            self.new_server_ops()
+        if self.new_volume:
+            self._create_volume()
+        servers_client = self.manager.servers_client
+        self.logger.info("attach volume (%s) to vm %s" %
+                        (self.volume['id'], self.server_id))
+        resp, body = servers_client.attach_volume(self.server_id,
+                                                  self.volume['id'],
+                                                  self.part_name)
+        assert(resp.status == 200)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'in-use')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for new block device on %s"
+                             % self.server_id)
+            self.part_wait(self.attach_match_count)
+
+        resp, body = servers_client.detach_volume(self.server_id,
+                                                  self.volume['id'])
+        assert(resp.status == 202)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'available')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for block device disapperance on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+        if self.new_volume:
+            self._delete_volume()
+        if self.new_server:
+            self._destroy_vm()
+
+    def tearDown(self):
+        cli = self.manager.floating_ips_client
+        cli.disassociate_floating_ip_from_server(self.floating['ip'],
+                                                 self.server_id)
+        self._wait_disassociate()
+        if not self.new_server:
+            self._destroy_vm()
+        self._delete_keypair()
+        self._destroy_floating_ip()
+        self._destroy_sec_grp()
+        if not self.new_volume:
+            self._delete_volume()
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..642108a 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -19,6 +19,7 @@
 
 from six import moves
 
+from tempest import auth
 from tempest import clients
 from tempest.common import ssh
 from tempest.common.utils import data_utils
@@ -80,17 +81,23 @@
     return ret
 
 
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
     """
     Signal handler (only active if stop_on_error is True).
     """
-    terminate_all_processes()
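+    # Only stop everything if a child actually exited with an error; reset the
+    # handler first so terminating the children does not re-enter this handler.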
+    for process in processes:
+        if (not process['process'].is_alive() and
+                process['process'].exitcode != 0):
+            signal.signal(signalnum, signal.SIG_DFL)
+            terminate_all_processes()
+            break
 
 
 def terminate_all_processes(check_interval=20):
     """
     Goes through the process list and terminates all child processes.
     """
+    LOG.info("Stopping all processes.")
     for process in processes:
         if process['process'].is_alive():
             try:
@@ -141,9 +148,10 @@
                                             password,
                                             tenant['id'],
                                             "email")
-                manager = clients.Manager(username=username,
-                                          password="pass",
-                                          tenant_name=tenant_name)
+                creds = auth.get_credentials(username=username,
+                                             password=password,
+                                             tenant_name=tenant_name)
+                manager = clients.Manager(credentials=creds)
 
             test_obj = importutils.import_class(test['action'])
             test_run = test_obj(manager, max_runs, stop_on_error)
@@ -174,34 +182,39 @@
         signal.signal(signal.SIGCHLD, sigchld_handler)
     end_time = time.time() + duration
     had_errors = False
-    while True:
-        if max_runs is None:
-            remaining = end_time - time.time()
-            if remaining <= 0:
-                break
-        else:
-            remaining = log_check_interval
-            all_proc_term = True
-            for process in processes:
-                if process['process'].is_alive():
-                    all_proc_term = False
+    try:
+        while True:
+            if max_runs is None:
+                remaining = end_time - time.time()
+                if remaining <= 0:
                     break
-            if all_proc_term:
-                break
-
-        time.sleep(min(remaining, log_check_interval))
-        if stop_on_error:
-            for process in processes:
-                if process['statistic']['fails'] > 0:
+            else:
+                remaining = log_check_interval
+                all_proc_term = True
+                for process in processes:
+                    if process['process'].is_alive():
+                        all_proc_term = False
+                        break
+                if all_proc_term:
                     break
 
-        if not logfiles:
-            continue
-        if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
-                              stop_on_error):
-            had_errors = True
-            break
+            time.sleep(min(remaining, log_check_interval))
+            if stop_on_error:
+                if any(proc['statistic']['fails'] > 0
+                       for proc in processes):
+                    break
 
+            if not logfiles:
+                continue
+            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
+                                  stop_on_error):
+                had_errors = True
+                break
+    except KeyboardInterrupt:
+        LOG.warning("Interrupted, going to print statistics and exit ...")
+
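+    # Restore the default SIGCHLD handler before terminating the children,
+    # otherwise terminate_all_processes() would re-trigger sigchld_handler.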
+    if stop_on_error:
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
     terminate_all_processes()
 
     sum_fails = 0
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+  "threads": 1,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {"vm_extra_args": {},
+             "new_volume": true,
+             "new_server": false,
+             "ssh_test_before_attach": false,
+             "enable_ssh_verify": true}
+}
+]
diff --git a/tempest/test.py b/tempest/test.py
index abf42c0..748a98c 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -17,6 +17,7 @@
 import functools
 import json
 import os
+import re
 import sys
 import time
 import urllib
@@ -74,12 +75,16 @@
             try:
                 f(cls)
             except Exception as se:
+                etype, value, trace = sys.exc_info()
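+                # Keep the original exception info so it can be re-raised with
+                # its traceback after tearDownClass has been attempted.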
                 LOG.exception("setUpClass failed: %s" % se)
                 try:
                     cls.tearDownClass()
                 except Exception as te:
                     LOG.exception("tearDownClass failed: %s" % te)
-                raise se
+                try:
+                    raise etype(value), None, trace
+                finally:
+                    del trace  # for avoiding circular refs
 
     return decorator
 
@@ -93,6 +98,7 @@
     service_list = {
         'compute': CONF.service_available.nova,
         'image': CONF.service_available.glance,
+        'baremetal': CONF.service_available.ironic,
         'volume': CONF.service_available.cinder,
         'orchestration': CONF.service_available.heat,
         # NOTE(mtreinish) nova-network will provide networking functionality
@@ -301,26 +307,18 @@
             cls.__name__, network_resources=cls.network_resources)
 
         force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
-        if (CONF.compute.allow_tenant_isolation or
-            force_tenant_isolation):
+        if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
             creds = cls.isolated_creds.get_primary_creds()
-            username, tenant_name, password = creds
             if getattr(cls, '_interface', None):
-                os = clients.Manager(username=username,
-                                     password=password,
-                                     tenant_name=tenant_name,
+                os = clients.Manager(credentials=creds,
                                      interface=cls._interface,
                                      service=cls._service)
             elif interface:
-                os = clients.Manager(username=username,
-                                     password=password,
-                                     tenant_name=tenant_name,
+                os = clients.Manager(credentials=creds,
                                      interface=interface,
                                      service=cls._service)
             else:
-                os = clients.Manager(username=username,
-                                     password=password,
-                                     tenant_name=tenant_name,
+                os = clients.Manager(credentials=creds,
                                      service=cls._service)
         else:
             if getattr(cls, '_interface', None):
@@ -581,6 +579,24 @@
         return None
 
 
+def SimpleNegativeAutoTest(klass):
+    """
+    This decorator registers a test function based on the class name.
+    """
+    @attr(type=['negative', 'gate'])
+    def generic_test(self):
+        self.execute(self._schema_file)
+
+    cn = klass.__name__
+    cn = cn.replace('JSON', '')
+    cn = cn.replace('Test', '')
+    # NOTE(mkoderer): inserts '_' before inner uppercase chars (snake_case)
+    lower_cn = re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
+    func_name = 'test_%s' % lower_cn
+    setattr(klass, func_name, generic_test)
+    return klass
+
+
 def call_until_true(func, duration, sleep_for):
     """
     Call the given function until it returns True (and return True) or
diff --git a/tempest/tests/base.py b/tempest/tests/base.py
index 15e4311..f4df3b9 100644
--- a/tempest/tests/base.py
+++ b/tempest/tests/base.py
@@ -12,28 +12,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import os
-
-import fixtures
 import mock
-import testtools
 
-from tempest.openstack.common.fixture import moxstubout
+from oslotest import base
+from oslotest import moxstubout
 
 
-class TestCase(testtools.TestCase):
+class TestCase(base.BaseTestCase):
 
     def setUp(self):
         super(TestCase, self).setUp()
-        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
-                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
-            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
-        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
-                os.environ.get('OS_STDERR_CAPTURE') == '1'):
-            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
-            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
-
         mox_fixture = self.useFixture(moxstubout.MoxStubout())
         self.mox = mox_fixture.mox
         self.stubs = mox_fixture.stubs
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/tests/cli/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/tests/cli/__init__.py
diff --git a/tempest/tests/cli/test_output_parser.py b/tempest/tests/cli/test_output_parser.py
new file mode 100644
index 0000000..7ad270c
--- /dev/null
+++ b/tempest/tests/cli/test_output_parser.py
@@ -0,0 +1,177 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from tempest.cli import output_parser
+from tempest import exceptions
+from tempest.tests import base
+
+
+class TestOutputParser(base.TestCase):
+    OUTPUT_LINES = """
++----+------+---------+
+| ID | Name | Status  |
++----+------+---------+
+| 11 | foo  | BUILD   |
+| 21 | bar  | ERROR   |
+| 31 | bee  | None    |
++----+------+---------+
+"""
+    OUTPUT_LINES2 = """
++----+-------+---------+
+| ID | Name2 | Status2 |
++----+-------+---------+
+| 41 | aaa   | SSSSS   |
+| 51 | bbb   | TTTTT   |
+| 61 | ccc   | AAAAA   |
++----+-------+---------+
+"""
+
+    EXPECTED_TABLE = {'headers': ['ID', 'Name', 'Status'],
+                      'values': [['11', 'foo', 'BUILD'],
+                                 ['21', 'bar', 'ERROR'],
+                                 ['31', 'bee', 'None']]}
+    EXPECTED_TABLE2 = {'headers': ['ID', 'Name2', 'Status2'],
+                       'values': [['41', 'aaa', 'SSSSS'],
+                                  ['51', 'bbb', 'TTTTT'],
+                                  ['61', 'ccc', 'AAAAA']]}
+
+    def test_table_with_normal_values(self):
+        actual = output_parser.table(self.OUTPUT_LINES)
+        self.assertIsInstance(actual, dict)
+        self.assertEqual(self.EXPECTED_TABLE, actual)
+
+    def test_table_with_list(self):
+        output_lines = self.OUTPUT_LINES.split('\n')
+        actual = output_parser.table(output_lines)
+        self.assertIsInstance(actual, dict)
+        self.assertEqual(self.EXPECTED_TABLE, actual)
+
+    def test_table_with_invalid_line(self):
+        output_lines = self.OUTPUT_LINES + "aaaa"
+        actual = output_parser.table(output_lines)
+        self.assertIsInstance(actual, dict)
+        self.assertEqual(self.EXPECTED_TABLE, actual)
+
+    def test_tables_with_normal_values(self):
+        output_lines = 'test' + self.OUTPUT_LINES +\
+                       'test2' + self.OUTPUT_LINES2
+        expected = [{'headers': self.EXPECTED_TABLE['headers'],
+                     'label': 'test',
+                     'values': self.EXPECTED_TABLE['values']},
+                    {'headers': self.EXPECTED_TABLE2['headers'],
+                     'label': 'test2',
+                     'values': self.EXPECTED_TABLE2['values']}]
+        actual = output_parser.tables(output_lines)
+        self.assertIsInstance(actual, list)
+        self.assertEqual(expected, actual)
+
+    def test_tables_with_invalid_values(self):
+        output_lines = 'test' + self.OUTPUT_LINES +\
+                       'test2' + self.OUTPUT_LINES2 + '\n'
+        expected = [{'headers': self.EXPECTED_TABLE['headers'],
+                     'label': 'test',
+                     'values': self.EXPECTED_TABLE['values']},
+                    {'headers': self.EXPECTED_TABLE2['headers'],
+                     'label': 'test2',
+                     'values': self.EXPECTED_TABLE2['values']}]
+        actual = output_parser.tables(output_lines)
+        self.assertIsInstance(actual, list)
+        self.assertEqual(expected, actual)
+
+    def test_tables_with_invalid_line(self):
+        output_lines = 'test' + self.OUTPUT_LINES +\
+                       'test2' + self.OUTPUT_LINES2 +\
+                       '+----+-------+---------+'
+        expected = [{'headers': self.EXPECTED_TABLE['headers'],
+                     'label': 'test',
+                     'values': self.EXPECTED_TABLE['values']},
+                    {'headers': self.EXPECTED_TABLE2['headers'],
+                     'label': 'test2',
+                     'values': self.EXPECTED_TABLE2['values']}]
+
+        actual = output_parser.tables(output_lines)
+        self.assertIsInstance(actual, list)
+        self.assertEqual(expected, actual)
+
+    LISTING_OUTPUT = """
++----+
+| ID |
++----+
+| 11 |
+| 21 |
+| 31 |
++----+
+"""
+
+    def test_listing(self):
+        expected = [{'ID': '11'}, {'ID': '21'}, {'ID': '31'}]
+        actual = output_parser.listing(self.LISTING_OUTPUT)
+        self.assertIsInstance(actual, list)
+        self.assertEqual(expected, actual)
+
+    def test_details_multiple_with_invalid_line(self):
+        self.assertRaises(exceptions.InvalidStructure,
+                          output_parser.details_multiple,
+                          self.OUTPUT_LINES)
+
+    DETAILS_LINES1 = """First Table
++----------+--------+
+| Property | Value  |
++----------+--------+
+| foo      | BUILD  |
+| bar      | ERROR  |
+| bee      | None   |
++----------+--------+
+"""
+    DETAILS_LINES2 = """Second Table
++----------+--------+
+| Property | Value  |
++----------+--------+
+| aaa      | VVVVV  |
+| bbb      | WWWWW  |
+| ccc      | XXXXX  |
++----------+--------+
+"""
+
+    def test_details_with_normal_line_label_false(self):
+        expected = {'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
+        actual = output_parser.details(self.DETAILS_LINES1)
+        self.assertEqual(expected, actual)
+
+    def test_details_with_normal_line_label_true(self):
+        expected = {'__label': 'First Table',
+                    'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
+        actual = output_parser.details(self.DETAILS_LINES1, with_label=True)
+        self.assertEqual(expected, actual)
+
+    def test_details_multiple_with_normal_line_label_false(self):
+        expected = [{'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
+                    {'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
+        actual = output_parser.details_multiple(self.DETAILS_LINES1 +
+                                                self.DETAILS_LINES2)
+        self.assertIsInstance(actual, list)
+        self.assertEqual(expected, actual)
+
+    def test_details_multiple_with_normal_line_label_true(self):
+        expected = [{'__label': 'First Table',
+                     'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
+                    {'__label': 'Second Table',
+                     'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
+        actual = output_parser.details_multiple(self.DETAILS_LINES1 +
+                                                self.DETAILS_LINES2,
+                                                with_label=True)
+        self.assertIsInstance(actual, list)
+        self.assertEqual(expected, actual)
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/tests/cmd/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/tests/cmd/__init__.py
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
new file mode 100644
index 0000000..429f56f
--- /dev/null
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -0,0 +1,397 @@
+# Copyright 2014 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+import mock
+
+from tempest.cmd import verify_tempest_config
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestGetAPIVersions(base.TestCase):
+
+    def test_url_grab_versioned_nova_nossl(self):
+        base_url = 'http://127.0.0.1:8774/v2/'
+        endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+        self.assertEqual('http://127.0.0.1:8774', endpoint)
+
+    def test_url_grab_versioned_nova_ssl(self):
+        base_url = 'https://127.0.0.1:8774/v3/'
+        endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+        self.assertEqual('https://127.0.0.1:8774', endpoint)
+
+
+class TestDiscovery(base.TestCase):
+
+    def setUp(self):
+        super(TestDiscovery, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def test_get_keystone_api_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
+        self.assertIn('v2.0', versions)
+        self.assertIn('v3.0', versions)
+
+    def test_get_cinder_api_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
+        self.assertIn('v1.0', versions)
+        self.assertIn('v2.0', versions)
+
+    def test_get_nova_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
+        self.assertIn('v2.0', versions)
+        self.assertIn('v3.0', versions)
+
+    def test_verify_keystone_api_versions_no_v3(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v3',
+                                           'identity_feature_enabled',
+                                           False, True)
+
+    def test_verify_keystone_api_versions_no_v2(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2',
+                                           'identity_feature_enabled',
+                                           False, True)
+
+    def test_verify_cinder_api_versions_no_v2(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v1.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
+                                           False, True)
+
+    def test_verify_cinder_api_versions_no_v1(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v2.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
+                                           False, True)
+
+    def test_verify_nova_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v2.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_nova_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v3', 'compute_feature_enabled',
+                                           False, True)
+
+    def test_verify_glance_version_no_v2_with_v1_1(self):
+        def fake_get_versions():
+            return (None, ['v1.1'])
+        fake_os = mock.MagicMock()
+        fake_os.image_client.get_versions = fake_get_versions
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_glance_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+                                           False, True)
+
+    def test_verify_glance_version_no_v2_with_v1_0(self):
+        def fake_get_versions():
+            return (None, ['v1.0'])
+        fake_os = mock.MagicMock()
+        fake_os.image_client.get_versions = fake_get_versions
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_glance_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+                                           False, True)
+
+    def test_verify_glance_version_no_v1(self):
+        def fake_get_versions():
+            return (None, ['v2.0'])
+        fake_os = mock.MagicMock()
+        fake_os.image_client.get_versions = fake_get_versions
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_glance_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
+                                           False, True)
+
+    def test_verify_extensions_neutron(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.network_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'neutron', {})
+        self.assertIn('neutron', results)
+        self.assertIn('fake1', results['neutron'])
+        self.assertTrue(results['neutron']['fake1'])
+        self.assertIn('fake2', results['neutron'])
+        self.assertTrue(results['neutron']['fake2'])
+        self.assertIn('fake3', results['neutron'])
+        self.assertFalse(results['neutron']['fake3'])
+        self.assertIn('not_fake', results['neutron'])
+        self.assertFalse(results['neutron']['not_fake'])
+
+    def test_verify_extensions_neutron_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.network_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'neutron', {})
+        self.assertIn('neutron', results)
+        self.assertIn('extensions', results['neutron'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['neutron']['extensions'])
+
+    def test_verify_extensions_cinder(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'cinder', {})
+        self.assertIn('cinder', results)
+        self.assertIn('fake1', results['cinder'])
+        self.assertTrue(results['cinder']['fake1'])
+        self.assertIn('fake2', results['cinder'])
+        self.assertTrue(results['cinder']['fake2'])
+        self.assertIn('fake3', results['cinder'])
+        self.assertFalse(results['cinder']['fake3'])
+        self.assertIn('not_fake', results['cinder'])
+        self.assertFalse(results['cinder']['not_fake'])
+
+    def test_verify_extensions_cinder_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'cinder', {})
+        self.assertIn('cinder', results)
+        self.assertIn('extensions', results['cinder'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['cinder']['extensions'])
+
+    def test_verify_extensions_nova(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova', {})
+        self.assertIn('nova', results)
+        self.assertIn('fake1', results['nova'])
+        self.assertTrue(results['nova']['fake1'])
+        self.assertIn('fake2', results['nova'])
+        self.assertTrue(results['nova']['fake2'])
+        self.assertIn('fake3', results['nova'])
+        self.assertFalse(results['nova']['fake3'])
+        self.assertIn('not_fake', results['nova'])
+        self.assertFalse(results['nova']['not_fake'])
+
+    def test_verify_extensions_nova_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova', {})
+        self.assertIn('nova', results)
+        self.assertIn('extensions', results['nova'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['nova']['extensions'])
+
+    def test_verify_extensions_nova_v3(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova_v3', {})
+        self.assertIn('nova_v3', results)
+        self.assertIn('fake1', results['nova_v3'])
+        self.assertTrue(results['nova_v3']['fake1'])
+        self.assertIn('fake2', results['nova_v3'])
+        self.assertTrue(results['nova_v3']['fake2'])
+        self.assertIn('fake3', results['nova_v3'])
+        self.assertFalse(results['nova_v3']['fake3'])
+        self.assertIn('not_fake', results['nova_v3'])
+        self.assertFalse(results['nova_v3']['not_fake'])
+
+    def test_verify_extensions_nova_v3_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova_v3', {})
+        self.assertIn('nova_v3', results)
+        self.assertIn('extensions', results['nova_v3'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['nova_v3']['extensions'])
+
+    def test_verify_extensions_swift(self):
+        def fake_list_extensions():
+            return (None, {'fake1': 'metadata',
+                           'fake2': 'metadata',
+                           'not_fake': 'metadata',
+                           'swift': 'metadata'})
+        fake_os = mock.MagicMock()
+        fake_os.account_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
+        self.assertIn('swift', results)
+        self.assertIn('fake1', results['swift'])
+        self.assertTrue(results['swift']['fake1'])
+        self.assertIn('fake2', results['swift'])
+        self.assertTrue(results['swift']['fake2'])
+        self.assertIn('fake3', results['swift'])
+        self.assertFalse(results['swift']['fake3'])
+        self.assertIn('not_fake', results['swift'])
+        self.assertFalse(results['swift']['not_fake'])
+
+    def test_verify_extensions_swift_all(self):
+        def fake_list_extensions():
+            return (None, {'fake1': 'metadata',
+                           'fake2': 'metadata',
+                           'not_fake': 'metadata',
+                           'swift': 'metadata'})
+        fake_os = mock.MagicMock()
+        fake_os.account_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'swift', {})
+        self.assertIn('swift', results)
+        self.assertIn('extensions', results['swift'])
+        self.assertEqual(['not_fake', 'fake1', 'fake2'],
+                         results['swift']['extensions'])
diff --git a/tempest/tests/common/test_debug.py b/tempest/tests/common/test_debug.py
new file mode 100644
index 0000000..8a880f2
--- /dev/null
+++ b/tempest/tests/common/test_debug.py
@@ -0,0 +1,122 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from tempest.common import debug
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest import test
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestDebug(base.TestCase):
+
+    def setUp(self):
+        super(TestDebug, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+        common_pre = 'tempest.common.commands'
+        self.ip_addr_raw_mock = self.patch(common_pre + '.ip_addr_raw')
+        self.ip_route_raw_mock = self.patch(common_pre + '.ip_route_raw')
+        self.iptables_raw_mock = self.patch(common_pre + '.iptables_raw')
+        self.ip_ns_list_mock = self.patch(common_pre + '.ip_ns_list')
+        self.ip_ns_addr_mock = self.patch(common_pre + '.ip_ns_addr')
+        self.ip_ns_route_mock = self.patch(common_pre + '.ip_ns_route')
+        self.iptables_ns_mock = self.patch(common_pre + '.iptables_ns')
+        self.ovs_db_dump_mock = self.patch(common_pre + '.ovs_db_dump')
+
+        self.log_mock = self.patch('tempest.common.debug.LOG')
+
+    def test_log_ip_ns_debug_disabled(self):
+        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+                                              'enable', False))
+        debug.log_ip_ns()
+        self.assertFalse(self.ip_addr_raw_mock.called)
+        self.assertFalse(self.log_mock.info.called)
+
+    def test_log_ip_ns_debug_enabled(self):
+        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+                                              'enable', True))
+
+        self.ip_ns_list_mock.return_value = [1, 2]
+
+        debug.log_ip_ns()
+        self.ip_addr_raw_mock.assert_called_with()
+        self.assertTrue(self.log_mock.info.called)
+        self.ip_route_raw_mock.assert_called_with()
+        self.assertEqual(len(debug.TABLES), self.iptables_raw_mock.call_count)
+        for table in debug.TABLES:
+            self.assertIn(mock.call(table),
+                          self.iptables_raw_mock.call_args_list)
+
+        self.ip_ns_list_mock.assert_called_with()
+        self.assertEqual(len(self.ip_ns_list_mock.return_value),
+                         self.ip_ns_addr_mock.call_count)
+        self.assertEqual(len(self.ip_ns_list_mock.return_value),
+                         self.ip_ns_route_mock.call_count)
+        for ns in self.ip_ns_list_mock.return_value:
+            self.assertIn(mock.call(ns),
+                          self.ip_ns_addr_mock.call_args_list)
+            self.assertIn(mock.call(ns),
+                          self.ip_ns_route_mock.call_args_list)
+
+        self.assertEqual(len(debug.TABLES) *
+                         len(self.ip_ns_list_mock.return_value),
+                         self.iptables_ns_mock.call_count)
+        for ns in self.ip_ns_list_mock.return_value:
+            for table in debug.TABLES:
+                self.assertIn(mock.call(ns, table),
+                              self.iptables_ns_mock.call_args_list)
+
+    def test_log_ovs_db_debug_disabled(self):
+        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+                                              'enable', False))
+        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+                                              'neutron', False))
+        debug.log_ovs_db()
+        self.assertFalse(self.ovs_db_dump_mock.called)
+
+        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+                                              'enable', True))
+        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+                                              'neutron', False))
+        debug.log_ovs_db()
+        self.assertFalse(self.ovs_db_dump_mock.called)
+
+        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+                                              'enable', False))
+        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+                                              'neutron', True))
+        debug.log_ovs_db()
+        self.assertFalse(self.ovs_db_dump_mock.called)
+
+    def test_log_ovs_db_debug_enabled(self):
+        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+                                              'enable', True))
+        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+                                              'neutron', True))
+        debug.log_ovs_db()
+        self.ovs_db_dump_mock.assert_called_with()
+
+    def test_log_net_debug(self):
+        self.log_ip_ns_mock = self.patch('tempest.common.debug.log_ip_ns')
+        self.log_ovs_db_mock = self.patch('tempest.common.debug.log_ovs_db')
+
+        debug.log_net_debug()
+        self.log_ip_ns_mock.assert_called_with()
+        self.log_ovs_db_mock.assert_called_with()
diff --git a/tempest/tests/common/utils/test_file_utils.py b/tempest/tests/common/utils/test_file_utils.py
index 99ae033..605e82a 100644
--- a/tempest/tests/common/utils/test_file_utils.py
+++ b/tempest/tests/common/utils/test_file_utils.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 import mock
-from mock import patch
 
 from tempest.common.utils import file_utils
 from tempest.tests import base
@@ -23,7 +22,7 @@
 class TestFileUtils(base.TestCase):
 
     def test_have_effective_read_path(self):
-        with patch('__builtin__.open', mock.mock_open(), create=True):
+        with mock.patch('__builtin__.open', mock.mock_open(), create=True):
             result = file_utils.have_effective_read_access('fake_path')
         self.assertTrue(result)
 
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
index b8c6184..aee9805 100644
--- a/tempest/tests/common/utils/test_misc.py
+++ b/tempest/tests/common/utils/test_misc.py
@@ -50,3 +50,39 @@
         self.assertEqual(test, test2)
         test3 = TestBar()
         self.assertNotEqual(test, test3)
+
+    def test_find_test_caller_test_case(self):
+        # Calling it from here should give us the method we're in.
+        self.assertEqual('TestMisc:test_find_test_caller_test_case',
+                         misc.find_test_caller())
+
+    def test_find_test_caller_setup_self(self):
+        def setUp(self):
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:setUp', setUp(self))
+
+    def test_find_test_caller_setup_no_self(self):
+        def setUp():
+            return misc.find_test_caller()
+        self.assertEqual(':setUp', setUp())
+
+    def test_find_test_caller_setupclass_cls(self):
+        def setUpClass(cls):  # noqa
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
+
+    def test_find_test_caller_teardown_self(self):
+        def tearDown(self):
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:tearDown', tearDown(self))
+
+    def test_find_test_caller_teardown_no_self(self):
+        def tearDown():
+            return misc.find_test_caller()
+        self.assertEqual(':tearDown', tearDown())
+
+    def test_find_test_caller_teardown_class(self):
+        def tearDownClass(cls):
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:tearDownClass',
+                         tearDownClass(self.__class__))
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..44c331e 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.tests import fake_credentials
+
+
+def get_default_credentials(credential_type, fill_in=True):
+    return fake_credentials.FakeCredentials()
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+    return fake_credentials.FakeCredentials()
+
 
 class FakeAuthProvider(object):
 
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4676cbd..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -45,6 +45,16 @@
             os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
         self.conf.set_default('lock_path',
                               str(os.environ.get('OS_TEST_LOCK_PATH')))
+        self.conf.set_default('auth_version', 'v2', group='identity')
+        for config_option in ['username', 'password', 'tenant_name']:
+            # Identity group items
+            for prefix in ['', 'alt_', 'admin_']:
+                self.conf.set_default(prefix + config_option,
+                                      'fake_' + config_option,
+                                      group='identity')
+            # Compute Admin group items
+            self.conf.set_default(config_option, 'fake_' + config_option,
+                                  group='compute-admin')
 
 
 class FakePrivate(config.TempestConfigPrivate):
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..48f67d2
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,62 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+    def is_valid(self):
+        return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            tenant_name='fake_tenant_name'
+        )
+        super(FakeKeystoneV2Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
+    """
+    Fake credentials suitable for the Keystone Identity V3 API
+    """
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            user_domain_name='fake_domain_name',
+            project_name='fake_tenant_name'
+        )
+        super(FakeKeystoneV3Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
+    """
+    Fake credentials suitable for the Keystone Identity V3 API, with no scope
+    """
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            user_domain_name='fake_domain_name'
+        )
+        super(FakeKeystoneV3DomainCredentials, self).__init__(**creds)
diff --git a/tempest/tests/fake_identity.py b/tempest/tests/fake_identity.py
index 058c9c2..1900fc9 100644
--- a/tempest/tests/fake_identity.py
+++ b/tempest/tests/fake_identity.py
@@ -113,7 +113,7 @@
         "expires_at": "2020-01-01T00:00:10.000123Z",
         "project": {
             "domain": {
-                "id": "fake_id",
+                "id": "fake_domain_id",
                 "name": "fake"
             },
             "id": "project_id",
@@ -121,7 +121,7 @@
         },
         "user": {
             "domain": {
-                "id": "domain_id",
+                "id": "fake_domain_id",
                 "name": "domain_name"
             },
             "id": "fake_user_id",
diff --git a/tempest/tests/negative/test_generate_json.py b/tempest/tests/negative/test_generate_json.py
deleted file mode 100644
index e09fcdf..0000000
--- a/tempest/tests/negative/test_generate_json.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.generator import negative_generator
-import tempest.test
-
-
-class TestNegativeGenerator(tempest.test.BaseTestCase):
-
-    fake_input_str = {"type": "string",
-                      "minLength": 2,
-                      "maxLength": 8,
-                      'results': {'gen_number': 404}}
-
-    fake_input_int = {"type": "integer",
-                      "maximum": 255,
-                      "minimum": 1}
-
-    fake_input_obj = {"type": "object",
-                      "properties": {"minRam": {"type": "integer"},
-                                     "diskName": {"type": "string"},
-                                     "maxRam": {"type": "integer", }
-                                     }
-                      }
-
-    def setUp(self):
-        super(TestNegativeGenerator, self).setUp()
-        self.negative = negative_generator.NegativeTestGenerator()
-
-    def _validate_result(self, data):
-        self.assertTrue(isinstance(data, list))
-        for t in data:
-            self.assertTrue(isinstance(t, tuple))
-
-    def test_generate_invalid_string(self):
-        result = self.negative.generate(self.fake_input_str)
-        self._validate_result(result)
-
-    def test_generate_invalid_integer(self):
-        result = self.negative.generate(self.fake_input_int)
-        self._validate_result(result)
-
-    def test_generate_invalid_obj(self):
-        result = self.negative.generate(self.fake_input_obj)
-        self._validate_result(result)
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index f2ed999..c77faca 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -16,7 +16,9 @@
 import jsonschema
 import mock
 
-import tempest.common.generator.base_generator as base_generator
+from tempest.common.generator import base_generator
+from tempest.common.generator import negative_generator
+from tempest.common.generator import valid_generator
 from tempest.tests import base
 
 
@@ -79,3 +81,73 @@
         self.assertRaises(jsonschema.SchemaError,
                           self.generator.validate_schema,
                           self.invalid_json_schema_desc)
+
+
+class BaseNegativeGenerator(object):
+    types = ['string', 'integer', 'object']
+
+    fake_input_str = {"type": "string",
+                      "minLength": 2,
+                      "maxLength": 8,
+                      'results': {'gen_int': 404}}
+
+    fake_input_int = {"type": "integer",
+                      "maximum": 255,
+                      "minimum": 1}
+
+    fake_input_obj = {"type": "object",
+                      "properties": {"minRam": {"type": "integer"},
+                                     "diskName": {"type": "string"},
+                                     "maxRam": {"type": "integer", }
+                                     }
+                      }
+
+    unknown_type_schema = {
+        "type": "not_defined"
+    }
+
+    def _validate_result(self, data):
+        self.assertTrue(isinstance(data, list))
+        for t in data:
+            self.assertIsInstance(t, tuple)
+            self.assertEqual(3, len(t))
+            self.assertIsInstance(t[0], str)
+
+    def test_generate_string(self):
+        result = self.generator.generate(self.fake_input_str)
+        self._validate_result(result)
+
+    def test_generate_integer(self):
+        result = self.generator.generate(self.fake_input_int)
+        self._validate_result(result)
+
+    def test_generate_obj(self):
+        result = self.generator.generate(self.fake_input_obj)
+        self._validate_result(result)
+
+    def test_generator_mandatory_functions(self):
+        for data_type in self.types:
+            self.assertIn(data_type, self.generator.types_dict)
+
+    def test_generate_with_unknown_type(self):
+        self.assertRaises(TypeError, self.generator.generate,
+                          self.unknown_type_schema)
+
+
+class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
+    def setUp(self):
+        super(TestNegativeValidGenerator, self).setUp()
+        self.generator = valid_generator.ValidTestGenerator()
+
+    def test_generate_valid(self):
+        result = self.generator.generate_valid(self.fake_input_obj)
+        self.assertIn("minRam", result)
+        self.assertIsInstance(result["minRam"], int)
+        self.assertIn("diskName", result)
+        self.assertIsInstance(result["diskName"], str)
+
+
+class TestNegativeNegativeGenerator(base.TestCase, BaseNegativeGenerator):
+    def setUp(self):
+        super(TestNegativeNegativeGenerator, self).setUp()
+        self.generator = negative_generator.NegativeTestGenerator()
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index c76abde..5a334c5 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -18,12 +18,12 @@
 
 import tempest.cli as cli
 from tempest.openstack.common import log as logging
-import tempest.test
+from tempest.tests import base
 
 LOG = logging.getLogger(__name__)
 
 
-class StressFrameworkTest(tempest.test.BaseTestCase):
+class StressFrameworkTest(base.TestCase):
     """Basic test for the stress test framework.
     """
 
@@ -51,5 +51,5 @@
         return proc.returncode
 
     def test_help_function(self):
-        result = self._cmd("python", "-m tempest.stress.run_stress -h")
+        result = self._cmd("python", "-m tempest.cmd.run_stress -h")
         self.assertEqual(0, result)
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 62c20e3..1dcddad 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -16,24 +16,23 @@
 import copy
 import datetime
 
+from oslotest import mockpatch
+
 from tempest import auth
 from tempest.common import http
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
 from tempest.tests import base
+from tempest.tests import fake_auth_provider
 from tempest.tests import fake_config
+from tempest.tests import fake_credentials
 from tempest.tests import fake_http
 from tempest.tests import fake_identity
 
 
 class BaseAuthTestsSetUp(base.TestCase):
     _auth_provider_class = None
-    credentials = {
-        'username': 'fake_user',
-        'password': 'fake_pwd',
-        'tenant_name': 'fake_tenant'
-    }
+    credentials = fake_credentials.FakeCredentials()
 
     def _auth(self, credentials, **params):
         """
@@ -47,6 +46,10 @@
         self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
         self.fake_http = fake_http.fake_httplib2(return_type=200)
         self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+        self.stubs.Set(auth, 'get_credentials',
+                       fake_auth_provider.get_credentials)
+        self.stubs.Set(auth, 'get_default_credentials',
+                       fake_auth_provider.get_default_credentials)
         self.auth_provider = self._auth(self.credentials)
 
 
@@ -58,12 +61,19 @@
     """
     _auth_provider_class = auth.AuthProvider
 
-    def test_check_credentials_is_dict(self):
-        self.assertTrue(self.auth_provider.check_credentials({}))
+    def test_check_credentials_class(self):
+        self.assertRaises(NotImplementedError,
+                          self.auth_provider.check_credentials,
+                          auth.Credentials())
 
     def test_check_credentials_bad_type(self):
         self.assertFalse(self.auth_provider.check_credentials([]))
 
+    def test_instantiate_with_dict(self):
+        # Dict credentials are only supported for backward compatibility
+        auth_provider = self._auth(credentials={})
+        self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
     def test_instantiate_with_bad_credentials_type(self):
         """
         Assure that credentials with bad type fail with TypeError
@@ -100,10 +110,15 @@
         self.assertIsNone(self.auth_provider.alt_part)
         self.assertIsNone(self.auth_provider.alt_auth_data)
 
+    def test_fill_credentials(self):
+        self.assertRaises(NotImplementedError,
+                          self.auth_provider.fill_credentials)
+
 
 class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
     _endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
     _auth_provider_class = auth.KeystoneV2AuthProvider
+    credentials = fake_credentials.FakeKeystoneV2Credentials()
 
     def setUp(self):
         super(TestKeystoneV2AuthProvider, self).setUp()
@@ -123,6 +138,13 @@
     def _get_token_from_fake_identity(self):
         return fake_identity.TOKEN
 
+    def _get_from_fake_identity(self, attr):
+        access = fake_identity.IDENTITY_V2_RESPONSE['access']
+        if attr == 'user_id':
+            return access['user']['id']
+        elif attr == 'tenant_id':
+            return access['token']['tenant']['id']
+
     def _test_request_helper(self, filters, expected):
         url, headers, body = self.auth_provider.auth_request('GET',
                                                              self.target_url,
@@ -210,16 +232,12 @@
             del cred[attr]
             self.assertFalse(self.auth_provider.check_credentials(cred))
 
-    def test_check_credentials_not_scoped_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['tenant_name']
-        self.assertTrue(self.auth_provider.check_credentials(cred,
-                                                             scoped=False))
-
-    def test_check_credentials_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['tenant_name']
-        self.assertFalse(self.auth_provider.check_credentials(cred))
+    def test_fill_credentials(self):
+        self.auth_provider.fill_credentials()
+        creds = self.auth_provider.credentials
+        for attr in ['user_id', 'tenant_id']:
+            self.assertEqual(self._get_from_fake_identity(attr),
+                             getattr(creds, attr))
 
     def _test_base_url_helper(self, expected_url, filters,
                               auth_data=None):
@@ -321,12 +339,7 @@
 class TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):
     _endpoints = fake_identity.IDENTITY_V3_RESPONSE['token']['catalog']
     _auth_provider_class = auth.KeystoneV3AuthProvider
-    credentials = {
-        'username': 'fake_user',
-        'password': 'fake_pwd',
-        'tenant_name': 'fake_tenant',
-        'domain_name': 'fake_domain_name',
-    }
+    credentials = fake_credentials.FakeKeystoneV3Credentials()
 
     def setUp(self):
         super(TestKeystoneV3AuthProvider, self).setUp()
@@ -346,10 +359,44 @@
         access['expires_at'] = date_as_string
         return token, access
 
-    def test_check_credentials_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['domain_name']
-        self.assertFalse(self.auth_provider.check_credentials(cred))
+    def _get_from_fake_identity(self, attr):
+        token = fake_identity.IDENTITY_V3_RESPONSE['token']
+        if attr == 'user_id':
+            return token['user']['id']
+        elif attr == 'project_id':
+            return token['project']['id']
+        elif attr == 'user_domain_id':
+            return token['user']['domain']['id']
+        elif attr == 'project_domain_id':
+            return token['project']['domain']['id']
+
+    def test_check_credentials_missing_attribute(self):
+        # reset credentials to fresh ones
+        self.credentials.reset()
+        for attr in ['username', 'password', 'user_domain_name',
+                     'project_domain_name']:
+            cred = copy.copy(self.credentials)
+            del cred[attr]
+            self.assertFalse(self.auth_provider.check_credentials(cred),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_check_domain_credentials_missing_attribute(self):
+        # reset credentials to fresh ones
+        self.credentials.reset()
+        domain_creds = fake_credentials.FakeKeystoneV3DomainCredentials()
+        for attr in ['username', 'password', 'user_domain_name']:
+            cred = copy.copy(domain_creds)
+            del cred[attr]
+            self.assertFalse(self.auth_provider.check_credentials(cred),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_fill_credentials(self):
+        self.auth_provider.fill_credentials()
+        creds = self.auth_provider.credentials
+        for attr in ['user_id', 'project_id', 'user_domain_id',
+                     'project_domain_id']:
+            self.assertEqual(self._get_from_fake_identity(attr),
+                             getattr(creds, attr))
 
     # Overwrites v2 test
     def test_base_url_to_get_admin_endpoint(self):
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
new file mode 100644
index 0000000..bdb9269
--- /dev/null
+++ b/tempest/tests/test_commands.py
@@ -0,0 +1,87 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+import subprocess
+
+from tempest.common import commands
+from tempest.tests import base
+
+
+class TestCommands(base.TestCase):
+
+    def setUp(self):
+        super(TestCommands, self).setUp()
+        self.subprocess_args = {'stdout': subprocess.PIPE,
+                                'stderr': subprocess.STDOUT}
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_addr_raw(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'a']
+        commands.ip_addr_raw()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_route_raw(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'r']
+        commands.ip_route_raw()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_raw(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+        commands.ip_ns_raw()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_iptables_raw(self, mock):
+        table = 'filter'
+        expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
+                    '%s' % table]
+        commands.iptables_raw(table)
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_list(self, mock):
+        expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+        commands.ip_ns_list()
+        mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_addr(self, mock):
+        ns_list = commands.ip_ns_list()
+        for ns in ns_list:
+            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+                        'ip', 'a']
+            commands.ip_ns_addr(ns)
+            mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_ip_ns_route(self, mock):
+        ns_list = commands.ip_ns_list()
+        for ns in ns_list:
+            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+                        'ip', 'r']
+            commands.ip_ns_route(ns)
+            mock.assert_called_once_with(expected, **self.subprocess_args)
+
+    @mock.patch('subprocess.Popen')
+    def test_iptables_ns(self, mock):
+        table = 'filter'
+        ns_list = commands.ip_ns_list()
+        for ns in ns_list:
+            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+                        'iptables', '-v', '-S', '-t', table]
+            commands.iptables_ns(ns, table)
+            mock.assert_called_once_with(expected, **self.subprocess_args)
diff --git a/tempest/tests/test_compute_xml_common.py b/tempest/tests/test_compute_xml_common.py
index bfa6a10..1561931 100644
--- a/tempest/tests/test_compute_xml_common.py
+++ b/tempest/tests/test_compute_xml_common.py
@@ -13,7 +13,7 @@
 #    under the License.
 from lxml import etree
 
-from tempest.services.compute.xml import common
+from tempest.common import xml_utils as common
 from tempest.tests import base
 
 
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..9da5f92
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,230 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from oslo.config import cfg
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+    attributes = {}
+    credentials_class = auth.Credentials
+
+    def _get_credentials(self, attributes=None):
+        if attributes is None:
+            attributes = self.attributes
+        return self.credentials_class(**attributes)
+
+    def setUp(self):
+        super(CredentialsTests, self).setUp()
+        self.fake_http = fake_http.fake_httplib2(return_type=200)
+        self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def test_create(self):
+        creds = self._get_credentials()
+        self.assertEqual(self.attributes, creds._initial)
+
+    def test_create_invalid_attr(self):
+        self.assertRaises(exceptions.InvalidCredentials,
+                          self._get_credentials,
+                          attributes=dict(invalid='fake'))
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            self.assertRaises(NotImplementedError,
+                              self.credentials_class.get_default,
+                              credentials_type=ctype)
+
+    def test_invalid_default(self):
+        self.assertRaises(exceptions.InvalidCredentials,
+                          auth.Credentials.get_default,
+                          credentials_type='invalid_type')
+
+    def test_is_valid(self):
+        creds = self._get_credentials()
+        self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+    attributes = {
+        'username': 'fake_username',
+        'password': 'fake_password',
+        'tenant_name': 'fake_tenant_name'
+    }
+
+    identity_response = fake_identity._fake_v2_response
+    credentials_class = auth.KeystoneV2Credentials
+
+    def setUp(self):
+        super(KeystoneV2CredentialsTests, self).setUp()
+        self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def _verify_credentials(self, credentials_class, filled=True,
+                            creds_dict=None):
+
+        def _check(credentials):
+            # Check the right version of credentials has been returned
+            self.assertIsInstance(credentials, credentials_class)
+            # Check the id attributes are filled in
+            attributes = [x for x in credentials.ATTRIBUTES if (
+                '_id' in x and x != 'domain_id')]
+            for attr in attributes:
+                if filled:
+                    self.assertIsNotNone(getattr(credentials, attr))
+                else:
+                    self.assertIsNone(getattr(credentials, attr))
+
+        if creds_dict is None:
+            for ctype in auth.Credentials.TYPES:
+                creds = auth.get_default_credentials(credential_type=ctype,
+                                                     fill_in=filled)
+                _check(creds)
+        else:
+            creds = auth.get_credentials(fill_in=filled, **creds_dict)
+            _check(creds)
+
+    def test_get_default_credentials(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(credentials_class=self.credentials_class)
+
+    def test_get_credentials(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(credentials_class=self.credentials_class,
+                                 creds_dict=self.attributes)
+
+    def test_get_credentials_not_filled(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(credentials_class=self.credentials_class,
+                                 filled=False,
+                                 creds_dict=self.attributes)
+
+    def test_is_valid(self):
+        creds = self._get_credentials()
+        self.assertTrue(creds.is_valid())
+
+    def test_is_not_valid(self):
+        creds = self._get_credentials()
+        for attr in self.attributes.keys():
+            delattr(creds, attr)
+            self.assertFalse(creds.is_valid(),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            creds = self.credentials_class.get_default(credentials_type=ctype)
+            for attr in self.attributes.keys():
+                # Default configuration values related to credentials
+                # are defined as fake_* in fake_config.py
+                self.assertEqual(getattr(creds, attr), 'fake_' + attr)
+
+    def test_reset_all_attributes(self):
+        creds = self._get_credentials()
+        initial_creds = copy.deepcopy(creds)
+        set_attr = creds.__dict__.keys()
+        missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+        # Set all unset attributes, then reset
+        for attr in missing_attr:
+            setattr(creds, attr, 'fake' + attr)
+        creds.reset()
+        # Check reset credentials are same as initial ones
+        self.assertEqual(creds, initial_creds)
+
+    def test_reset_single_attribute(self):
+        creds = self._get_credentials()
+        initial_creds = copy.deepcopy(creds)
+        set_attr = creds.__dict__.keys()
+        missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+        # Set one unset attributes, then reset
+        for attr in missing_attr:
+            setattr(creds, attr, 'fake' + attr)
+            creds.reset()
+            # Check reset credentials are same as initial ones
+            self.assertEqual(creds, initial_creds)
+
+
+class KeystoneV3CredentialsTests(KeystoneV2CredentialsTests):
+    attributes = {
+        'username': 'fake_username',
+        'password': 'fake_password',
+        'project_name': 'fake_project_name',
+        'user_domain_name': 'fake_domain_name'
+    }
+
+    credentials_class = auth.KeystoneV3Credentials
+    identity_response = fake_identity._fake_v3_response
+
+    def setUp(self):
+        super(KeystoneV3CredentialsTests, self).setUp()
+        # Additional config items reset by cfg fixture after each test
+        cfg.CONF.set_default('auth_version', 'v3', group='identity')
+        # Identity group items
+        for prefix in ['', 'alt_', 'admin_']:
+            cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
+                                 group='identity')
+        # Compute Admin group items
+        cfg.CONF.set_default('domain_name', 'fake_domain_name',
+                             group='compute-admin')
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            creds = self.credentials_class.get_default(credentials_type=ctype)
+            for attr in self.attributes.keys():
+                if attr == 'project_name':
+                    config_value = 'fake_tenant_name'
+                elif attr == 'user_domain_name':
+                    config_value = 'fake_domain_name'
+                else:
+                    config_value = 'fake_' + attr
+                self.assertEqual(getattr(creds, attr), config_value)
+
+    def test_synced_attributes(self):
+        attributes = copy.copy(self.attributes)
+        # Create V3 credentials with project and user_domain set
+        for attr in ['project_id', 'user_domain_id']:
+            attributes[attr] = 'fake_' + attr
+        creds = self._get_credentials(attributes)
+        self.assertEqual(creds.project_name, creds.tenant_name)
+        self.assertEqual(creds.project_id, creds.tenant_id)
+        self.assertEqual(creds.user_domain_name, creds.project_domain_name)
+        self.assertEqual(creds.user_domain_id, creds.project_domain_id)
+        # Replace project and user_domain with tenant and project_domain
+        del attributes['user_domain_name']
+        del attributes['user_domain_id']
+        del attributes['project_name']
+        del attributes['project_id']
+        for attr in ['project_domain_name', 'project_domain_id',
+                     'tenant_name', 'tenant_id']:
+            attributes[attr] = 'fake_' + attr
+        creds = self._get_credentials(attributes)
+        self.assertEqual(creds.tenant_name, creds.project_name)
+        self.assertEqual(creds.tenant_id, creds.project_id)
+        self.assertEqual(creds.project_domain_name, creds.user_domain_name)
+        self.assertEqual(creds.project_domain_id, creds.user_domain_id)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index ebf0ca0..6b678f7 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -13,13 +13,13 @@
 #    under the License.
 
 
-import testtools
-
+import mock
 from oslo.config import cfg
+from oslotest import mockpatch
+import testtools
 
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
 from tempest import test
 from tempest.tests import base
 from tempest.tests import fake_config
@@ -232,3 +232,19 @@
                           self._test_requires_ext_helper,
                           extension='enabled_ext',
                           service='bad_service')
+
+
+class TestSimpleNegativeDecorator(BaseDecoratorsTest):
+    @test.SimpleNegativeAutoTest
+    class FakeNegativeJSONTest(test.NegativeAutoTest):
+        _schema_file = 'fake/schemas/file.json'
+
+    def test_testfunc_exist(self):
+        self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
+
+    @mock.patch('tempest.test.NegativeAutoTest.execute')
+    def test_testfunc_calls_execute(self, mock):
+        obj = self.FakeNegativeJSONTest("test_fake_negative")
+        self.assertIn("test_fake_negative", dir(obj))
+        obj.test_fake_negative()
+        mock.assert_called_once_with(self.FakeNegativeJSONTest._schema_file)
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
new file mode 100644
index 0000000..ab81836
--- /dev/null
+++ b/tempest/tests/test_hacking.py
@@ -0,0 +1,107 @@
+# Copyright 2014 Matthew Treinish
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.hacking import checks
+from tempest.tests import base
+
+
+class HackingTestCase(base.TestCase):
+    """
+    This class tests the hacking checks in tempest.hacking.checks by passing
+    strings to the check methods like the pep8/flake8 parser would. The parser
+    loops over each line in the file and then passes the parameters to the
+    check method. The parameter names in the check method dictate what type of
+    object is passed to the check method. The parameter types are::
+
+        logical_line: A processed line with the following modifications:
+            - Multi-line statements converted to a single line.
+            - Stripped left and right.
+            - Contents of strings replaced with "xxx" of same length.
+            - Comments removed.
+        physical_line: Raw line of text from the input file.
+        lines: a list of the raw lines from the input file
+        tokens: the tokens that contribute to this logical line
+        line_number: line number in the input file
+        total_lines: number of lines in the input file
+        blank_lines: blank lines before this one
+        indent_char: indentation character in this file (" " or "\t")
+        indent_level: indentation (with tabs expanded to multiples of 8)
+        previous_indent_level: indentation on previous line
+        previous_logical: previous logical line
+        filename: Path of the file being run through pep8
+
+    When running a test on a check method, the return will be False/None if
+    there is no violation in the sample input. If there is a violation, a
+    tuple is returned with a position in the line and a message. So to check
+    the result, just assertTrue if the check is expected to flag the line and
+    assertFalse if it should pass.
+    """
+    def test_no_setupclass_for_unit_tests(self):
+        self.assertTrue(checks.no_setupclass_for_unit_tests(
+            "  def setUpClass(cls):", './tempest/tests/fake_test.py'))
+        self.assertIsNone(checks.no_setupclass_for_unit_tests(
+            "  def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
+        self.assertFalse(checks.no_setupclass_for_unit_tests(
+            "  def setUpClass(cls):", './tempest/api/fake_test.py'))
+
+    def test_import_no_clients_in_api(self):
+        for client in checks.PYTHON_CLIENTS:
+            string = "import " + client + "client"
+            self.assertTrue(checks.import_no_clients_in_api(
+                string, './tempest/api/fake_test.py'))
+            self.assertFalse(checks.import_no_clients_in_api(
+                string, './tempest/scenario/fake_test.py'))
+
+    def test_scenario_tests_need_service_tags(self):
+        self.assertFalse(checks.scenario_tests_need_service_tags(
+            'def test_fake:', './tempest/scenario/test_fake.py',
+            "@test.services('compute')"))
+        self.assertFalse(checks.scenario_tests_need_service_tags(
+            'def test_fake_test:', './tempest/api/compute/test_fake.py',
+            "@test.services('image')"))
+        self.assertTrue(checks.scenario_tests_need_service_tags(
+            'def test_fake_test:', './tempest/scenario/test_fake.py',
+            '\n'))
+
+    def test_no_vi_headers(self):
+        # NOTE(mtreinish)  The lines parameter is used only for finding the
+        # line location in the file. So these tests just pass a list of an
+        # arbitrary length to use for verifying the check function.
+        self.assertTrue(checks.no_vi_headers(
+            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
+        self.assertTrue(checks.no_vi_headers(
+            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
+        self.assertFalse(checks.no_vi_headers(
+            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
+
+    def test_service_tags_not_in_module_path(self):
+        self.assertTrue(checks.service_tags_not_in_module_path(
+            "@test.services('compute')", './tempest/api/compute/fake_test.py'))
+        self.assertFalse(checks.service_tags_not_in_module_path(
+            "@test.services('compute')",
+            './tempest/scenario/compute/fake_test.py'))
+        self.assertFalse(checks.service_tags_not_in_module_path(
+            "@test.services('compute')", './tempest/api/image/fake_test.py'))
diff --git a/tempest/tests/test_rest_client.py b/tempest/tests/test_rest_client.py
index 0677aa0..d20520c 100644
--- a/tempest/tests/test_rest_client.py
+++ b/tempest/tests/test_rest_client.py
@@ -15,11 +15,12 @@
 import httplib2
 import json
 
+from oslotest import mockpatch
+
 from tempest.common import rest_client
+from tempest.common import xml_utils as xml
 from tempest import config
 from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
-from tempest.services.compute.xml import common as xml
 from tempest.tests import base
 from tempest.tests import fake_auth_provider
 from tempest.tests import fake_config
@@ -139,6 +140,102 @@
         self._verify_headers(resp)
 
 
+class TestRestClientUpdateHeaders(BaseRestClientTestClass):
+    def setUp(self):
+        self.fake_http = fake_http.fake_httplib2()
+        super(TestRestClientUpdateHeaders, self).setUp()
+        self.useFixture(mockpatch.PatchObject(self.rest_client,
+                                              '_error_checker'))
+        self.headers = {'X-Configuration-Session': 'session_id'}
+
+    def test_post_update_headers(self):
+        __, return_dict = self.rest_client.post(self.url, {},
+                                                extra_headers=True,
+                                                headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_get_update_headers(self):
+        __, return_dict = self.rest_client.get(self.url,
+                                               extra_headers=True,
+                                               headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_delete_update_headers(self):
+        __, return_dict = self.rest_client.delete(self.url,
+                                                  extra_headers=True,
+                                                  headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_patch_update_headers(self):
+        __, return_dict = self.rest_client.patch(self.url, {},
+                                                 extra_headers=True,
+                                                 headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_put_update_headers(self):
+        __, return_dict = self.rest_client.put(self.url, {},
+                                               extra_headers=True,
+                                               headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_head_update_headers(self):
+        self.useFixture(mockpatch.PatchObject(self.rest_client,
+                                              'response_checker'))
+
+        __, return_dict = self.rest_client.head(self.url,
+                                                extra_headers=True,
+                                                headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+    def test_copy_update_headers(self):
+        __, return_dict = self.rest_client.copy(self.url,
+                                                extra_headers=True,
+                                                headers=self.headers)
+
+        self.assertDictContainsSubset(
+            {'X-Configuration-Session': 'session_id',
+             'Content-Type': 'application/json',
+             'Accept': 'application/json'},
+            return_dict['headers']
+        )
+
+
 class TestRestClientHeadersXML(TestRestClientHeadersJSON):
     TYPE = "xml"
 
@@ -384,3 +481,63 @@
         self.assertRaises(NotImplementedError,
                           self.rest_client.wait_for_resource_deletion,
                           '1234')
+
+
+class TestNegativeRestClient(BaseRestClientTestClass):
+
+    def setUp(self):
+        self.fake_http = fake_http.fake_httplib2()
+        super(TestNegativeRestClient, self).setUp()
+        self.negative_rest_client = rest_client.NegativeRestClient(
+            fake_auth_provider.FakeAuthProvider())
+        self.useFixture(mockpatch.PatchObject(self.negative_rest_client,
+                                              '_log_request'))
+
+    def test_post(self):
+        __, return_dict = self.negative_rest_client.send_request('POST',
+                                                                 self.url,
+                                                                 [], {})
+        self.assertEqual('POST', return_dict['method'])
+
+    def test_get(self):
+        __, return_dict = self.negative_rest_client.send_request('GET',
+                                                                 self.url,
+                                                                 [])
+        self.assertEqual('GET', return_dict['method'])
+
+    def test_delete(self):
+        __, return_dict = self.negative_rest_client.send_request('DELETE',
+                                                                 self.url,
+                                                                 [])
+        self.assertEqual('DELETE', return_dict['method'])
+
+    def test_patch(self):
+        __, return_dict = self.negative_rest_client.send_request('PATCH',
+                                                                 self.url,
+                                                                 [], {})
+        self.assertEqual('PATCH', return_dict['method'])
+
+    def test_put(self):
+        __, return_dict = self.negative_rest_client.send_request('PUT',
+                                                                 self.url,
+                                                                 [], {})
+        self.assertEqual('PUT', return_dict['method'])
+
+    def test_head(self):
+        self.useFixture(mockpatch.PatchObject(self.negative_rest_client,
+                                              'response_checker'))
+        __, return_dict = self.negative_rest_client.send_request('HEAD',
+                                                                 self.url,
+                                                                 [])
+        self.assertEqual('HEAD', return_dict['method'])
+
+    def test_copy(self):
+        __, return_dict = self.negative_rest_client.send_request('COPY',
+                                                                 self.url,
+                                                                 [])
+        self.assertEqual('COPY', return_dict['method'])
+
+    def test_other(self):
+        self.assertRaises(AssertionError,
+                          self.negative_rest_client.send_request,
+                          'OTHER', self.url, [])
diff --git a/tempest/tests/test_ssh.py b/tempest/tests/test_ssh.py
index a6eedc4..0da52dc 100644
--- a/tempest/tests/test_ssh.py
+++ b/tempest/tests/test_ssh.py
@@ -14,6 +14,7 @@
 
 import contextlib
 import socket
+import time
 
 import mock
 import testtools
@@ -43,25 +44,21 @@
             rsa_mock.assert_not_called()
             cs_mock.assert_not_called()
 
-    def test_get_ssh_connection(self):
-        c_mock = self.patch('paramiko.SSHClient')
-        aa_mock = self.patch('paramiko.AutoAddPolicy')
-        s_mock = self.patch('time.sleep')
-        t_mock = self.patch('time.time')
+    def _set_ssh_connection_mocks(self):
+        client_mock = mock.MagicMock()
+        client_mock.connect.return_value = True
+        return (self.patch('paramiko.SSHClient'),
+                self.patch('paramiko.AutoAddPolicy'),
+                client_mock)
 
+    def test_get_ssh_connection(self):
+        c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
+        s_mock = self.patch('time.sleep')
+
+        c_mock.return_value = client_mock
         aa_mock.return_value = mock.sentinel.aa
 
-        def reset_mocks():
-            aa_mock.reset_mock()
-            c_mock.reset_mock()
-            s_mock.reset_mock()
-            t_mock.reset_mock()
-
         # Test normal case for successful connection on first try
-        client_mock = mock.MagicMock()
-        c_mock.return_value = client_mock
-        client_mock.connect.return_value = True
-
         client = ssh.Client('localhost', 'root', timeout=2)
         client._get_ssh_connection(sleep=1)
 
@@ -79,50 +76,40 @@
         )]
         self.assertEqual(expected_connect, client_mock.connect.mock_calls)
         s_mock.assert_not_called()
-        t_mock.assert_called_once_with()
 
-        reset_mocks()
+    def test_get_ssh_connection_two_attempts(self):
+        c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
 
-        # Test case when connection fails on first two tries and
-        # succeeds on third try (this validates retry logic)
-        client_mock.connect.side_effect = [socket.error, socket.error, True]
-        t_mock.side_effect = [
-            1000,  # Start time
-            1000,  # LOG.warning() calls time.time() loop 1
-            1001,  # Sleep loop 1
-            1001,  # LOG.warning() calls time.time() loop 2
-            1002   # Sleep loop 2
+        c_mock.return_value = client_mock
+        client_mock.connect.side_effect = [
+            socket.error,
+            mock.MagicMock()
         ]
 
+        client = ssh.Client('localhost', 'root', timeout=1)
+        start_time = int(time.time())
         client._get_ssh_connection(sleep=1)
+        end_time = int(time.time())
+        self.assertTrue((end_time - start_time) < 3)
+        self.assertTrue((end_time - start_time) > 1)
 
-        expected_sleeps = [
-            mock.call(2),
-            mock.call(3)
-        ]
-        self.assertEqual(expected_sleeps, s_mock.mock_calls)
+    def test_get_ssh_connection_timeout(self):
+        c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
 
-        reset_mocks()
-
-        # Test case when connection fails on first three tries and
-        # exceeds the timeout, so expect to raise a Timeout exception
+        c_mock.return_value = client_mock
         client_mock.connect.side_effect = [
             socket.error,
             socket.error,
-            socket.error
-        ]
-        t_mock.side_effect = [
-            1000,  # Start time
-            1000,  # LOG.warning() calls time.time() loop 1
-            1001,  # Sleep loop 1
-            1001,  # LOG.warning() calls time.time() loop 2
-            1002,  # Sleep loop 2
-            1003,  # Sleep loop 3
-            1004  # LOG.error() calls time.time()
+            socket.error,
         ]
 
+        client = ssh.Client('localhost', 'root', timeout=2)
+        start_time = int(time.time())
         with testtools.ExpectedException(exceptions.SSHTimeout):
             client._get_ssh_connection()
+        end_time = int(time.time())
+        self.assertTrue((end_time - start_time) < 4)
+        self.assertTrue((end_time - start_time) >= 2)
 
     def test_exec_command(self):
         gsc_mock = self.patch('tempest.common.ssh.Client._get_ssh_connection')
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index 2e50cfd..7a9b6be 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -13,12 +13,15 @@
 #    under the License.
 
 import keystoneclient.v2_0.client as keystoneclient
-from mock import patch
+import mock
 import neutronclient.v2_0.client as neutronclient
 from oslo.config import cfg
 
+from tempest import clients
+from tempest.common import http
 from tempest.common import isolated_creds
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common.fixture import mockpatch
 from tempest.services.identity.json import identity_client as json_iden_client
 from tempest.services.identity.xml import identity_client as xml_iden_client
@@ -26,6 +29,8 @@
 from tempest.services.network.xml import network_client as xml_network_client
 from tempest.tests import base
 from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
 
 
 class TestTenantIsolation(base.TestCase):
@@ -34,6 +39,9 @@
         super(TestTenantIsolation, self).setUp()
         self.useFixture(fake_config.ConfigFixture())
         self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+        self.fake_http = fake_http.fake_httplib2(return_type=200)
+        self.stubs.Set(http.ClosingHttp, 'request',
+                       fake_identity._fake_v2_response)
 
     def test_tempest_client(self):
         iso_creds = isolated_creds.IsolatedCreds('test class')
@@ -45,6 +53,12 @@
     def test_official_client(self):
         self.useFixture(mockpatch.PatchObject(keystoneclient.Client,
                                               'authenticate'))
+        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+                                              '_get_image_client'))
+        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+                                              '_get_object_storage_client'))
+        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+                                              '_get_orchestration_client'))
         iso_creds = isolated_creds.IsolatedCreds('test class',
                                                  tempest_client=False)
         self.assertTrue(isinstance(iso_creds.identity_admin_client,
@@ -100,23 +114,21 @@
                           {'router': {'id': id, 'name': name}})))
         return router_fix
 
-    @patch('tempest.common.rest_client.RestClient')
+    @mock.patch('tempest.common.rest_client.RestClient')
     def test_primary_creds(self, MockRestClient):
         cfg.CONF.set_default('neutron', False, 'service_available')
         iso_creds = isolated_creds.IsolatedCreds('test class',
                                                  password='fake_password')
         self._mock_tenant_create('1234', 'fake_prim_tenant')
         self._mock_user_create('1234', 'fake_prim_user')
-        username, tenant_name, password = iso_creds.get_primary_creds()
-        self.assertEqual(username, 'fake_prim_user')
-        self.assertEqual(tenant_name, 'fake_prim_tenant')
-        # Verify helper methods
-        tenant = iso_creds.get_primary_tenant()
-        user = iso_creds.get_primary_user()
-        self.assertEqual(tenant['id'], '1234')
-        self.assertEqual(user['id'], '1234')
+        primary_creds = iso_creds.get_primary_creds()
+        self.assertEqual(primary_creds.username, 'fake_prim_user')
+        self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
+        # Verify IDs
+        self.assertEqual(primary_creds.tenant_id, '1234')
+        self.assertEqual(primary_creds.user_id, '1234')
 
-    @patch('tempest.common.rest_client.RestClient')
+    @mock.patch('tempest.common.rest_client.RestClient')
     def test_admin_creds(self, MockRestClient):
         cfg.CONF.set_default('neutron', False, 'service_available')
         iso_creds = isolated_creds.IsolatedCreds('test class',
@@ -129,35 +141,33 @@
             return_value=({'status': 200},
                           [{'id': '1234', 'name': 'admin'}])))
 
-        user_mock = patch.object(json_iden_client.IdentityClientJSON,
-                                 'assign_user_role')
+        user_mock = mock.patch.object(json_iden_client.IdentityClientJSON,
+                                      'assign_user_role')
         user_mock.start()
         self.addCleanup(user_mock.stop)
-        with patch.object(json_iden_client.IdentityClientJSON,
-                          'assign_user_role') as user_mock:
-            username, tenant_name, password = iso_creds.get_admin_creds()
+        with mock.patch.object(json_iden_client.IdentityClientJSON,
+                               'assign_user_role') as user_mock:
+            admin_creds = iso_creds.get_admin_creds()
         user_mock.assert_called_once_with('1234', '1234', '1234')
-        self.assertEqual(username, 'fake_admin_user')
-        self.assertEqual(tenant_name, 'fake_admin_tenant')
-        # Verify helper methods
-        tenant = iso_creds.get_admin_tenant()
-        user = iso_creds.get_admin_user()
-        self.assertEqual(tenant['id'], '1234')
-        self.assertEqual(user['id'], '1234')
+        self.assertEqual(admin_creds.username, 'fake_admin_user')
+        self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
+        # Verify IDs
+        self.assertEqual(admin_creds.tenant_id, '1234')
+        self.assertEqual(admin_creds.user_id, '1234')
 
-    @patch('tempest.common.rest_client.RestClient')
+    @mock.patch('tempest.common.rest_client.RestClient')
     def test_all_cred_cleanup(self, MockRestClient):
         cfg.CONF.set_default('neutron', False, 'service_available')
         iso_creds = isolated_creds.IsolatedCreds('test class',
                                                  password='fake_password')
         tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
         user_fix = self._mock_user_create('1234', 'fake_prim_user')
-        username, tenant_name, password = iso_creds.get_primary_creds()
+        iso_creds.get_primary_creds()
         tenant_fix.cleanUp()
         user_fix.cleanUp()
         tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
         user_fix = self._mock_user_create('12345', 'fake_alt_user')
-        alt_username, alt_tenant, alt_password = iso_creds.get_alt_creds()
+        iso_creds.get_alt_creds()
         tenant_fix.cleanUp()
         user_fix.cleanUp()
         tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
@@ -167,10 +177,9 @@
             'list_roles',
             return_value=({'status': 200},
                           [{'id': '123456', 'name': 'admin'}])))
-        with patch.object(json_iden_client.IdentityClientJSON,
-                          'assign_user_role'):
-            admin_username, admin_tenant, admin_pass = \
-                iso_creds.get_admin_creds()
+        with mock.patch.object(json_iden_client.IdentityClientJSON,
+                               'assign_user_role'):
+            iso_creds.get_admin_creds()
         user_mock = self.patch(
             'tempest.services.identity.json.identity_client.'
             'IdentityClientJSON.delete_user')
@@ -193,23 +202,21 @@
         self.assertIn('12345', args)
         self.assertIn('123456', args)
 
-    @patch('tempest.common.rest_client.RestClient')
+    @mock.patch('tempest.common.rest_client.RestClient')
     def test_alt_creds(self, MockRestClient):
         cfg.CONF.set_default('neutron', False, 'service_available')
         iso_creds = isolated_creds.IsolatedCreds('test class',
                                                  password='fake_password')
         self._mock_user_create('1234', 'fake_alt_user')
         self._mock_tenant_create('1234', 'fake_alt_tenant')
-        username, tenant_name, password = iso_creds.get_alt_creds()
-        self.assertEqual(username, 'fake_alt_user')
-        self.assertEqual(tenant_name, 'fake_alt_tenant')
-        # Verify helper methods
-        tenant = iso_creds.get_alt_tenant()
-        user = iso_creds.get_alt_user()
-        self.assertEqual(tenant['id'], '1234')
-        self.assertEqual(user['id'], '1234')
+        alt_creds = iso_creds.get_alt_creds()
+        self.assertEqual(alt_creds.username, 'fake_alt_user')
+        self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
+        # Verify IDs
+        self.assertEqual(alt_creds.tenant_id, '1234')
+        self.assertEqual(alt_creds.user_id, '1234')
 
-    @patch('tempest.common.rest_client.RestClient')
+    @mock.patch('tempest.common.rest_client.RestClient')
     def test_network_creation(self, MockRestClient):
         iso_creds = isolated_creds.IsolatedCreds('test class',
                                                  password='fake_password')
@@ -221,7 +228,7 @@
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClientJSON.'
             'add_router_interface_with_subnet_id')
-        username, tenant_name, password = iso_creds.get_primary_creds()
+        iso_creds.get_primary_creds()
         router_interface_mock.called_once_with('1234', '1234')
         network = iso_creds.get_primary_network()
         subnet = iso_creds.get_primary_subnet()
@@ -233,7 +240,7 @@
         self.assertEqual(router['id'], '1234')
         self.assertEqual(router['name'], 'fake_router')
 
-    @patch('tempest.common.rest_client.RestClient')
+    @mock.patch('tempest.common.rest_client.RestClient')
     def test_network_cleanup(self, MockRestClient):
         iso_creds = isolated_creds.IsolatedCreds('test class',
                                                  password='fake_password')
@@ -246,7 +253,7 @@
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClientJSON.'
             'add_router_interface_with_subnet_id')
-        username, tenant_name, password = iso_creds.get_primary_creds()
+        iso_creds.get_primary_creds()
         router_interface_mock.called_once_with('1234', '1234')
         router_interface_mock.reset_mock()
         tenant_fix.cleanUp()
@@ -261,7 +268,7 @@
         subnet_fix = self._mock_subnet_create(iso_creds, '12345',
                                               'fake_alt_subnet')
         router_fix = self._mock_router_create('12345', 'fake_alt_router')
-        alt_username, alt_tenant_name, password = iso_creds.get_alt_creds()
+        iso_creds.get_alt_creds()
         router_interface_mock.called_once_with('12345', '12345')
         router_interface_mock.reset_mock()
         tenant_fix.cleanUp()
@@ -282,28 +289,28 @@
             'list_roles',
             return_value=({'status': 200},
                           [{'id': '123456', 'name': 'admin'}])))
-        with patch.object(json_iden_client.IdentityClientJSON,
-                          'assign_user_role'):
-            admin_user, admin_tenant, password = iso_creds.get_admin_creds()
+        with mock.patch.object(json_iden_client.IdentityClientJSON,
+                               'assign_user_role'):
+            iso_creds.get_admin_creds()
         self.patch('tempest.services.identity.json.identity_client.'
                    'IdentityClientJSON.delete_user')
         self.patch('tempest.services.identity.json.identity_client.'
                    'IdentityClientJSON.delete_tenant')
-        net = patch.object(iso_creds.network_admin_client,
-                           'delete_network')
+        net = mock.patch.object(iso_creds.network_admin_client,
+                                'delete_network')
         net_mock = net.start()
-        subnet = patch.object(iso_creds.network_admin_client,
-                              'delete_subnet')
+        subnet = mock.patch.object(iso_creds.network_admin_client,
+                                   'delete_subnet')
         subnet_mock = subnet.start()
-        router = patch.object(iso_creds.network_admin_client,
-                              'delete_router')
+        router = mock.patch.object(iso_creds.network_admin_client,
+                                   'delete_router')
         router_mock = router.start()
         remove_router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClientJSON.'
             'remove_router_interface_with_subnet_id')
-        port_list_mock = patch.object(iso_creds.network_admin_client,
-                                      'list_ports', return_value=(
-                                      {'status': 200}, {'ports': []}))
+        port_list_mock = mock.patch.object(iso_creds.network_admin_client,
+                                           'list_ports', return_value=(
+                                           {'status': 200}, {'ports': []}))
         port_list_mock.start()
         iso_creds.clear_isolated_creds()
         # Verify remove router interface calls
@@ -334,3 +341,140 @@
         self.assertIn('1234', args)
         self.assertIn('12345', args)
         self.assertIn('123456', args)
+
+    @mock.patch('tempest.common.rest_client.RestClient')
+    def test_network_alt_creation(self, MockRestClient):
+        iso_creds = isolated_creds.IsolatedCreds('test class',
+                                                 password='fake_password')
+        self._mock_user_create('1234', 'fake_alt_user')
+        self._mock_tenant_create('1234', 'fake_alt_tenant')
+        self._mock_network_create(iso_creds, '1234', 'fake_alt_net')
+        self._mock_subnet_create(iso_creds, '1234', 'fake_alt_subnet')
+        self._mock_router_create('1234', 'fake_alt_router')
+        router_interface_mock = self.patch(
+            'tempest.services.network.json.network_client.NetworkClientJSON.'
+            'add_router_interface_with_subnet_id')
+        iso_creds.get_alt_creds()
+        router_interface_mock.called_once_with('1234', '1234')
+        network = iso_creds.get_alt_network()
+        subnet = iso_creds.get_alt_subnet()
+        router = iso_creds.get_alt_router()
+        self.assertEqual(network['id'], '1234')
+        self.assertEqual(network['name'], 'fake_alt_net')
+        self.assertEqual(subnet['id'], '1234')
+        self.assertEqual(subnet['name'], 'fake_alt_subnet')
+        self.assertEqual(router['id'], '1234')
+        self.assertEqual(router['name'], 'fake_alt_router')
+
+    @mock.patch('tempest.common.rest_client.RestClient')
+    def test_network_admin_creation(self, MockRestClient):
+        iso_creds = isolated_creds.IsolatedCreds('test class',
+                                                 password='fake_password')
+        self._mock_user_create('1234', 'fake_admin_user')
+        self._mock_tenant_create('1234', 'fake_admin_tenant')
+        self._mock_network_create(iso_creds, '1234', 'fake_admin_net')
+        self._mock_subnet_create(iso_creds, '1234', 'fake_admin_subnet')
+        self._mock_router_create('1234', 'fake_admin_router')
+        router_interface_mock = self.patch(
+            'tempest.services.network.json.network_client.NetworkClientJSON.'
+            'add_router_interface_with_subnet_id')
+        self.useFixture(mockpatch.PatchObject(
+            json_iden_client.IdentityClientJSON,
+            'list_roles',
+            return_value=({'status': 200},
+                          [{'id': '123456', 'name': 'admin'}])))
+        with mock.patch.object(json_iden_client.IdentityClientJSON,
+                               'assign_user_role'):
+            iso_creds.get_admin_creds()
+        router_interface_mock.called_once_with('1234', '1234')
+        network = iso_creds.get_admin_network()
+        subnet = iso_creds.get_admin_subnet()
+        router = iso_creds.get_admin_router()
+        self.assertEqual(network['id'], '1234')
+        self.assertEqual(network['name'], 'fake_admin_net')
+        self.assertEqual(subnet['id'], '1234')
+        self.assertEqual(subnet['name'], 'fake_admin_subnet')
+        self.assertEqual(router['id'], '1234')
+        self.assertEqual(router['name'], 'fake_admin_router')
+
+    @mock.patch('tempest.common.rest_client.RestClient')
+    def test_no_network_resources(self, MockRestClient):
+        net_dict = {
+            'network': False,
+            'router': False,
+            'subnet': False,
+            'dhcp': False,
+        }
+        iso_creds = isolated_creds.IsolatedCreds('test class',
+                                                 password='fake_password',
+                                                 network_resources=net_dict)
+        self._mock_user_create('1234', 'fake_prim_user')
+        self._mock_tenant_create('1234', 'fake_prim_tenant')
+        net = mock.patch.object(iso_creds.network_admin_client,
+                                'delete_network')
+        net_mock = net.start()
+        subnet = mock.patch.object(iso_creds.network_admin_client,
+                                   'delete_subnet')
+        subnet_mock = subnet.start()
+        router = mock.patch.object(iso_creds.network_admin_client,
+                                   'delete_router')
+        router_mock = router.start()
+
+        iso_creds.get_primary_creds()
+        self.assertEqual(net_mock.mock_calls, [])
+        self.assertEqual(subnet_mock.mock_calls, [])
+        self.assertEqual(router_mock.mock_calls, [])
+        network = iso_creds.get_primary_network()
+        subnet = iso_creds.get_primary_subnet()
+        router = iso_creds.get_primary_router()
+        self.assertIsNone(network)
+        self.assertIsNone(subnet)
+        self.assertIsNone(router)
+
+    @mock.patch('tempest.common.rest_client.RestClient')
+    def test_router_without_network(self, MockRestClient):
+        net_dict = {
+            'network': False,
+            'router': True,
+            'subnet': False,
+            'dhcp': False,
+        }
+        iso_creds = isolated_creds.IsolatedCreds('test class',
+                                                 password='fake_password',
+                                                 network_resources=net_dict)
+        self._mock_user_create('1234', 'fake_prim_user')
+        self._mock_tenant_create('1234', 'fake_prim_tenant')
+        self.assertRaises(exceptions.InvalidConfiguration,
+                          iso_creds.get_primary_creds)
+
+    @mock.patch('tempest.common.rest_client.RestClient')
+    def test_subnet_without_network(self, MockRestClient):
+        net_dict = {
+            'network': False,
+            'router': False,
+            'subnet': True,
+            'dhcp': False,
+        }
+        iso_creds = isolated_creds.IsolatedCreds('test class',
+                                                 password='fake_password',
+                                                 network_resources=net_dict)
+        self._mock_user_create('1234', 'fake_prim_user')
+        self._mock_tenant_create('1234', 'fake_prim_tenant')
+        self.assertRaises(exceptions.InvalidConfiguration,
+                          iso_creds.get_primary_creds)
+
+    @mock.patch('tempest.common.rest_client.RestClient')
+    def test_dhcp_without_subnet(self, MockRestClient):
+        net_dict = {
+            'network': False,
+            'router': False,
+            'subnet': False,
+            'dhcp': True,
+        }
+        iso_creds = isolated_creds.IsolatedCreds('test class',
+                                                 password='fake_password',
+                                                 network_resources=net_dict)
+        self._mock_user_create('1234', 'fake_prim_user')
+        self._mock_tenant_create('1234', 'fake_prim_tenant')
+        self.assertRaises(exceptions.InvalidConfiguration,
+                          iso_creds.get_primary_creds)
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index f6ed445..bba4012 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -14,6 +14,7 @@
 
 import os
 import shutil
+import StringIO
 import subprocess
 import tempfile
 
@@ -33,6 +34,7 @@
         # Setup Test files
         self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
         self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+        self.subunit_trace = os.path.join(self.directory, 'subunit-trace.py')
         self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
         self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
         self.init_file = os.path.join(self.test_dir, '__init__.py')
@@ -43,55 +45,48 @@
         shutil.copy('setup.py', self.setup_py)
         shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
         shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+        shutil.copy('tools/subunit-trace.py', self.subunit_trace)
+        # copy over the pretty_tox scripts
+        shutil.copy('tools/pretty_tox.sh',
+                    os.path.join(self.directory, 'pretty_tox.sh'))
+        shutil.copy('tools/pretty_tox_serial.sh',
+                    os.path.join(self.directory, 'pretty_tox_serial.sh'))
+
+        self.stdout = StringIO.StringIO()
+        self.stderr = StringIO.StringIO()
+        # Change directory, run wrapper and check result
+        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+        os.chdir(self.directory)
+
+    def assertRunExit(self, cmd, expected):
+        p = subprocess.Popen(
+            "bash %s" % cmd, shell=True,
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # wait() is dangerous in the general case, however the amount of
+        # data coming back on those pipes is small enough that it shouldn't
+        # be a problem.
+        p.wait()
+
+        self.assertEqual(
+            p.returncode, expected,
+            "Stdout: %s; Stderr: %s" % (p.stdout, p.stderr))
 
     def test_pretty_tox(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
-        shutil.copy('tools/pretty_tox.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
         # Git init is required for the pbr testr command. pbr requires a git
         # version or an sdist to work. so make the test directory a git repo
         # too.
-        subprocess.call(['git', 'init'])
-        exit_code = subprocess.call('bash pretty_tox.sh tests.passing',
-                                    shell=True, stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 0)
+        subprocess.call(['git', 'init'], stderr=DEVNULL)
+        self.assertRunExit('pretty_tox.sh tests.passing', 0)
 
     def test_pretty_tox_fails(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
-        shutil.copy('tools/pretty_tox.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
         # Git init is required for the pbr testr command. pbr requires a git
         # version or an sdist to work. so make the test directory a git repo
         # too.
-        subprocess.call(['git', 'init'])
-        exit_code = subprocess.call('bash pretty_tox.sh', shell=True,
-                                    stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 1)
+        subprocess.call(['git', 'init'], stderr=DEVNULL)
+        self.assertRunExit('pretty_tox.sh', 1)
 
     def test_pretty_tox_serial(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
-        shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
-        exit_code = subprocess.call('bash pretty_tox_serial.sh tests.passing',
-                                    shell=True, stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 0)
+        self.assertRunExit('pretty_tox_serial.sh tests.passing', 0)
 
     def test_pretty_tox_serial_fails(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
-        shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
-        exit_code = subprocess.call('bash pretty_tox_serial.sh', shell=True,
-                                    stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 1)
+        self.assertRunExit('pretty_tox_serial.sh', 1)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index e6a1638..b2eb18d 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -193,7 +193,6 @@
             instance.terminate()
         self.cancelResourceCleanUp(rcuk)
 
-    @test.skip_because(bug="1098891")
     @test.attr(type='smoke')
     def test_run_terminate_instance(self):
         # EC2 run, terminate immediately
@@ -211,18 +210,15 @@
             pass
         except exception.EC2ResponseError as exc:
             if self.ec2_error_code.\
-                client.InvalidInstanceID.NotFound.match(exc):
+                client.InvalidInstanceID.NotFound.match(exc) is None:
                 pass
             else:
                 raise
         else:
             self.assertNotEqual(instance.state, "running")
 
-    # NOTE(afazekas): doctored test case,
-    # with normal validation it would fail
-    @test.skip_because(bug="1182679")
     @test.attr(type='smoke')
-    def test_integration_1(self):
+    def test_compute_with_volumes(self):
         # EC2 1. integration test (not strict)
         image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
         sec_group_name = data_utils.rand_name("securitygroup-")
@@ -250,14 +246,20 @@
                                     instance_type=self.instance_type,
                                     key_name=self.keypair_name,
                                     security_groups=(sec_group_name,))
+
+        LOG.debug("Instance booted - state: %s",
+                  reservation.instances[0].state)
+
         self.addResourceCleanUp(self.destroy_reservation,
                                 reservation)
         volume = self.ec2_client.create_volume(1, self.zone)
+        LOG.debug("Volume created - status: %s", volume.status)
+
         self.addResourceCleanUp(self.destroy_volume_wait, volume)
         instance = reservation.instances[0]
-        LOG.info("state: %s", instance.state)
         if instance.state != "running":
             self.assertInstanceStateWait(instance, "running")
+        LOG.debug("Instance now running - state: %s", instance.state)
 
         address = self.ec2_client.allocate_address()
         rcuk_a = self.addResourceCleanUp(address.delete)
@@ -285,10 +287,21 @@
         volume.attach(instance.id, "/dev/vdh")
 
         def _volume_state():
+            """Return volume state realizing that 'in-use' is overloaded."""
             volume.update(validate=True)
-            return volume.status
+            status = volume.status
+            attached = volume.attach_data.status
+            LOG.debug("Volume %s is in status: %s, attach_status: %s",
+                      volume.id, status, attached)
+            # Nova reports 'in-use' on 'attaching' volumes because it
+            # has a single volume status while EC2 has two. Ensure that
+            # if we aren't attached yet we return something other than
+            # 'in-use'.
+            if status == 'in-use' and attached != 'attached':
+                return 'attaching'
+            else:
+                return status
 
-        self.assertVolumeStatusWait(_volume_state, "in-use")
         wait.re_search_wait(_volume_state, "in-use")
 
         # NOTE(afazekas):  Different Hypervisor backends names
@@ -297,6 +310,7 @@
 
         def _part_state():
             current = ssh.get_partitions().split('\n')
+            LOG.debug("Partition map for instance: %s", current)
             if current > part_lines:
                 return 'INCREASE'
             if current < part_lines:
@@ -312,7 +326,6 @@
 
         self.assertVolumeStatusWait(_volume_state, "available")
         wait.re_search_wait(_volume_state, "available")
-        LOG.info("Volume %s state: %s", volume.id, volume.status)
 
         wait.state_wait(_part_state, 'DECREASE')
 
@@ -324,7 +337,7 @@
         self.assertAddressReleasedWait(address)
         self.cancelResourceCleanUp(rcuk_a)
 
-        LOG.info("state: %s", instance.state)
+        LOG.debug("Instance %s state: %s", instance.id, instance.state)
         if instance.state != "stopped":
             self.assertInstanceStateWait(instance, "stopped")
         # TODO(afazekas): move steps from teardown to the test case
diff --git a/test-requirements.txt b/test-requirements.txt
index 8d64167..b9c75c8 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,9 +1,10 @@
 hacking>=0.8.0,<0.9
 # needed for doc build
 docutils==0.9.1
-sphinx>=1.1.2,<1.2
+sphinx>=1.2.1,<1.3
 python-subunit>=0.0.18
 oslosphinx
 mox>=0.5.3
 mock>=1.0
 coverage>=3.6
+oslotest
diff --git a/tools/check_logs.py b/tools/check_logs.py
index e28c230..bc4eaca 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -37,6 +37,7 @@
     'ceilometer-alarm-evaluator',
     'ceilometer-anotification',
     'ceilometer-api',
+    'ceilometer-collector',
     'c-vol',
     'g-api',
     'h-api',
@@ -45,7 +46,6 @@
     'n-api',
     'n-cpu',
     'n-net',
-    'n-sch',
     'q-agt',
     'q-dhcp',
     'q-lbaas',
@@ -90,7 +90,7 @@
                     break
             if not whitelisted or dump_all_errors:
                 if print_log_name:
-                    print("Log File Has Errors: %s" % name)
+                    print("\nLog File Has Errors: %s" % name)
                     print_log_name = False
                 if not whitelisted:
                     had_errors = True
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 46822e3..743b59d 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -101,7 +101,6 @@
             print('done.')
         else:
             print("venv already exists...")
-            pass
 
     def pip_install(self, *args):
         self.run_command(['tools/with_venv.sh',
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
index 07c35a0..0a04ce6 100755
--- a/tools/pretty_tox.sh
+++ b/tools/pretty_tox.sh
@@ -3,4 +3,4 @@
 set -o pipefail
 
 TESTRARGS=$1
-python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit2pyunit
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py --no-failure-debug -f
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
index 42ce760..db70890 100755
--- a/tools/pretty_tox_serial.sh
+++ b/tools/pretty_tox_serial.sh
@@ -7,7 +7,8 @@
 if [ ! -d .testrepository ]; then
     testr init
 fi
-testr run --subunit $TESTRARGS | subunit2pyunit
+testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py -f -n
 retval=$?
 testr slowest
+
 exit $retval
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
new file mode 100755
index 0000000..9bfefe1
--- /dev/null
+++ b/tools/subunit-trace.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Samsung Electronics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Trace a subunit stream in reasonable detail and high accuracy."""
+
+import argparse
+import functools
+import re
+import sys
+
+import mimeparse
+import subunit
+import testtools
+
+DAY_SECONDS = 60 * 60 * 24
+FAILS = []
+RESULTS = {}
+
+
+class Starts(testtools.StreamResult):
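+    """Emit a start line for each test and pass through raw stream text."""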
+
+    def __init__(self, output):
+        super(Starts, self).__init__()
+        self._output = output
+
+    def startTestRun(self):
+        self._neednewline = False
+        self._emitted = set()
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+               runnable=True, file_name=None, file_bytes=None, eof=False,
+               mime_type=None, route_code=None, timestamp=None):
+        super(Starts, self).status(
+            test_id, test_status,
+            test_tags=test_tags, runnable=runnable, file_name=file_name,
+            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+            route_code=route_code, timestamp=timestamp)
+        if not test_id:
+            if not file_bytes:
+                return
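+            # Attachments with a missing or bogus 'test/plain' mime type
+            # are treated as UTF-8 text so they can be decoded below.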
+            if not mime_type or mime_type == 'test/plain;charset=utf8':
+                mime_type = 'text/plain; charset=utf-8'
+            primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
+            content_type = testtools.content_type.ContentType(
+                primary, sub, parameters)
+            content = testtools.content.Content(
+                content_type, lambda: [file_bytes])
+            text = content.as_text()
+            if text and text[-1] not in '\r\n':
+                self._neednewline = True
+            self._output.write(text)
+        elif test_status == 'inprogress' and test_id not in self._emitted:
+            if self._neednewline:
+                self._neednewline = False
+                self._output.write('\n')
+            worker = ''
+            for tag in test_tags or ():
+                if tag.startswith('worker-'):
+                    worker = '(' + tag[7:] + ') '
+            if timestamp:
+                timestr = timestamp.isoformat()
+            else:
+                timestr = ''
+            self._output.write('%s: %s%s [start]\n' %
+                               (timestr, worker, test_id))
+            self._emitted.add(test_id)
+
+
+def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
+    """Clean up the test name for display.
+
+    By default we strip out the tags in the test because they don't help us
+    in identifying the test that was run from its result.
+
+    Stripping out the testscenarios information (not to be confused with
+    tempest scenarios) is also possible, however it is often needed to
+    identify generated negative tests.
+    """
+    if strip_tags:
+        tags_start = name.find('[')
+        tags_end = name.find(']')
+        if tags_start > 0 and tags_end > tags_start:
+            newname = name[:tags_start]
+            newname += name[tags_end + 1:]
+            name = newname
+
+    if strip_scenarios:
+        tags_start = name.find('(')
+        tags_end = name.find(')')
+        if tags_start > 0 and tags_end > tags_start:
+            newname = name[:tags_start]
+            newname += name[tags_end + 1:]
+            name = newname
+
+    return name
+
+
+def get_duration(timestamps):
+    start, end = timestamps
+    if not start or not end:
+        duration = ''
+    else:
+        delta = end - start
+        duration = '%d.%06ds' % (
+            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
+    return duration
+
+
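+# Extract the worker number from a test's 'worker-N' tag; return the
+# string 'NaN' when no worker tag is present.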
+def find_worker(test):
+    for tag in test['tags']:
+        if tag.startswith('worker-'):
+            return int(tag[7:])
+    return 'NaN'
+
+
+# Always print out stdout/stderr attachments when they exist
+def print_attachments(stream, test, all_channels=False):
+    """Print out subunit attachments.
+
+    Print out subunit attachments that contain content. This runs in
+    two modes: one for successes, where only stdout and stderr are
+    printed, and an override that dumps all the attachments.
+    """
+    channels = ('stdout', 'stderr')
+    for name, detail in test['details'].items():
+        # NOTE(sdague): the subunit names are a little crazy, and actually
+        # are in the form pythonlogging:'' (with the colon and quotes)
+        name = name.split(':')[0]
+        if detail.content_type.type == 'test':
+            detail.content_type.type = 'text'
+        if (all_channels or name in channels) and detail.as_text():
+            title = "Captured %s:" % name
+            stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
+            # indent attachment lines 4 spaces to make them visually
+            # offset
+            for line in detail.as_text().split('\n'):
+                stream.write("    %s\n" % line)
+
+
+def show_outcome(stream, test, print_failures=False):
+    global RESULTS
+    status = test['status']
+    # TODO(sdague): ask lifeless why this is needed?
+    if status == 'exists':
+        return
+
+    worker = find_worker(test)
+    name = cleanup_test_name(test['id'])
+    duration = get_duration(test['timestamps'])
+
+    if worker not in RESULTS:
+        RESULTS[worker] = []
+    RESULTS[worker].append(test)
+
+    # don't count the end of the return code as a fail
+    if name == 'process-returncode':
+        return
+
+    if status == 'success':
+        stream.write('{%s} %s [%s] ... ok\n' % (
+            worker, name, duration))
+        print_attachments(stream, test)
+    elif status == 'fail':
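+        # print_failures is set by --no-failure-debug; when true, skip
+        # dumping this failure's attachments inline (--fails still prints
+        # them at the end of the run).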
+        FAILS.append(test)
+        stream.write('{%s} %s [%s] ... FAILED\n' % (
+            worker, name, duration))
+        if not print_failures:
+            print_attachments(stream, test, all_channels=True)
+    elif status == 'skip':
+        stream.write('{%s} %s ... SKIPPED: %s\n' % (
+            worker, name, test['details']['reason'].as_text()))
+    else:
+        stream.write('{%s} %s [%s] ... %s\n' % (
+            worker, name, duration, test['status']))
+        if not print_failures:
+            print_attachments(stream, test, all_channels=True)
+
+    stream.flush()
+
+
+def print_fails(stream):
+    """Print summary failure report.
+
+    Only used when the --fails option is passed; there remains debate on
+    inline vs. at-end reporting, so the utility function is kept around.
+    """
+    if not FAILS:
+        return
+    stream.write("\n==============================\n")
+    stream.write("Failed %s tests - output below:" % len(FAILS))
+    stream.write("\n==============================\n")
+    for f in FAILS:
+        stream.write("\n%s\n" % f['id'])
+        stream.write("%s\n" % ('-' * len(f['id'])))
+        print_attachments(stream, f, all_channels=True)
+    stream.write('\n')
+
+
+def count_tests(key, value):
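+    # Count tests across all workers whose 'key' field matches the
+    # regex 'value', e.g. count_tests('status', 'fail').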
+    count = 0
+    for k, v in RESULTS.items():
+        for item in v:
+            if key in item:
+                if re.search(value, item[key]):
+                    count += 1
+    return count
+
+
+def worker_stats(worker):
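+    # Return how many tests this worker ran and the wall-clock time it
+    # took, measured from the first test's start to the last test's end.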
+    tests = RESULTS[worker]
+    num_tests = len(tests)
+    delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
+    return num_tests, delta
+
+
+def print_summary(stream):
+    stream.write("\n======\nTotals\n======\n")
+    stream.write("Run: %s\n" % count_tests('status', '.*'))
+    stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
+    stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
+    stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
+
+    # we could have no results, especially as we filter out the process-codes
+    if RESULTS:
+        stream.write("\n==============\nWorker Balance\n==============\n")
+
+        for w in range(max(RESULTS.keys()) + 1):
+            if w not in RESULTS:
+                stream.write(
+                    " - WARNING: missing Worker %s! "
+                    "Race in testr accounting.\n" % w)
+            else:
+                num, time = worker_stats(w)
+                stream.write(" - Worker %s (%s tests) => %ss\n" %
+                             (w, num, time))
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--no-failure-debug', '-n', action='store_true',
+                        dest='print_failures', help='Disable printing failure '
+                        'debug information in realtime')
+    parser.add_argument('--fails', '-f', action='store_true',
+                        dest='post_fails', help='Print failure debug '
+                        'information after the stream is processed')
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+    stream = subunit.ByteStreamToStreamResult(
+        sys.stdin, non_subunit_name='stdout')
+    starts = Starts(sys.stdout)
+    outcomes = testtools.StreamToDict(
+        functools.partial(show_outcome, sys.stdout,
+                          print_failures=args.print_failures))
+    summary = testtools.StreamSummary()
+    result = testtools.CopyStreamResult([starts, outcomes, summary])
+    result.startTestRun()
+    try:
+        stream.run(result)
+    finally:
+        result.stopTestRun()
+    if args.post_fails:
+        print_fails(sys.stdout)
+    print_summary(sys.stdout)
+    return (0 if summary.wasSuccessful() else 1)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/tools/tempest_auto_config.py b/tools/tempest_auto_config.py
deleted file mode 100644
index 5b8d05b..0000000
--- a/tools/tempest_auto_config.py
+++ /dev/null
@@ -1,395 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# This script aims to configure an initial OpenStack environment with all the
-# necessary configurations for tempest's run using nothing but OpenStack's
-# native API.
-# That includes, creating users, tenants, registering images (cirros),
-# configuring neutron and so on.
-#
-# ASSUMPTION: this script is run by an admin user as it is meant to configure
-# the OpenStack environment prior to actual use.
-
-# Config
-import ConfigParser
-import os
-import tarfile
-import urllib2
-
-# Default client libs
-import glanceclient as glance_client
-import keystoneclient.v2_0.client as keystone_client
-
-# Import OpenStack exceptions
-import glanceclient.exc as glance_exception
-import keystoneclient.exceptions as keystone_exception
-
-
-TEMPEST_TEMP_DIR = os.getenv("TEMPEST_TEMP_DIR", "/tmp").rstrip('/')
-TEMPEST_ROOT_DIR = os.getenv("TEMPEST_ROOT_DIR", os.getenv("HOME")).rstrip('/')
-
-# Environment variables override defaults
-TEMPEST_CONFIG_DIR = os.getenv("TEMPEST_CONFIG_DIR",
-                               "%s%s" % (TEMPEST_ROOT_DIR, "/etc")).rstrip('/')
-TEMPEST_CONFIG_FILE = os.getenv("TEMPEST_CONFIG_FILE",
-                                "%s%s" % (TEMPEST_CONFIG_DIR, "/tempest.conf"))
-TEMPEST_CONFIG_SAMPLE = os.getenv("TEMPEST_CONFIG_SAMPLE",
-                                  "%s%s" % (TEMPEST_CONFIG_DIR,
-                                            "/tempest.conf.sample"))
-# Image references
-IMAGE_DOWNLOAD_CHUNK_SIZE = 8 * 1024
-IMAGE_UEC_SOURCE_URL = os.getenv("IMAGE_UEC_SOURCE_URL",
-                                 "http://download.cirros-cloud.net/0.3.1/"
-                                 "cirros-0.3.1-x86_64-uec.tar.gz")
-TEMPEST_IMAGE_ID = os.getenv('IMAGE_ID')
-TEMPEST_IMAGE_ID_ALT = os.getenv('IMAGE_ID_ALT')
-IMAGE_STATUS_ACTIVE = 'active'
-
-
-class ClientManager(object):
-    """
-    Manager that provides access to the official python clients for
-    calling various OpenStack APIs.
-    """
-    def __init__(self):
-        self.identity_client = None
-        self.image_client = None
-        self.network_client = None
-        self.compute_client = None
-        self.volume_client = None
-
-    def get_identity_client(self, **kwargs):
-        """
-        Returns the openstack identity python client
-        :param username: a string representing the username
-        :param password: a string representing the user's password
-        :param tenant_name: a string representing the tenant name of the user
-        :param auth_url: a string representing the auth url of the identity
-        :param insecure: True if we wish to disable ssl certificate validation,
-        False otherwise
-        :returns an instance of openstack identity python client
-        """
-        if not self.identity_client:
-            self.identity_client = keystone_client.Client(**kwargs)
-
-        return self.identity_client
-
-    def get_image_client(self, version="1", *args, **kwargs):
-        """
-        This method returns OpenStack glance python client
-        :param version: a string representing the version of the glance client
-        to use.
-        :param string endpoint: A user-supplied endpoint URL for the glance
-                            service.
-        :param string token: Token for authentication.
-        :param integer timeout: Allows customization of the timeout for client
-                                http requests. (optional)
-        :return: a Client object representing the glance client
-        """
-        if not self.image_client:
-            self.image_client = glance_client.Client(version, *args, **kwargs)
-
-        return self.image_client
-
-
-def get_tempest_config(path_to_config):
-    """
-    Gets the tempest configuration file as a ConfigParser object
-    :param path_to_config: path to the config file
-    :return: a ConfigParser object representing the tempest configuration file
-    """
-    # get the sample config file from the sample
-    config = ConfigParser.ConfigParser()
-    config.readfp(open(path_to_config))
-
-    return config
-
-
-def update_config_admin_credentials(config, config_section):
-    """
-    Updates the tempest config with the admin credentials
-    :param config: a ConfigParser object representing the tempest config file
-    :param config_section: the section name where the admin credentials are
-    """
-    # Check if credentials are present, default uses the config credentials
-    OS_USERNAME = os.getenv('OS_USERNAME',
-                            config.get(config_section, "admin_username"))
-    OS_PASSWORD = os.getenv('OS_PASSWORD',
-                            config.get(config_section, "admin_password"))
-    OS_TENANT_NAME = os.getenv('OS_TENANT_NAME',
-                               config.get(config_section, "admin_tenant_name"))
-    OS_AUTH_URL = os.getenv('OS_AUTH_URL', config.get(config_section, "uri"))
-
-    if not (OS_AUTH_URL and
-            OS_USERNAME and
-            OS_PASSWORD and
-            OS_TENANT_NAME):
-        raise Exception("Admin environment variables not found.")
-
-    # TODO(tkammer): Add support for uri_v3
-    config_identity_params = {'uri': OS_AUTH_URL,
-                              'admin_username': OS_USERNAME,
-                              'admin_password': OS_PASSWORD,
-                              'admin_tenant_name': OS_TENANT_NAME}
-
-    update_config_section_with_params(config,
-                                      config_section,
-                                      config_identity_params)
-
-
-def update_config_section_with_params(config, config_section, params):
-    """
-    Updates a given config object with given params
-    :param config: a ConfigParser object representing the tempest config file
-    :param config_section: the section we would like to update
-    :param params: the parameters we wish to update for that section
-    """
-    for option, value in params.items():
-        config.set(config_section, option, value)
-
-
-def get_identity_client_kwargs(config, config_section):
-    """
-    Get the required arguments for the identity python client
-    :param config: a ConfigParser object representing the tempest config file
-    :param config_section: the section name in the configuration where the
-    arguments can be found
-    :return: a dictionary representing the needed arguments for the identity
-    client
-    """
-    username = config.get(config_section, 'admin_username')
-    password = config.get(config_section, 'admin_password')
-    tenant_name = config.get(config_section, 'admin_tenant_name')
-    auth_url = config.get(config_section, 'uri')
-    dscv = config.get(config_section, 'disable_ssl_certificate_validation')
-    kwargs = {'username': username,
-              'password': password,
-              'tenant_name': tenant_name,
-              'auth_url': auth_url,
-              'insecure': dscv}
-
-    return kwargs
-
-
-def create_user_with_tenant(identity_client, username, password, tenant_name):
-    """
-    Creates a user using a given identity client
-    :param identity_client: openstack identity python client
-    :param username: a string representing the username
-    :param password: a string representing the user's password
-    :param tenant_name: a string representing the tenant name of the user
-    """
-    # Try to create the necessary tenant
-    tenant_id = None
-    try:
-        tenant_description = "Tenant for Tempest %s user" % username
-        tenant = identity_client.tenants.create(tenant_name,
-                                                tenant_description)
-        tenant_id = tenant.id
-    except keystone_exception.Conflict:
-
-        # if already exist, use existing tenant
-        tenant_list = identity_client.tenants.list()
-        for tenant in tenant_list:
-            if tenant.name == tenant_name:
-                tenant_id = tenant.id
-
-    # Try to create the user
-    try:
-        email = "%s@test.com" % username
-        identity_client.users.create(name=username,
-                                     password=password,
-                                     email=email,
-                                     tenant_id=tenant_id)
-    except keystone_exception.Conflict:
-
-        # if already exist, use existing user
-        pass
-
-
-def create_users_and_tenants(identity_client,
-                             config,
-                             config_section):
-    """
-    Creates the two non admin users and tenants for tempest
-    :param identity_client: openstack identity python client
-    :param config: a ConfigParser object representing the tempest config file
-    :param config_section: the section name of identity in the config
-    """
-    # Get the necessary params from the config file
-    tenant_name = config.get(config_section, 'tenant_name')
-    username = config.get(config_section, 'username')
-    password = config.get(config_section, 'password')
-
-    alt_tenant_name = config.get(config_section, 'alt_tenant_name')
-    alt_username = config.get(config_section, 'alt_username')
-    alt_password = config.get(config_section, 'alt_password')
-
-    # Create the necessary users for the test runs
-    create_user_with_tenant(identity_client, username, password, tenant_name)
-    create_user_with_tenant(identity_client, alt_username, alt_password,
-                            alt_tenant_name)
-
-
-def get_image_client_kwargs(identity_client, config, config_section):
-    """
-    Get the required arguments for the image python client
-    :param identity_client: openstack identity python client
-    :param config: a ConfigParser object representing the tempest config file
-    :param config_section: the section name of identity in the config
-    :return: a dictionary representing the needed arguments for the image
-    client
-    """
-
-    token = identity_client.auth_token
-    endpoint = identity_client.\
-        service_catalog.url_for(service_type='image', endpoint_type='publicURL'
-                                )
-    dscv = config.get(config_section, 'disable_ssl_certificate_validation')
-    kwargs = {'endpoint': endpoint,
-              'token': token,
-              'insecure': dscv}
-
-    return kwargs
-
-
-def images_exist(image_client):
-    """
-    Checks whether the images ID's located in the environment variable are
-    indeed registered
-    :param image_client: the openstack python client representing the image
-    client
-    """
-    exist = True
-    if not TEMPEST_IMAGE_ID or not TEMPEST_IMAGE_ID_ALT:
-        exist = False
-    else:
-        try:
-            image_client.images.get(TEMPEST_IMAGE_ID)
-            image_client.images.get(TEMPEST_IMAGE_ID_ALT)
-        except glance_exception.HTTPNotFound:
-            exist = False
-
-    return exist
-
-
-def download_and_register_uec_images(image_client, download_url,
-                                     download_folder):
-    """
-    Downloads and registered the UEC AKI/AMI/ARI images
-    :param image_client:
-    :param download_url: the url of the uec tar file
-    :param download_folder: the destination folder we wish to save the file to
-    """
-    basename = os.path.basename(download_url)
-    path = os.path.join(download_folder, basename)
-
-    request = urllib2.urlopen(download_url)
-
-    # First, download the file
-    with open(path, "wb") as fp:
-        while True:
-            chunk = request.read(IMAGE_DOWNLOAD_CHUNK_SIZE)
-            if not chunk:
-                break
-
-            fp.write(chunk)
-
-    # Then extract and register images
-    tar = tarfile.open(path, "r")
-    for name in tar.getnames():
-        file_obj = tar.extractfile(name)
-        format = "aki"
-
-        if file_obj.name.endswith(".img"):
-            format = "ami"
-
-        if file_obj.name.endswith("initrd"):
-            format = "ari"
-
-        # Register images in image client
-        image_client.images.create(name=file_obj.name, disk_format=format,
-                                   container_format=format, data=file_obj,
-                                   is_public="true")
-
-    tar.close()
-
-
-def create_images(image_client, config, config_section,
-                  download_url=IMAGE_UEC_SOURCE_URL,
-                  download_folder=TEMPEST_TEMP_DIR):
-    """
-    Creates images for tempest's use and registers the environment variables
-    IMAGE_ID and IMAGE_ID_ALT with registered images
-    :param image_client: OpenStack python image client
-    :param config: a ConfigParser object representing the tempest config file
-    :param config_section: the section name where the IMAGE ids are set
-    :param download_url: the URL from which we should download the UEC tar
-    :param download_folder: the place where we want to save the download file
-    """
-    if not images_exist(image_client):
-        # Falls down to the default uec images
-        download_and_register_uec_images(image_client, download_url,
-                                         download_folder)
-        image_ids = []
-        for image in image_client.images.list():
-            image_ids.append(image.id)
-
-        os.environ["IMAGE_ID"] = image_ids[0]
-        os.environ["IMAGE_ID_ALT"] = image_ids[1]
-
-    params = {'image_ref': os.getenv("IMAGE_ID"),
-              'image_ref_alt': os.getenv("IMAGE_ID_ALT")}
-
-    update_config_section_with_params(config, config_section, params)
-
-
-def main():
-    """
-    Main module to control the script
-    """
-    # Check if config file exists or fall to the default sample otherwise
-    path_to_config = TEMPEST_CONFIG_SAMPLE
-
-    if os.path.isfile(TEMPEST_CONFIG_FILE):
-        path_to_config = TEMPEST_CONFIG_FILE
-
-    config = get_tempest_config(path_to_config)
-    update_config_admin_credentials(config, 'identity')
-
-    client_manager = ClientManager()
-
-    # Set the identity related info for tempest
-    identity_client_kwargs = get_identity_client_kwargs(config,
-                                                        'identity')
-    identity_client = client_manager.get_identity_client(
-        **identity_client_kwargs)
-
-    # Create the necessary users and tenants for tempest run
-    create_users_and_tenants(identity_client, config, 'identity')
-
-    # Set the image related info for tempest
-    image_client_kwargs = get_image_client_kwargs(identity_client,
-                                                  config,
-                                                  'identity')
-    image_client = client_manager.get_image_client(**image_client_kwargs)
-
-    # Create the necessary images for tempest run
-    create_images(image_client, config, 'compute')
-
-    # TODO(tkammer): add network implementation
-
-if __name__ == "__main__":
-    main()
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
deleted file mode 100755
index aa92c0b..0000000
--- a/tools/verify_tempest_config.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import json
-import sys
-
-import httplib2
-
-from tempest import clients
-from tempest import config
-
-
-CONF = config.CONF
-RAW_HTTP = httplib2.Http()
-
-
-def verify_glance_api_versions(os):
-    # Check glance api versions
-    __, versions = os.image_client.get_versions()
-    if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
-                                             versions):
-        print('Config option image api_v1 should be changed to: %s' % (
-            not CONF.image_feature_enabled.api_v1))
-    if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
-        print('Config option image api_v2 should be changed to: %s' % (
-            not CONF.image_feature_enabled.api_v2))
-
-
-def verify_nova_api_versions(os):
-    # Check nova api versions - only get base URL without PATH
-    os.servers_client.skip_path = True
-    # The nova base endpoint URL includes the version, but the unversioned
-    # endpoint is needed to get the versions list
-    v2_endpoint = os.servers_client.base_url
-    v2_endpoint_parts = v2_endpoint.split('/')
-    endpoint = v2_endpoint_parts[0] + '//' + v2_endpoint_parts[2]
-    __, body = RAW_HTTP.request(endpoint, 'GET')
-    body = json.loads(body)
-    # Restore full base_url
-    os.servers_client.skip_path = False
-    versions = map(lambda x: x['id'], body['versions'])
-    if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
-        print('Config option compute api_v3 should be changed to: %s' % (
-              not CONF.compute_feature_enabled.api_v3))
-
-
-def get_extension_client(os, service):
-    extensions_client = {
-        'nova': os.extensions_client,
-        'nova_v3': os.extensions_v3_client,
-        'cinder': os.volumes_extension_client,
-        'neutron': os.network_client,
-        'swift': os.account_client,
-    }
-    if service not in extensions_client:
-        print('No tempest extensions client for %s' % service)
-        exit(1)
-    return extensions_client[service]
-
-
-def get_enabled_extensions(service):
-    extensions_options = {
-        'nova': CONF.compute_feature_enabled.api_extensions,
-        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
-        'cinder': CONF.volume_feature_enabled.api_extensions,
-        'neutron': CONF.network_feature_enabled.api_extensions,
-        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
-    }
-    if service not in extensions_options:
-        print('No supported extensions list option for %s' % service)
-        exit(1)
-    return extensions_options[service]
-
-
-def verify_extensions(os, service, results):
-    extensions_client = get_extension_client(os, service)
-    __, resp = extensions_client.list_extensions()
-    if isinstance(resp, dict):
-        # Neutron's extension 'name' field is not a single word (it has
-        # spaces in the string). Since that can't be used as a list option,
-        # the api_extensions option in the network-feature-enabled group uses
-        # the alias instead of the name.
-        if service == 'neutron':
-            extensions = map(lambda x: x['alias'], resp['extensions'])
-        elif service == 'swift':
-            # Remove Swift general information from extensions list
-            resp.pop('swift')
-            extensions = resp.keys()
-        else:
-            extensions = map(lambda x: x['name'], resp['extensions'])
-
-    else:
-        extensions = map(lambda x: x['name'], resp)
-    if not results.get(service):
-        results[service] = {}
-    extensions_opt = get_enabled_extensions(service)
-    if extensions_opt[0] == 'all':
-        results[service]['extensions'] = 'all'
-        return results
-    # Verify that all configured extensions are actually enabled
-    for extension in extensions_opt:
-        results[service][extension] = extension in extensions
-    # Verify that there aren't additional extensions enabled that aren't
-    # specified in the config list
-    for extension in extensions:
-        if extension not in extensions_opt:
-            results[service][extension] = False
-    return results
-
-
-def display_results(results):
-    for service in results:
-        # If all extensions are specified as being enabled, there is no way
-        # to verify this, so we just assume it to be true
-        if results[service].get('extensions'):
-            continue
-        extension_list = get_enabled_extensions(service)
-        for extension in results[service]:
-            if not results[service][extension]:
-                if extension in extension_list:
-                    print("%s extension: %s should not be included in the list"
-                          " of enabled extensions" % (service, extension))
-                else:
-                    print("%s extension: %s should be included in the list of "
-                          "enabled extensions" % (service, extension))
-
-
-def check_service_availability(os):
-    services = []
-    avail_services = []
-    codename_match = {
-        'volume': 'cinder',
-        'network': 'neutron',
-        'image': 'glance',
-        'object_storage': 'swift',
-        'compute': 'nova',
-        'orchestration': 'heat',
-        'metering': 'ceilometer',
-        'telemetry': 'ceilometer',
-        'data_processing': 'savanna',
-        'baremetal': 'ironic',
-        'identity': 'keystone'
-
-    }
-    # Get catalog list for endpoints to use for validation
-    __, endpoints = os.endpoints_client.list_endpoints()
-    for endpoint in endpoints:
-        __, service = os.service_client.get_service(endpoint['service_id'])
-        services.append(service['type'])
-    # Pull all catalog types from config file and compare against endpoint list
-    for cfgname in dir(CONF._config):
-        cfg = getattr(CONF, cfgname)
-        catalog_type = getattr(cfg, 'catalog_type', None)
-        if not catalog_type:
-            continue
-        else:
-            if cfgname == 'identity':
-                # Keystone is a required service for tempest
-                continue
-            if catalog_type not in services:
-                if getattr(CONF.service_available, codename_match[cfgname]):
-                    print('Endpoint type %s not found; either disable '
-                          'service %s or fix the catalog_type in the config '
-                          'file' % (catalog_type, codename_match[cfgname]))
-            else:
-                if not getattr(CONF.service_available,
-                               codename_match[cfgname]):
-                    print('Endpoint type %s is available, so service %s should'
-                          ' be set as available in the config file.' % (
-                          catalog_type, codename_match[cfgname]))
-                else:
-                    avail_services.append(codename_match[cfgname])
-    return avail_services
-
-
-def main(argv):
-    print('Running config verification...')
-    os = clients.ComputeAdminManager(interface='json')
-    services = check_service_availability(os)
-    results = {}
-    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
-        if service == 'nova_v3' and 'nova' not in services:
-            continue
-        elif service not in services:
-            continue
-        results = verify_extensions(os, service, results)
-    verify_glance_api_versions(os)
-    verify_nova_api_versions(os)
-    display_results(results)
-
-
-if __name__ == "__main__":
-    main(sys.argv)
diff --git a/tox.ini b/tox.ini
index 4a625f8..6b4acc6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -54,7 +54,7 @@
 setenv = OS_TEST_TIMEOUT=1200
 # The regex below is used to select heat api/scenario tests tagged as slow.
 commands =
-  bash tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+  bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
 
 [testenv:large-ops]
 sitepackages = True
@@ -77,7 +77,7 @@
 [testenv:stress]
 sitepackages = True
 commands =
-    python -m tempest/stress/run_stress -a -d 3600 -S
+    run-tempest-stress -a -d 3600 -S
 
 [testenv:venv]
 commands = {posargs}
@@ -95,9 +95,10 @@
 
 [hacking]
 local-check-factory = tempest.hacking.checks.factory
+import_exceptions = tempest.services
 
 [flake8]
 # E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved.  For further detail see https://review.openstack.org/#/c/36788/
-ignore = E125,H302,H404
+ignore = E125,H404
 show-source = True
 exclude = .git,.venv,.tox,dist,doc,openstack,*egg