Merge "Adds more testcases to test_telemetry_alarming_api"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 70c791b..70950b7 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -250,6 +250,19 @@
 # Should the tests ssh to instances? (boolean value)
 #run_ssh=false
 
+# Auth method used to authenticate to the instance. Valid
+# choices are: keypair, configured, adminpass, disabled.
+# keypair: start the servers with an ssh keypair. configured:
+# use the configured user and password. adminpass: use the
+# injected adminPass. disabled: avoid using ssh when it is an
+# option. (string value)
+#ssh_auth_method=keypair
+
+# How to connect to the instance? fixed: using the first ip
+# belonging to the fixed network. floating: creating and using
+# a floating ip (string value)
+#ssh_connect_method=fixed
+
 # User name used to authenticate to an instance. (string
 # value)
 #ssh_user=root
@@ -337,6 +350,10 @@
 # API key to use when authenticating as admin. (string value)
 #password=<None>
 
+# Domain name for authentication as admin (Keystone V3). The
+# same domain applies to user and project (string value)
+#domain_name=<None>
+
 
 [compute-feature-enabled]
 
@@ -503,6 +520,10 @@
 # API key to use when authenticating. (string value)
 #password=<None>
 
+# Domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#domain_name=<None>
+
 # Username of alternate user to use for Nova API requests.
 # (string value)
 #alt_username=<None>
@@ -515,6 +536,10 @@
 # (string value)
 #alt_password=<None>
 
+# Alternate domain name for authentication (Keystone V3). The
+# same domain applies to user and project (string value)
+#alt_domain_name=<None>
+
 # Administrative Username to use for Keystone API requests.
 # (string value)
 #admin_username=<None>
@@ -526,6 +551,10 @@
 # API key to use when authenticating as admin. (string value)
 #admin_password=<None>
 
+# Admin domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#admin_domain_name=<None>
+
 
 [identity-feature-enabled]
 
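The two new [compute] options above (ssh_auth_method, ssh_connect_method) are consumed by the test code when deciding how to reach an instance; prepare_instance_network() in the compute base class below only sets up floating-ip network resources when ssh is not disabled and the connect method is 'floating'. As a rough, hypothetical illustration (this helper is not part of the patch), the auth choices could map to SSH credentials like this:

# Hypothetical sketch, not part of this change: shows how the
# ssh_auth_method choices could drive credential selection in a test helper.
def pick_ssh_credentials(auth_method, ssh_user, keypair=None,
                         configured_password=None, admin_pass=None):
    """Return (username, password, pkey), or None when ssh is disabled."""
    if auth_method == 'keypair':
        return (ssh_user, None, keypair)              # server booted with a keypair
    if auth_method == 'configured':
        return (ssh_user, configured_password, None)  # user/password from tempest.conf
    if auth_method == 'adminpass':
        return (ssh_user, admin_pass, None)           # injected adminPass
    if auth_method == 'disabled':
        return None                                   # skip ssh validation entirely
    raise ValueError('Unknown ssh_auth_method: %s' % auth_method)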
diff --git a/requirements.txt b/requirements.txt
index e97eece..5e396c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@
 paramiko>=1.9.0
 netaddr>=0.7.6
 python-glanceclient>=0.9.0
-python-keystoneclient>=0.7.0
+python-keystoneclient>=0.8.0
 python-novaclient>=2.17.0
 python-neutronclient>=2.3.4,<3
 python-cinderclient>=1.0.6
@@ -20,7 +20,7 @@
 keyring>=2.1
 testrepository>=0.0.18
 oslo.config>=1.2.0
-six>=1.5.2
+six>=1.6.0
 iso8601>=0.1.9
 fixtures>=0.3.14
 testscenarios>=0.4
diff --git a/setup.cfg b/setup.cfg
index a701572..f4aa3e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,6 +17,10 @@
     Programming Language :: Python :: 2
     Programming Language :: Python :: 2.7
 
+[entry_points]
+console_scripts =
+    verify-tempest-config = tempest.cmd.verify_tempest_config:main
+
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
diff --git a/setup.py b/setup.py
index 70c2b3f..7363757 100755
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,14 @@
 # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
 import setuptools
 
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
 setuptools.setup(
     setup_requires=['pbr'],
     pbr=True)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index d1bda83..edeb2fc 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -30,10 +30,12 @@
 class BaseComputeTest(tempest.test.BaseTestCase):
     """Base test case class for all Compute API tests."""
 
+    _api_version = 3
     force_tenant_isolation = False
 
     @classmethod
     def setUpClass(cls):
+        cls.set_network_resources()
         super(BaseComputeTest, cls).setUpClass()
 
         os = cls.get_client_manager()
@@ -53,6 +55,57 @@
         cls.multi_user = cls.get_multi_user()
         cls.security_groups = []
 
+        if cls._api_version == 2:
+            cls.servers_client = cls.os.servers_client
+            cls.flavors_client = cls.os.flavors_client
+            cls.images_client = cls.os.images_client
+            cls.extensions_client = cls.os.extensions_client
+            cls.floating_ips_client = cls.os.floating_ips_client
+            cls.keypairs_client = cls.os.keypairs_client
+            cls.security_groups_client = cls.os.security_groups_client
+            cls.quotas_client = cls.os.quotas_client
+            cls.limits_client = cls.os.limits_client
+            cls.volumes_extensions_client = cls.os.volumes_extensions_client
+            cls.volumes_client = cls.os.volumes_client
+            cls.interfaces_client = cls.os.interfaces_client
+            cls.fixed_ips_client = cls.os.fixed_ips_client
+            cls.availability_zone_client = cls.os.availability_zone_client
+            cls.agents_client = cls.os.agents_client
+            cls.aggregates_client = cls.os.aggregates_client
+            cls.services_client = cls.os.services_client
+            cls.instance_usages_audit_log_client = \
+                cls.os.instance_usages_audit_log_client
+            cls.hypervisor_client = cls.os.hypervisor_client
+            cls.certificates_client = cls.os.certificates_client
+            cls.migrations_client = cls.os.migrations_client
+
+        elif cls._api_version == 3:
+            if not CONF.compute_feature_enabled.api_v3:
+                skip_msg = ("%s skipped as nova v3 api is not available" %
+                            cls.__name__)
+                raise cls.skipException(skip_msg)
+            cls.servers_client = cls.os.servers_v3_client
+            cls.images_client = cls.os.image_client
+            cls.flavors_client = cls.os.flavors_v3_client
+            cls.services_client = cls.os.services_v3_client
+            cls.extensions_client = cls.os.extensions_v3_client
+            cls.availability_zone_client = cls.os.availability_zone_v3_client
+            cls.interfaces_client = cls.os.interfaces_v3_client
+            cls.hypervisor_client = cls.os.hypervisor_v3_client
+            cls.keypairs_client = cls.os.keypairs_v3_client
+            cls.volumes_client = cls.os.volumes_client
+            cls.certificates_client = cls.os.certificates_v3_client
+            cls.keypairs_client = cls.os.keypairs_v3_client
+            cls.aggregates_client = cls.os.aggregates_v3_client
+            cls.hosts_client = cls.os.hosts_v3_client
+            cls.quotas_client = cls.os.quotas_v3_client
+            cls.version_client = cls.os.version_v3_client
+            cls.migrations_client = cls.os.migrations_v3_client
+        else:
+            msg = ("Unexpected API version is specified (%s)" %
+                   cls._api_version)
+            raise exceptions.InvalidConfiguration(message=msg)
+
     @classmethod
     def get_multi_user(cls):
         multi_user = True
@@ -91,6 +144,26 @@
                 pass
 
     @classmethod
+    def server_check_teardown(cls):
+        """Checks is the shared server clean enough for subsequent test.
+           Method will delete the server when it's dirty.
+           The setUp method is responsible for creating a new server.
+           Exceptions raised in tearDown class are fails the test case,
+           This method supposed to use only by tierDown methods, when
+           the shared server_id is stored in the server_id of the class.
+        """
+        if getattr(cls, 'server_id', None) is not None:
+            try:
+                cls.servers_client.wait_for_server_status(cls.server_id,
+                                                          'ACTIVE')
+            except Exception as exc:
+                LOG.exception(exc)
+                cls.servers_client.delete_server(cls.server_id)
+                cls.servers_client.wait_for_server_termination(cls.server_id)
+                cls.server_id = None
+                raise
+
+    @classmethod
     def clear_images(cls):
         for image_id in cls.images:
             try:
@@ -203,38 +276,12 @@
             LOG.warn("Unable to delete volume '%s' since it was not found. "
                      "Maybe it was already deleted?" % volume_id)
 
-
-class BaseV2ComputeTest(BaseComputeTest):
-
-    _interface = "json"
-
     @classmethod
-    def setUpClass(cls):
-        # By default compute tests do not create network resources
-        cls.set_network_resources()
-        super(BaseV2ComputeTest, cls).setUpClass()
-        cls.servers_client = cls.os.servers_client
-        cls.flavors_client = cls.os.flavors_client
-        cls.images_client = cls.os.images_client
-        cls.extensions_client = cls.os.extensions_client
-        cls.floating_ips_client = cls.os.floating_ips_client
-        cls.keypairs_client = cls.os.keypairs_client
-        cls.security_groups_client = cls.os.security_groups_client
-        cls.quotas_client = cls.os.quotas_client
-        cls.limits_client = cls.os.limits_client
-        cls.volumes_extensions_client = cls.os.volumes_extensions_client
-        cls.volumes_client = cls.os.volumes_client
-        cls.interfaces_client = cls.os.interfaces_client
-        cls.fixed_ips_client = cls.os.fixed_ips_client
-        cls.availability_zone_client = cls.os.availability_zone_client
-        cls.agents_client = cls.os.agents_client
-        cls.aggregates_client = cls.os.aggregates_client
-        cls.services_client = cls.os.services_client
-        cls.instance_usages_audit_log_client = \
-            cls.os.instance_usages_audit_log_client
-        cls.hypervisor_client = cls.os.hypervisor_client
-        cls.certificates_client = cls.os.certificates_client
-        cls.migrations_client = cls.os.migrations_client
+    def prepare_instance_network(cls):
+        if (CONF.compute.ssh_auth_method != 'disabled' and
+                CONF.compute.ssh_connect_method == 'floating'):
+            cls.set_network_resources(network=True, subnet=True, router=True,
+                                      dhcp=True)
 
     @classmethod
     def create_image_from_server(cls, server_id, **kwargs):
@@ -243,21 +290,25 @@
         if 'name' in kwargs:
             name = kwargs.pop('name')
 
-        resp, image = cls.images_client.create_image(
-            server_id, name)
+        if cls._api_version == 2:
+            resp, image = cls.images_client.create_image(server_id, name)
+        elif cls._api_version == 3:
+            resp, image = cls.servers_client.create_image(server_id, name)
         image_id = data_utils.parse_image_id(resp['location'])
         cls.images.append(image_id)
 
         if 'wait_until' in kwargs:
             cls.images_client.wait_for_image_status(image_id,
                                                     kwargs['wait_until'])
-            resp, image = cls.images_client.get_image(image_id)
+            if cls._api_version == 2:
+                resp, image = cls.images_client.get_image(image_id)
+            elif cls._api_version == 3:
+                resp, image = cls.images_client.get_image_meta(image_id)
 
             if kwargs['wait_until'] == 'ACTIVE':
                 if kwargs.get('wait_for_server', True):
                     cls.servers_client.wait_for_server_status(server_id,
                                                               'ACTIVE')
-
         return resp, image
 
     @classmethod
@@ -271,13 +322,24 @@
                 LOG.exception('Failed to delete server %s' % server_id)
                 pass
         resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
-        cls.password = server['adminPass']
+        if cls._api_version == 2:
+            cls.password = server['adminPass']
+        elif cls._api_version == 3:
+            cls.password = server['admin_password']
         return server['id']
 
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
-        cls._delete_volume(cls.volumes_extensions_client, volume_id)
+        if cls._api_version == 2:
+            cls._delete_volume(cls.volumes_extensions_client, volume_id)
+        elif cls._api_version == 3:
+            cls._delete_volume(cls.volumes_client, volume_id)
+
+
+class BaseV2ComputeTest(BaseComputeTest):
+    _api_version = 2
+    _interface = "json"
 
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -306,80 +368,9 @@
 
 
 class BaseV3ComputeTest(BaseComputeTest):
-
+    _api_version = 3
     _interface = "json"
 
-    @classmethod
-    def setUpClass(cls):
-        # By default compute tests do not create network resources
-        if cls._interface == "xml":
-            skip_msg = ("XML interface is being removed from Nova v3. "
-                        "%s will be removed shortly" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
-        if not CONF.compute_feature_enabled.api_v3:
-            skip_msg = ("%s skipped as nova v3 api is not available" %
-                        cls.__name__)
-            raise cls.skipException(skip_msg)
-
-        cls.set_network_resources()
-        super(BaseV3ComputeTest, cls).setUpClass()
-
-        cls.servers_client = cls.os.servers_v3_client
-        cls.images_client = cls.os.image_client
-        cls.flavors_client = cls.os.flavors_v3_client
-        cls.services_client = cls.os.services_v3_client
-        cls.extensions_client = cls.os.extensions_v3_client
-        cls.availability_zone_client = cls.os.availability_zone_v3_client
-        cls.interfaces_client = cls.os.interfaces_v3_client
-        cls.hypervisor_client = cls.os.hypervisor_v3_client
-        cls.keypairs_client = cls.os.keypairs_v3_client
-        cls.volumes_client = cls.os.volumes_client
-        cls.certificates_client = cls.os.certificates_v3_client
-        cls.keypairs_client = cls.os.keypairs_v3_client
-        cls.aggregates_client = cls.os.aggregates_v3_client
-        cls.hosts_client = cls.os.hosts_v3_client
-        cls.quotas_client = cls.os.quotas_v3_client
-        cls.version_client = cls.os.version_v3_client
-        cls.migrations_client = cls.os.migrations_v3_client
-
-    @classmethod
-    def create_image_from_server(cls, server_id, **kwargs):
-        """Wrapper utility that returns an image created from the server."""
-        name = data_utils.rand_name(cls.__name__ + "-image")
-        if 'name' in kwargs:
-            name = kwargs.pop('name')
-
-        resp, image = cls.servers_client.create_image(
-            server_id, name)
-        image_id = data_utils.parse_image_id(resp['location'])
-        cls.images.append(image_id)
-
-        if 'wait_until' in kwargs:
-            cls.images_client.wait_for_image_status(image_id,
-                                                    kwargs['wait_until'])
-            resp, image = cls.images_client.get_image_meta(image_id)
-
-        return resp, image
-
-    @classmethod
-    def rebuild_server(cls, server_id, **kwargs):
-        # Destroy an existing server and creates a new one
-        try:
-            cls.servers_client.delete_server(server_id)
-            cls.servers_client.wait_for_server_termination(server_id)
-        except Exception:
-            LOG.exception('Failed to delete server %s' % server_id)
-            pass
-        resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
-        cls.password = server['admin_password']
-        return server['id']
-
-    @classmethod
-    def delete_volume(cls, volume_id):
-        """Deletes the given volume and waits for it to be gone."""
-        cls._delete_volume(cls.volumes_client, volume_id)
-
 
 class BaseV3ComputeAdminTest(BaseV3ComputeTest):
     """Base test case class for all Compute Admin API V3 tests."""
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index d2fd970..c81cec5 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -26,6 +26,11 @@
 
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
 
+    def tearDown(self):
+        """Terminate test instances created after a test is executed."""
+        self.server_check_teardown()
+        super(ImagesOneServerTestJSON, self).tearDown()
+
     def setUp(self):
         # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 41a0590..9c4ab00 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
         for image_id in self.image_ids:
             self.client.delete_image(image_id)
             self.image_ids.remove(image_id)
+        self.server_check_teardown()
         super(ImagesOneServerNegativeTestJSON, self).tearDown()
 
     def setUp(self):
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 7b12555..cf9837f 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -17,15 +17,15 @@
 from tempest import test
 
 
-class AZTestJSON(base.BaseV2ComputeTest):
-
+class AZV3Test(base.BaseComputeTest):
     """
     Tests Availability Zone API List
     """
+    _api_version = 3
 
     @classmethod
     def setUpClass(cls):
-        super(AZTestJSON, cls).setUpClass()
+        super(AZV3Test, cls).setUpClass()
         cls.client = cls.availability_zone_client
 
     @test.attr(type='gate')
@@ -36,5 +36,9 @@
         self.assertTrue(len(availability_zone) > 0)
 
 
-class AZTestXML(AZTestJSON):
+class AZV2TestJSON(AZV3Test):
+    _api_version = 2
+
+
+class AZV2TestXML(AZV2TestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index f0a8c8d..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersTestJSON, cls).setUpClass()
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
@@ -53,13 +54,6 @@
         resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @test.attr(type='smoke')
-    def test_create_server_response(self):
-        # Check that the required fields are returned with values
-        self.assertEqual(202, self.resp.status)
-        self.assertTrue(self.server_initial['id'] is not None)
-        self.assertTrue(self.server_initial['adminPass'] is not None)
-
-    @test.attr(type='smoke')
     def test_verify_server_details(self):
         # Verify the specified server attributes are set correctly
         self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -114,26 +108,10 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
-        cls.meta = {'hello': 'world'}
-        cls.accessIPv4 = '1.1.1.1'
-        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
-        cls.name = data_utils.rand_name('server')
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
-        cls.client = cls.servers_client
         cls.flavor_client = cls.os_adm.flavors_client
-        cli_resp = cls.create_test_server(name=cls.name,
-                                          meta=cls.meta,
-                                          accessIPv4=cls.accessIPv4,
-                                          accessIPv6=cls.accessIPv6,
-                                          personality=personality,
-                                          disk_config=cls.disk_config)
-        cls.resp, cls.server_initial = cli_resp
-        cls.password = cls.server_initial['adminPass']
-        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
-        resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+        cls.client = cls.servers_client
 
     @testtools.skipUnless(CONF.compute.run_ssh,
                           'Instance validation tests are disabled.')
@@ -141,7 +119,7 @@
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
 
-        def create_flavor_with_extra_specs(self):
+        def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
             ram = 64
@@ -154,12 +132,12 @@
                                           ram, vcpus, disk,
                                           flavor_with_eph_disk_id,
                                           ephemeral=1))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
+            self.addCleanup(flavor_clean_up, flavor['id'])
             self.assertEqual(200, resp.status)
 
             return flavor['id']
 
-        def create_flavor_without_extra_specs(self):
+        def create_flavor_without_extra_specs():
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
@@ -172,18 +150,18 @@
                             create_flavor(flavor_no_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_no_eph_disk_id))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
+            self.addCleanup(flavor_clean_up, flavor['id'])
             self.assertEqual(200, resp.status)
 
             return flavor['id']
 
-        def flavor_clean_up(self, flavor_id):
+        def flavor_clean_up(flavor_id):
             resp, body = self.flavor_client.delete_flavor(flavor_id)
             self.assertEqual(resp.status, 202)
             self.flavor_client.wait_for_resource_deletion(flavor_id)
 
-        flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
-        flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
 
         admin_pass = self.image_ssh_password
 
@@ -196,13 +174,18 @@
                                       adminPass=admin_pass,
                                       flavor=flavor_with_eph_disk_id))
         # Get partition number of server without extra specs.
+        _, server_no_eph_disk = self.client.get_server(
+            server_no_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_no_eph_disk,
-                                                  self.ssh_user, self.password)
-        partition_num = len(linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num = len(linux_client.get_partitions().split('\n'))
 
+        _, server_with_eph_disk = self.client.get_server(
+            server_with_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_with_eph_disk,
-                                                  self.ssh_user, self.password)
-        self.assertEqual(partition_num + 1, linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num_emph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_emph)
 
 
 class ServersTestManualDisk(ServersTestJSON):
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index cc2d1ee..80e6008 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -42,8 +42,15 @@
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        _, server = self.client.get_server(self.server_id)
+        self.assertEqual(self.image_ref, server['image']['id'])
+        self.server_check_teardown()
+        super(ServerActionsTestJSON, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServerActionsTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
         cls.server_id = cls.rebuild_server(None)
@@ -125,7 +132,6 @@
                                                    metadata=meta,
                                                    personality=personality,
                                                    adminPass=password)
-        self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -145,6 +151,8 @@
             linux_client = remote_client.RemoteClient(server, self.ssh_user,
                                                       password)
             linux_client.validate_authentication()
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, self.image_ref)
 
     @test.attr(type='gate')
     def test_rebuild_server_in_stop_state(self):
@@ -157,11 +165,7 @@
         resp, server = self.client.stop(self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
-        self.addCleanup(self.client.start, self.server_id)
         resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
-        self.addCleanup(self.client.wait_for_server_status, self.server_id,
-                        'SHUTOFF')
-        self.addCleanup(self.client.rebuild, self.server_id, old_image)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -175,6 +179,12 @@
         rebuilt_image_id = server['image']['id']
         self.assertEqual(new_image, rebuilt_image_id)
 
+        # Restore the original image (the tearDown verifies it again)
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, old_image)
+            self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+        self.client.start(self.server_id)
+
     def _detect_server_image_flavor(self, server_id):
         # Detects the current server image flavor ref.
         resp, server = self.client.get_server(server_id)
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ddfc1d5..b7e4e38 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -60,25 +60,6 @@
         resp, server = self.create_test_server(personality=person)
         self.assertEqual('202', resp['status'])
 
-    @test.attr(type='gate')
-    def test_create_server_with_existent_personality_file(self):
-        # Any existing file that match specified file will be renamed to
-        # include the bak extension appended with a time stamp
-
-        # TODO(zhikunliu): will add validations when ssh instance validation
-        # re-factor is ready
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
-        resp, server = self.create_test_server(personality=personality,
-                                               wait_until="ACTIVE")
-        resp, image = self.create_image_from_server(server['id'],
-                                                    wait_until="ACTIVE")
-        resp, server = self.create_test_server(image_id=image['id'],
-                                               personality=personality,
-                                               wait_until="ACTIVE")
-        self.assertEqual('202', resp['status'])
-
 
 class ServerPersonalityTestXML(ServerPersonalityTestJSON):
     _interface = "xml"
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 65797e9..dae4709 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -12,12 +12,16 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import testtools
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
 
@@ -67,6 +71,8 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_rescue_paused_instance(self):
         # Rescue a paused server
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index cc801b5..5ac667e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -37,6 +37,10 @@
         except Exception:
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        self.server_check_teardown()
+        super(ServersNegativeTestJSON, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
         super(ServersNegativeTestJSON, cls).setUpClass()
@@ -133,12 +137,11 @@
     def test_pause_paused_server(self):
         # Pause a paused server.
         self.client.pause_server(self.server_id)
-        self.addCleanup(self.client.unpause_server,
-                        self.server_id)
         self.client.wait_for_server_status(self.server_id, 'PAUSED')
         self.assertRaises(exceptions.Conflict,
                           self.client.pause_server,
                           self.server_id)
+        self.client.unpause_server(self.server_id)
 
     @test.attr(type=['negative', 'gate'])
     def test_rebuild_reboot_deleted_server(self):
@@ -350,13 +353,12 @@
     def test_suspend_server_invalid_state(self):
         # suspend a suspended server.
         resp, _ = self.client.suspend_server(self.server_id)
-        self.addCleanup(self.client.resume_server,
-                        self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
         self.assertRaises(exceptions.Conflict,
                           self.client.suspend_server,
                           self.server_id)
+        self.client.resume_server(self.server_id)
 
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
@@ -426,7 +428,6 @@
         # shelve a shelved server.
         resp, server = self.client.shelve_server(self.server_id)
         self.assertEqual(202, resp.status)
-        self.addCleanup(self.client.unshelve_server, self.server_id)
 
         offload_time = CONF.compute.shelved_offload_time
         if offload_time >= 0:
@@ -448,6 +449,8 @@
                           self.client.shelve_server,
                           self.server_id)
 
+        self.client.unshelve_server(self.server_id)
+
     @test.attr(type=['negative', 'gate'])
     def test_unshelve_non_existent_server(self):
         # unshelve a non existent server
diff --git a/tempest/api/compute/v2/__init__.py b/tempest/api/compute/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/api/compute/v2/__init__.py
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 3aab1e1..795437b 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -41,6 +41,11 @@
             # Usually it means the server had a serious accident
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        """Terminate test instances created after a test is executed."""
+        self.server_check_teardown()
+        super(ImagesOneServerV3Test, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
         super(ImagesOneServerV3Test, cls).setUpClass()
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index 7679eee..eed81c6 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
         for image_id in self.image_ids:
             self.client.delete_image(image_id)
             self.image_ids.remove(image_id)
+        self.server_check_teardown()
         super(ImagesOneServerNegativeV3Test, self).tearDown()
 
     def setUp(self):
diff --git a/tempest/api/compute/v3/servers/test_attach_volume.py b/tempest/api/compute/v3/servers/test_attach_volume.py
index 28d8517..e994c7f 100644
--- a/tempest/api/compute/v3/servers/test_attach_volume.py
+++ b/tempest/api/compute/v3/servers/test_attach_volume.py
@@ -33,6 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(AttachVolumeV3Test, cls).setUpClass()
         cls.device = CONF.compute.volume_device_name
         if not CONF.service_available.cinder:
diff --git a/tempest/api/compute/v3/servers/test_availability_zone.py b/tempest/api/compute/v3/servers/test_availability_zone.py
deleted file mode 100644
index 5a1e07e..0000000
--- a/tempest/api/compute/v3/servers/test_availability_zone.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest import test
-
-
-class AZV3Test(base.BaseV3ComputeTest):
-
-    """
-    Tests Availability Zone API List
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        super(AZV3Test, cls).setUpClass()
-        cls.client = cls.availability_zone_client
-
-    @test.attr(type='gate')
-    def test_get_availability_zone_list_with_non_admin_user(self):
-        # List of availability zone with non-administrator user
-        resp, availability_zone = self.client.get_availability_zone_list()
-        self.assertEqual(200, resp.status)
-        self.assertTrue(len(availability_zone) > 0)
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index 80c40a2..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersV3Test, cls).setUpClass()
         cls.meta = {'hello': 'world'}
         cls.accessIPv4 = '1.1.1.1'
@@ -53,13 +54,6 @@
         resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @test.attr(type='smoke')
-    def test_create_server_response(self):
-        # Check that the required fields are returned with values
-        self.assertEqual(202, self.resp.status)
-        self.assertTrue(self.server_initial['id'] is not None)
-        self.assertTrue(self.server_initial['admin_password'] is not None)
-
-    @test.attr(type='smoke')
     def test_verify_server_details(self):
         # Verify the specified server attributes are set correctly
         self.assertEqual(self.accessIPv4,
@@ -115,26 +109,10 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
-        cls.meta = {'hello': 'world'}
-        cls.accessIPv4 = '1.1.1.1'
-        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
-        cls.name = data_utils.rand_name('server')
-        file_contents = 'This is a test file.'
-        personality = [{'path': '/test.txt',
-                       'contents': base64.b64encode(file_contents)}]
         cls.client = cls.servers_client
         cls.flavor_client = cls.flavors_admin_client
-        cli_resp = cls.create_test_server(name=cls.name,
-                                          meta=cls.meta,
-                                          access_ip_v4=cls.accessIPv4,
-                                          access_ip_v6=cls.accessIPv6,
-                                          personality=personality,
-                                          disk_config=cls.disk_config)
-        cls.resp, cls.server_initial = cli_resp
-        cls.password = cls.server_initial['admin_password']
-        cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
-        resp, cls.server = cls.client.get_server(cls.server_initial['id'])
 
     @testtools.skipUnless(CONF.compute.run_ssh,
                           'Instance validation tests are disabled.')
@@ -142,7 +120,7 @@
     def test_verify_created_server_ephemeral_disk(self):
         # Verify that the ephemeral disk is created when creating server
 
-        def create_flavor_with_extra_specs(self):
+        def create_flavor_with_extra_specs():
             flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
             flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
             ram = 512
@@ -154,13 +132,13 @@
                             create_flavor(flavor_with_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_with_eph_disk_id,
-                                          ephemeral=1, swap=1024, rxtx=1))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
-            self.assertEqual(200, resp.status)
+                                          ephemeral=1, rxtx=1))
+            self.addCleanup(flavor_clean_up, flavor['id'])
+            self.assertEqual(201, resp.status)
 
             return flavor['id']
 
-        def create_flavor_without_extra_specs(self):
+        def create_flavor_without_extra_specs():
             flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
             flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
 
@@ -173,18 +151,18 @@
                             create_flavor(flavor_no_eph_disk_name,
                                           ram, vcpus, disk,
                                           flavor_no_eph_disk_id))
-            self.addCleanup(self.flavor_clean_up, flavor['id'])
-            self.assertEqual(200, resp.status)
+            self.addCleanup(flavor_clean_up, flavor['id'])
+            self.assertEqual(201, resp.status)
 
             return flavor['id']
 
-        def flavor_clean_up(self, flavor_id):
+        def flavor_clean_up(flavor_id):
             resp, body = self.flavor_client.delete_flavor(flavor_id)
-            self.assertEqual(resp.status, 202)
+            self.assertEqual(resp.status, 204)
             self.flavor_client.wait_for_resource_deletion(flavor_id)
 
-        flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
-        flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
 
         admin_pass = self.image_ssh_password
 
@@ -197,13 +175,17 @@
                                       adminPass=admin_pass,
                                       flavor=flavor_with_eph_disk_id))
         # Get partition number of server without extra specs.
+        _, server_no_eph_disk = self.client.get_server(
+            server_no_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_no_eph_disk,
-                                                  self.ssh_user, self.password)
-        partition_num = len(linux_client.get_partitions())
-
+                                                  self.ssh_user, admin_pass)
+        partition_num = len(linux_client.get_partitions().split('\n'))
+        _, server_with_eph_disk = self.client.get_server(
+            server_with_eph_disk['id'])
         linux_client = remote_client.RemoteClient(server_with_eph_disk,
-                                                  self.ssh_user, self.password)
-        self.assertEqual(partition_num + 1, linux_client.get_partitions())
+                                                  self.ssh_user, admin_pass)
+        partition_num_emph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_emph)
 
 
 class ServersV3TestManualDisk(ServersV3Test):
diff --git a/tempest/api/compute/v3/servers/test_delete_server.py b/tempest/api/compute/v3/servers/test_delete_server.py
index 8f85557..add69ab 100644
--- a/tempest/api/compute/v3/servers/test_delete_server.py
+++ b/tempest/api/compute/v3/servers/test_delete_server.py
@@ -56,6 +56,8 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_delete_server_while_in_pause_state(self):
         # Delete a server while it's VM state is Pause
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index c377c30..5b35e2a 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -39,8 +39,15 @@
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        _, server = self.client.get_server(self.server_id)
+        self.assertEqual(self.image_ref, server['image']['id'])
+        self.server_check_teardown()
+        super(ServerActionsV3Test, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(ServerActionsV3Test, cls).setUpClass()
         cls.client = cls.servers_client
         cls.server_id = cls.rebuild_server(None)
@@ -116,7 +123,6 @@
                                                    name=new_name,
                                                    metadata=meta,
                                                    admin_password=password)
-        self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -137,6 +143,9 @@
                                                       password)
             linux_client.validate_authentication()
 
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, self.image_ref)
+
     @test.attr(type='gate')
     def test_rebuild_server_in_stop_state(self):
         # The server in stop state  should be rebuilt using the provided
@@ -148,11 +157,7 @@
         resp, server = self.client.stop(self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
-        self.addCleanup(self.client.start, self.server_id)
         resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
-        self.addCleanup(self.client.wait_for_server_status, self.server_id,
-                        'SHUTOFF')
-        self.addCleanup(self.client.rebuild, self.server_id, old_image)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -166,6 +171,12 @@
         rebuilt_image_id = server['image']['id']
         self.assertEqual(new_image, rebuilt_image_id)
 
+        # Restore the original image (the tearDown verifies it again)
+        if self.image_ref_alt != self.image_ref:
+            self.client.rebuild(self.server_id, old_image)
+            self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+        self.client.start(self.server_id)
+
     def _detect_server_image_flavor(self, server_id):
         # Detects the current server image flavor ref.
         resp, server = self.client.get_server(server_id)
@@ -329,6 +340,8 @@
 
         self.wait_for(self._get_output)
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type='gate')
     def test_pause_unpause_server(self):
         resp, server = self.client.pause_server(self.server_id)
diff --git a/tempest/api/compute/v3/servers/test_server_rescue_negative.py b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
index 08fb127..eb6bcdd 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
@@ -12,12 +12,16 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import testtools
 
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class ServerRescueNegativeV3Test(base.BaseV3ComputeTest):
 
@@ -66,6 +70,8 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_rescue_paused_instance(self):
         # Rescue a paused server
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index 586a52a..827c4c4 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -37,6 +37,10 @@
         except Exception:
             self.__class__.server_id = self.rebuild_server(self.server_id)
 
+    def tearDown(self):
+        self.server_check_teardown()
+        super(ServersNegativeV3Test, self).tearDown()
+
     @classmethod
     def setUpClass(cls):
         super(ServersNegativeV3Test, cls).setUpClass()
@@ -115,16 +119,17 @@
         self.assertRaises(exceptions.NotFound, self.client.reboot,
                           nonexistent_server, 'SOFT')
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+                          'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_paused_server(self):
         # Pause a paused server.
         self.client.pause_server(self.server_id)
-        self.addCleanup(self.client.unpause_server,
-                        self.server_id)
         self.client.wait_for_server_status(self.server_id, 'PAUSED')
         self.assertRaises(exceptions.Conflict,
                           self.client.pause_server,
                           self.server_id)
+        self.client.unpause_server(self.server_id)
 
     @test.attr(type=['negative', 'gate'])
     def test_rebuild_reboot_deleted_server(self):
@@ -328,13 +333,12 @@
     def test_suspend_server_invalid_state(self):
         # suspend a suspended server.
         resp, _ = self.client.suspend_server(self.server_id)
-        self.addCleanup(self.client.resume_server,
-                        self.server_id)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
         self.assertRaises(exceptions.Conflict,
                           self.client.suspend_server,
                           self.server_id)
+        self.client.resume_server(self.server_id)
 
     @test.attr(type=['negative', 'gate'])
     def test_resume_non_existent_server(self):
@@ -402,7 +406,6 @@
         # shelve a shelved server.
         resp, server = self.client.shelve_server(self.server_id)
         self.assertEqual(202, resp.status)
-        self.addCleanup(self.client.unshelve_server, self.server_id)
 
         offload_time = CONF.compute.shelved_offload_time
         if offload_time >= 0:
@@ -423,6 +426,8 @@
                           self.client.shelve_server,
                           self.server_id)
 
+        self.client.unshelve_server(self.server_id)
+
     @test.attr(type=['negative', 'gate'])
     def test_unshelve_non_existent_server(self):
         # unshelve a non existent server
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ab9d144..4585912 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -33,6 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.prepare_instance_network()
         super(AttachVolumeTestJSON, cls).setUpClass()
         cls.device = CONF.compute.volume_device_name
         if not CONF.service_available.cinder:
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 84d5be6..fc313f2 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from tempest import config
+from tempest import exceptions
 import tempest.test
 
 
@@ -27,46 +28,35 @@
     def setUpClass(cls):
         super(BaseDataProcessingTest, cls).setUpClass()
         if not CONF.service_available.sahara:
-            raise cls.skipException("Sahara support is required")
+            raise cls.skipException('Sahara support is required')
 
         os = cls.get_client_manager()
         cls.client = os.data_processing_client
 
-        # set some constants
         cls.flavor_ref = CONF.compute.flavor_ref
-        cls.simple_node_group_template = {
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'node_processes': [
-                "datanode",
-                "tasktracker"
-            ],
-            'flavor_id': cls.flavor_ref,
-            'node_configs': {
-                'HDFS': {
-                    'Data Node Heap Size': 1024
-                },
-                'MapReduce': {
-                    'Task Tracker Heap Size': 1024
-                }
-            }
-        }
 
         # add lists for watched resources
         cls._node_group_templates = []
+        cls._cluster_templates = []
 
     @classmethod
     def tearDownClass(cls):
-        # cleanup node group templates
-        for ngt_id in getattr(cls, '_node_group_templates', []):
-            try:
-                cls.client.delete_node_group_template(ngt_id)
-            except Exception:
-                # ignore errors while auto removing created resource
-                pass
+        cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
+                              cls.client.delete_cluster_template)
+        cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
+                              cls.client.delete_node_group_template)
         cls.clear_isolated_creds()
         super(BaseDataProcessingTest, cls).tearDownClass()
 
+    @staticmethod
+    def cleanup_resources(resource_id_list, method):
+        for resource_id in resource_id_list:
+            try:
+                method(resource_id)
+            except exceptions.NotFound:
+                # ignore errors while auto removing created resource
+                pass
+
     @classmethod
     def create_node_group_template(cls, name, plugin_name, hadoop_version,
                                    node_processes, flavor_id,
@@ -77,16 +67,32 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-
         resp, body = cls.client.create_node_group_template(name, plugin_name,
                                                            hadoop_version,
                                                            node_processes,
                                                            flavor_id,
                                                            node_configs,
                                                            **kwargs)
-
         # store id of created node group template
-        template_id = body['id']
-        cls._node_group_templates.append(template_id)
+        cls._node_group_templates.append(body['id'])
 
-        return resp, body, template_id
+        return resp, body
+
+    @classmethod
+    def create_cluster_template(cls, name, plugin_name, hadoop_version,
+                                node_groups, cluster_configs=None, **kwargs):
+        """Creates watched cluster template with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object. All resources created in this method will be automatically
+        removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_cluster_template(name, plugin_name,
+                                                        hadoop_version,
+                                                        node_groups,
+                                                        cluster_configs,
+                                                        **kwargs)
+        # store id of created cluster template
+        cls._cluster_templates.append(body['id'])
+
+        return resp, body
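The new cleanup_resources() helper generalizes the old per-resource teardown loop: each create_* classmethod appends the created id to a class-level watch list, and tearDownClass replays every list through the matching delete call, ignoring NotFound for resources a test already removed itself. A stripped-down, tempest-independent sketch of that behaviour:

# Minimal sketch of the watched-resource cleanup pattern; NotFound and
# fake_delete stand in for the tempest exception and the sahara client call.
class NotFound(Exception):
    """Stand-in for tempest.exceptions.NotFound."""


def cleanup_resources(resource_id_list, delete_method):
    """Delete every watched resource, ignoring ones already removed."""
    for resource_id in resource_id_list:
        try:
            delete_method(resource_id)
        except NotFound:
            pass


def fake_delete(resource_id):
    # pretend 'ngt-2' was already deleted inside a test
    if resource_id == 'ngt-2':
        raise NotFound(resource_id)


# tearDownClass would call this once per watch list with the real delete method
cleanup_resources(['ngt-1', 'ngt-2', 'ngt-3'], fake_delete)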
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index a64c345..ed4cf1f 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -19,65 +19,87 @@
 
 
 class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
-    def _create_simple_node_group_template(self, template_name=None):
-        """Creates simple Node Group Template with optional name specified.
+    @classmethod
+    def setUpClass(cls):
+        super(NodeGroupTemplateTest, cls).setUpClass()
+        cls.node_group_template = {
+            'description': 'Test node group template',
+            'plugin_name': 'vanilla',
+            'hadoop_version': '1.2.1',
+            'node_processes': [
+                'datanode',
+                'tasktracker'
+            ],
+            'flavor_id': cls.flavor_ref,
+            'node_configs': {
+                'HDFS': {
+                    'Data Node Heap Size': 1024
+                },
+                'MapReduce': {
+                    'Task Tracker Heap Size': 1024
+                }
+            }
+        }
+
+    def _create_node_group_template(self, template_name=None):
+        """Creates Node Group Template with optional name specified.
 
         It creates template and ensures response status and template name.
         Returns id and name of created template.
         """
-
         if template_name is None:
             # generate random name if it's not specified
-            template_name = data_utils.rand_name('sahara')
+            template_name = data_utils.rand_name('sahara-ng-template')
 
-        # create simple node group template
-        resp, body, template_id = self.create_node_group_template(
-            template_name, **self.simple_node_group_template)
+        # create node group template
+        resp, body = self.create_node_group_template(
+            template_name, **self.node_group_template)
 
         # ensure that template created successfully
         self.assertEqual(202, resp.status)
         self.assertEqual(template_name, body['name'])
 
-        return template_id, template_name
+        return body['id'], template_name
 
     @attr(type='smoke')
     def test_node_group_template_create(self):
-        # just create and ensure template
-        self._create_simple_node_group_template()
+        template_name = data_utils.rand_name('sahara-ng-template')
+        resp, body = self.create_node_group_template(
+            template_name, **self.node_group_template)
+
+        # check that template created successfully
+        self.assertEqual(resp.status, 202)
+        self.assertEqual(template_name, body['name'])
+        self.assertDictContainsSubset(self.node_group_template, body)
 
     @attr(type='smoke')
     def test_node_group_template_list(self):
-        template_info = self._create_simple_node_group_template()
+        template_info = self._create_node_group_template()
 
         # check for node group template in list
         resp, templates = self.client.list_node_group_templates()
 
         self.assertEqual(200, resp.status)
-        templates_info = list([(template['id'], template['name'])
-                               for template in templates])
+        templates_info = [(template['id'], template['name'])
+                          for template in templates]
         self.assertIn(template_info, templates_info)
 
     @attr(type='smoke')
     def test_node_group_template_get(self):
-        template_id, template_name = self._create_simple_node_group_template()
+        template_id, template_name = self._create_node_group_template()
 
         # check node group template fetch by id
         resp, template = self.client.get_node_group_template(template_id)
 
         self.assertEqual(200, resp.status)
         self.assertEqual(template_name, template['name'])
-        self.assertEqual(self.simple_node_group_template['plugin_name'],
-                         template['plugin_name'])
-        self.assertEqual(self.simple_node_group_template['node_processes'],
-                         template['node_processes'])
-        self.assertEqual(self.simple_node_group_template['flavor_id'],
-                         template['flavor_id'])
+        self.assertDictContainsSubset(self.node_group_template, template)
 
     @attr(type='smoke')
     def test_node_group_template_delete(self):
-        template_id, template_name = self._create_simple_node_group_template()
+        template_id = self._create_node_group_template()[0]
 
         # delete the node group template by id
-        resp = self.client.delete_node_group_template(template_id)
+        resp = self.client.delete_node_group_template(template_id)[0]
 
-        self.assertEqual('204', resp[0]['status'])
+        self.assertEqual(204, resp.status)
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8466c7b..b90891b 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -290,6 +290,7 @@
         return image_id
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_server_id(self):
         # The images should contain images filtered by server id
         resp, images = self.client.image_list_detail(
@@ -299,6 +300,7 @@
         self.assertEqual(self.snapshot_set, result_set)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_type(self):
         # The list of servers should be filtered by image type
         params = {'image_type': 'snapshot'}
@@ -309,6 +311,7 @@
         self.assertIn(self.snapshot, result_set)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_limit(self):
         # Verify only the expected number of results are returned
         resp, images = self.client.image_list_detail(limit=1)
@@ -317,6 +320,7 @@
         self.assertEqual(1, len(images))
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_by_change_since(self):
         # Verify an update image is returned
         # Becoming ACTIVE will modify the updated time
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
index 6bcc118..bc7f1d6 100644
--- a/tempest/api/network/admin/test_load_balancer_admin_actions.py
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -43,6 +43,8 @@
             tenant_name)['id']
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
+        cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
+                                   "ROUND_ROBIN", "HTTP", cls.subnet)
 
     @test.attr(type='smoke')
     def test_create_vip_as_admin_for_another_tenant(self):
@@ -102,6 +104,17 @@
         self.assertIsNotNone(pool['id'])
         self.assertEqual(self.tenant_id, pool['tenant_id'])
 
+    @test.attr(type='smoke')
+    def test_create_member_from_admin_user_other_tenant(self):
+        resp, body = self.admin_client.create_member(
+            address="10.0.9.47", protocol_port=80, pool_id=self.pool['id'],
+            tenant_id=self.tenant_id)
+        self.assertEqual('201', resp['status'])
+        member = body['member']
+        self.addCleanup(self.admin_client.delete_member, member['id'])
+        self.assertIsNotNone(member['id'])
+        self.assertEqual(self.tenant_id, member['tenant_id'])
+
 
 class LoadBalancerAdminTestXML(LoadBalancerAdminTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 696a1c3..425d3f2 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -174,14 +174,22 @@
         return subnet
 
     @classmethod
-    def create_port(cls, network):
+    def create_port(cls, network, **kwargs):
         """Wrapper utility that returns a test port."""
-        resp, body = cls.client.create_port(network_id=network['id'])
+        resp, body = cls.client.create_port(network_id=network['id'],
+                                            **kwargs)
         port = body['port']
         cls.ports.append(port)
         return port
 
     @classmethod
+    def update_port(cls, port, **kwargs):
+        """Wrapper utility that updates a test port."""
+        resp, body = cls.client.update_port(port['id'],
+                                            **kwargs)
+        return body['port']
+
+    @classmethod
     def create_router(cls, router_name=None, admin_state_up=False,
                       external_network_id=None, enable_snat=None):
         ext_gw_info = {}
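
create_port now forwards arbitrary keyword arguments to the client, and the new update_port wrapper mirrors it, so individual tests can pass attributes such as fixed_ips without widening the wrapper signatures. A small, self-contained sketch of that forwarding pattern; FakeNetworkClient is illustrative only, not the real Neutron client:

    class FakeNetworkClient(object):
        def create_port(self, **attrs):
            return {'status': '201'}, {'port': dict(attrs, id='port-1')}

        def update_port(self, port_id, **attrs):
            return {'status': '200'}, {'port': dict(attrs, id=port_id)}


    def create_port(client, network, **kwargs):
        # Wrapper stays generic; callers choose which attributes to send.
        resp, body = client.create_port(network_id=network['id'], **kwargs)
        return body['port']


    def update_port(client, port, **kwargs):
        resp, body = client.update_port(port['id'], **kwargs)
        return body['port']


    if __name__ == '__main__':
        client = FakeNetworkClient()
        port = create_port(client, {'id': 'net-1'},
                           fixed_ips=[{'subnet_id': 'subnet-1'}])
        port = update_port(client, port,
                           fixed_ips=[{'subnet_id': 'subnet-1'},
                                      {'subnet_id': 'subnet-2'}])
        print(len(port['fixed_ips']))  # 2
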
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 0647069..555cbda 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -14,9 +14,12 @@
 
 from tempest.api.network import base
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+CONF = config.CONF
+
 
 class FWaaSExtensionTestJSON(base.BaseNetworkTest):
     _interface = 'json'
@@ -67,6 +70,20 @@
         except exceptions.NotFound:
             pass
 
+        self.client.wait_for_resource_deletion('firewall', fw_id)
+
+    def _wait_for_active(self, fw_id):
+        def _wait():
+            resp, firewall = self.client.show_firewall(fw_id)
+            self.assertEqual('200', resp['status'])
+            firewall = firewall['firewall']
+            return firewall['status'] == 'ACTIVE'
+
+        if not test.call_until_true(_wait, CONF.network.build_timeout,
+                                    CONF.network.build_interval):
+            m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+            raise exceptions.TimeoutException(m)
+
     @test.attr(type='smoke')
     def test_list_firewall_rules(self):
         # List firewall rules
@@ -168,6 +185,15 @@
 
     @test.attr(type='smoke')
     def test_create_show_delete_firewall(self):
+        # Create tenant network resources required for an ACTIVE firewall
+        network = self.create_network()
+        subnet = self.create_subnet(network)
+        router = self.create_router(
+            data_utils.rand_name('router-'),
+            admin_state_up=True)
+        self.client.add_router_interface_with_subnet_id(
+            router['id'], subnet['id'])
+
         # Create firewall
         resp, body = self.client.create_firewall(
             name=data_utils.rand_name("firewall"),
@@ -177,11 +203,16 @@
         firewall_id = created_firewall['id']
         self.addCleanup(self._try_delete_firewall, firewall_id)
 
+        self._wait_for_active(firewall_id)
+
         # show a created firewall
         resp, firewall = self.client.show_firewall(firewall_id)
         self.assertEqual('200', resp['status'])
         firewall = firewall['firewall']
+
         for key, value in firewall.iteritems():
+            if key == 'status':
+                continue
             self.assertEqual(created_firewall[key], value)
 
         # list firewall
@@ -198,9 +229,6 @@
         # Delete firewall
         resp, _ = self.client.delete_firewall(firewall_id)
         self.assertEqual('204', resp['status'])
-        # Confirm deletion
-        # TODO(raies): Confirm deletion can be done only when,
-        # deleted firewall status is not "PENDING_DELETE".
 
 
 class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
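
_wait_for_active above polls show_firewall through test.call_until_true, bounded by the network build_timeout/build_interval options, and raises TimeoutException if the firewall never reaches ACTIVE. A self-contained approximation of that polling loop; the helper below mimics call_until_true rather than importing Tempest, and the fake status source is purely illustrative:

    import time


    def call_until_true(func, duration, sleep_for):
        # Approximation of tempest.test.call_until_true: call func until it
        # returns True or the duration budget is exhausted.
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False


    class TimeoutException(Exception):
        pass


    def wait_for_active(get_status, fw_id, timeout=10, interval=1):
        def _wait():
            return get_status(fw_id) == 'ACTIVE'

        if not call_until_true(_wait, timeout, interval):
            m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
            raise TimeoutException(m)


    if __name__ == '__main__':
        statuses = iter(['PENDING_CREATE', 'PENDING_CREATE', 'ACTIVE'])
        wait_for_active(lambda _fw_id: next(statuses), 'fw-1')
        print('firewall went ACTIVE')
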
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 3ab015e..673fc47 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -434,6 +434,40 @@
             self.assertEqual(member_id, body['member']['id'])
             self.assertIsNotNone(body['member']['admin_state_up'])
 
+    @test.attr(type='smoke')
+    def test_update_pool_related_to_member(self):
+        # Create new pool
+        resp, body = self.client.create_pool(
+            name=data_utils.rand_name("pool-"),
+            lb_method='ROUND_ROBIN',
+            protocol='HTTP',
+            subnet_id=self.subnet['id'])
+        self.assertEqual('201', resp['status'])
+        new_pool = body['pool']
+        self.addCleanup(self.client.delete_pool, new_pool['id'])
+        # Update member with new pool's id
+        resp, body = self.client.update_member(self.member['id'],
+                                               pool_id=new_pool['id'])
+        self.assertEqual('200', resp['status'])
+        # Confirm with show that pool_id changed
+        resp, body = self.client.show_member(self.member['id'])
+        member = body['member']
+        self.assertEqual(member['pool_id'], new_pool['id'])
+        # Update member with old pool id; this is needed for cleanup
+        resp, body = self.client.update_member(self.member['id'],
+                                               pool_id=self.pool['id'])
+        self.assertEqual('200', resp['status'])
+
+    @test.attr(type='smoke')
+    def test_update_member_weight(self):
+        resp, _ = self.client.update_member(self.member['id'],
+                                            weight=2)
+        self.assertEqual('200', resp['status'])
+        resp, body = self.client.show_member(self.member['id'])
+        self.assertEqual('200', resp['status'])
+        member = body['member']
+        self.assertEqual(2, member['weight'])
+
 
 class LoadBalancerTestXML(LoadBalancerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 0175de7..00754e4 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -37,15 +37,9 @@
         create a subnet for a tenant
         list tenant's subnets
         show a tenant subnet details
-        port create
-        port delete
-        port list
-        port show
-        port update
         network update
         subnet update
         delete a network also deletes its subnets
-        create a port with no IP address associated with it
 
         All subnet tests are run once with ipv4 and once with ipv6.
 
@@ -115,13 +109,13 @@
     @test.attr(type='smoke')
     def test_show_network_fields(self):
         # Verify specific fields of a network
-        field_list = [('fields', 'id'), ('fields', 'name'), ]
+        fields = ['id', 'name']
         resp, body = self.client.show_network(self.network['id'],
-                                              field_list=field_list)
+                                              fields=fields)
         self.assertEqual('200', resp['status'])
         network = body['network']
-        self.assertEqual(len(network), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(network.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(network[field_name], self.network[field_name])
 
     @test.attr(type='smoke')
@@ -136,13 +130,13 @@
     @test.attr(type='smoke')
     def test_list_networks_fields(self):
         # Verify specific fields of the networks
-        resp, body = self.client.list_networks(fields='id')
+        fields = ['id', 'name']
+        resp, body = self.client.list_networks(fields=fields)
         self.assertEqual('200', resp['status'])
         networks = body['networks']
         self.assertNotEmpty(networks, "Network list returned is empty")
         for network in networks:
-            self.assertEqual(len(network), 1)
-            self.assertIn('id', network)
+            self.assertEqual(sorted(network.keys()), sorted(fields))
 
     @test.attr(type='smoke')
     def test_show_subnet(self):
@@ -158,13 +152,13 @@
     @test.attr(type='smoke')
     def test_show_subnet_fields(self):
         # Verify specific fields of a subnet
-        field_list = [('fields', 'id'), ('fields', 'cidr'), ]
+        fields = ['id', 'network_id']
         resp, body = self.client.show_subnet(self.subnet['id'],
-                                             field_list=field_list)
+                                             fields=fields)
         self.assertEqual('200', resp['status'])
         subnet = body['subnet']
-        self.assertEqual(len(subnet), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(subnet.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(subnet[field_name], self.subnet[field_name])
 
     @test.attr(type='smoke')
@@ -179,13 +173,13 @@
     @test.attr(type='smoke')
     def test_list_subnets_fields(self):
         # Verify specific fields of subnets
-        resp, body = self.client.list_subnets(fields='id')
+        fields = ['id', 'network_id']
+        resp, body = self.client.list_subnets(fields=fields)
         self.assertEqual('200', resp['status'])
         subnets = body['subnets']
         self.assertNotEmpty(subnets, "Subnet list returned is empty")
         for subnet in subnets:
-            self.assertEqual(len(subnet), 1)
-            self.assertIn('id', subnet)
+            self.assertEqual(sorted(subnet.keys()), sorted(fields))
 
     def _try_delete_network(self, net_id):
         # delete network, if it exists
@@ -222,32 +216,6 @@
         # it from the list.
         self.subnets.pop()
 
-    @test.attr(type='smoke')
-    def test_create_port_with_no_ip(self):
-        # For this test create a new network - do not use any previously
-        # created networks.
-        name = data_utils.rand_name('network-nosubnet-')
-        resp, body = self.client.create_network(name=name)
-        self.assertEqual('201', resp['status'])
-        network = body['network']
-        net_id = network['id']
-        self.networks.append(network)
-
-        # Now create a port for this network - without creating any
-        # subnets for this network - this ensures no IP for the port
-        resp, body = self.client.create_port(network_id=net_id)
-        self.assertEqual('201', resp['status'])
-        port = body['port']
-        port_id = port['id']
-        self.addCleanup(self.client.delete_port, port_id)
-
-        # Verify that the port does not have any IP address
-        resp, body = self.client.show_port(port_id)
-        self.assertEqual('200', resp['status'])
-        port_resp = body['port']
-        self.assertEqual(port_id, port_resp['id'])
-        self.assertEqual(port_resp['fixed_ips'], [])
-
 
 class NetworksTestXML(NetworksTestJSON):
     _interface = 'xml'
@@ -396,11 +364,10 @@
 
     @classmethod
     def setUpClass(cls):
-        super(NetworksIpV6TestJSON, cls).setUpClass()
         if not CONF.network_feature_enabled.ipv6:
-            cls.tearDownClass()
             skip_msg = "IPv6 Tests are disabled."
             raise cls.skipException(skip_msg)
+        super(NetworksIpV6TestJSON, cls).setUpClass()
 
 
 class NetworksIpV6TestXML(NetworksIpV6TestJSON):
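
Both IPv6 test classes in this patch move the feature check ahead of super().setUpClass(), so a run with IPv6 disabled skips before the base class builds any networks, and the explicit tearDownClass call is no longer needed. A minimal sketch of that ordering, using a stand-in config object instead of CONF.network_feature_enabled:

    import unittest


    class FakeNetworkFeatureConf(object):
        ipv6 = False  # stand-in for CONF.network_feature_enabled.ipv6


    CONF = FakeNetworkFeatureConf()


    class BaseNetworkTest(unittest.TestCase):

        @classmethod
        def setUpClass(cls):
            # In real Tempest this is where networks/subnets get created.
            cls.resources_created = True

        def test_noop(self):
            pass


    class NetworksIpV6Test(BaseNetworkTest):

        @classmethod
        def setUpClass(cls):
            # Check the skip condition *before* the base class spends time
            # creating resources that would immediately need tearing down.
            # (Tempest raises cls.skipException; plain unittest.SkipTest
            # has the same effect here.)
            if not CONF.ipv6:
                raise unittest.SkipTest("IPv6 Tests are disabled.")
            super(NetworksIpV6Test, cls).setUpClass()


    if __name__ == '__main__':
        unittest.main()
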
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index bda5b2b..e6e6ea1 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -26,6 +26,16 @@
 class PortsTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
+    """
+    Test the following operations for ports:
+
+        port create
+        port delete
+        port list
+        port show
+        port update
+    """
+
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
@@ -47,6 +57,8 @@
         resp, body = self.client.create_port(network_id=self.network['id'])
         self.assertEqual('201', resp['status'])
         port = body['port']
+        # Schedule port deletion with verification upon test completion
+        self.addCleanup(self._delete_port, port['id'])
         self.assertTrue(port['admin_state_up'])
         # Verify port update
         new_name = "New_Port"
@@ -58,9 +70,6 @@
         updated_port = body['port']
         self.assertEqual(updated_port['name'], new_name)
         self.assertFalse(updated_port['admin_state_up'])
-        # Verify port deletion
-        resp, body = self.client.delete_port(port['id'])
-        self.assertEqual('204', resp['status'])
 
     @test.attr(type='smoke')
     def test_show_port(self):
@@ -80,17 +89,18 @@
         self.assertEqual(self.port['network_id'], port['network_id'])
         self.assertEqual(self.port['security_groups'],
                          port['security_groups'])
+        self.assertEqual(port['fixed_ips'], [])
 
     @test.attr(type='smoke')
     def test_show_port_fields(self):
         # Verify specific fields of a port
-        field_list = [('fields', 'id'), ]
+        fields = ['id', 'mac_address']
         resp, body = self.client.show_port(self.port['id'],
-                                           field_list=field_list)
+                                           fields=fields)
         self.assertEqual('200', resp['status'])
         port = body['port']
-        self.assertEqual(len(port), len(field_list))
-        for label, field_name in field_list:
+        self.assertEqual(sorted(port.keys()), sorted(fields))
+        for field_name in fields:
             self.assertEqual(port[field_name], self.port[field_name])
 
     @test.attr(type='smoke')
@@ -126,14 +136,37 @@
     @test.attr(type='smoke')
     def test_list_ports_fields(self):
         # Verify specific fields of ports
-        resp, body = self.client.list_ports(fields='id')
+        fields = ['id', 'mac_address']
+        resp, body = self.client.list_ports(fields=fields)
         self.assertEqual('200', resp['status'])
         ports = body['ports']
         self.assertNotEmpty(ports, "Port list returned is empty")
         # Asserting the fields returned are correct
         for port in ports:
-            self.assertEqual(len(port), 1)
-            self.assertIn('id', port)
+            self.assertEqual(sorted(fields), sorted(port.keys()))
+
+    @test.attr(type='smoke')
+    def test_update_port_with_second_ip(self):
+        # Create a network with two subnets
+        network = self.create_network()
+        subnet_1 = self.create_subnet(network)
+        subnet_2 = self.create_subnet(network)
+        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
+        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
+
+        # Create a port with a single IP address from first subnet
+        port = self.create_port(network,
+                                fixed_ips=fixed_ip_1)
+        self.assertEqual(1, len(port['fixed_ips']))
+
+        # Update the port with a second IP address from second subnet
+        fixed_ips = fixed_ip_1 + fixed_ip_2
+        port = self.update_port(port, fixed_ips=fixed_ips)
+        self.assertEqual(2, len(port['fixed_ips']))
+
+        # Update the port to return to a single IP address
+        port = self.update_port(port, fixed_ips=fixed_ip_1)
+        self.assertEqual(1, len(port['fixed_ips']))
 
 
 class PortsTestXML(PortsTestJSON):
@@ -255,11 +288,10 @@
 
     @classmethod
     def setUpClass(cls):
-        super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
         if not CONF.network_feature_enabled.ipv6:
-            cls.tearDownClass()
             skip_msg = "IPv6 Tests are disabled."
             raise cls.skipException(skip_msg)
+        super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
 
 
 class PortsAdminExtendedAttrsIpV6TestXML(
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index d919245..c1f468b 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -27,6 +27,7 @@
 class AccountQuotasTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountQuotasTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -101,7 +102,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(AccountQuotasTest, cls).tearDownClass()
 
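
The @test.safe_setup decorator added throughout the object storage tests runs tearDownClass when setUpClass fails part-way, which is why the teardowns now guard with hasattr(): attributes such as container_name may never have been assigned. An illustrative stand-in approximating that behaviour (not Tempest's actual safe_setup):

    import functools


    def safe_setup(f):
        # Approximation of tempest.test.safe_setup: run the class teardown
        # if class setup fails, then re-raise the original error.
        @functools.wraps(f)
        def decorator(cls):
            try:
                f(cls)
            except Exception:
                cls.tearDownClass()
                raise
        return decorator


    class AccountQuotasLikeTest(object):

        @classmethod
        @safe_setup
        def setUpClass(cls):
            # Fail before cls.container_name would have been assigned.
            raise RuntimeError('simulated setup failure')

        @classmethod
        def tearDownClass(cls):
            # Without the hasattr() guard this would raise AttributeError.
            if hasattr(cls, 'container_name'):
                print('deleting container %s' % cls.container_name)
            print('teardown ran safely')


    if __name__ == '__main__':
        try:
            AccountQuotasLikeTest.setUpClass()
        except RuntimeError:
            pass
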
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 29afebc..4677f97 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,6 +27,7 @@
 class AccountQuotasNegativeTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountQuotasNegativeTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -100,7 +101,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(AccountQuotasNegativeTest, cls).tearDownClass()
 
@@ -122,6 +124,7 @@
                           {"Quota-Bytes": "100"})
 
     @test.attr(type=["negative", "smoke"])
+    @test.skip_because(bug="1310597")
     @test.requires_ext(extension='account_quotas', service='object')
     def test_upload_large_object(self):
         object_name = data_utils.rand_name(name="TestObject")
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 4b895d8..7fb0604 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -29,10 +29,13 @@
 
 
 class AccountTest(base.BaseObjectTest):
+
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(AccountTest, cls).setUpClass()
-        cls.containers = []
         for i in moves.xrange(ord('a'), ord('f') + 1):
             name = data_utils.rand_name(name='%s-' % chr(i))
             cls.container_client.create_container(name)
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 6c71340..581c6d9 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,6 +23,7 @@
 class StaticWebTest(base.BaseObjectTest):
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(StaticWebTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -45,7 +46,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        cls.delete_containers([cls.container_name])
+        if hasattr(cls, "container_name"):
+            cls.delete_containers([cls.container_name])
         cls.data.teardown_all()
         super(StaticWebTest, cls).tearDownClass()
 
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 9bd986f..6bda83b 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -31,8 +31,10 @@
 
 
 class ContainerSyncTest(base.BaseObjectTest):
+    clients = {}
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ContainerSyncTest, cls).setUpClass()
         cls.containers = []
@@ -50,7 +52,6 @@
             int(container_sync_timeout / cls.container_sync_interval)
 
         # define container and object clients
-        cls.clients = {}
         cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
             (cls.container_client, cls.object_client)
         cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index 81db252..dc5585e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -26,7 +26,11 @@
 
 class ObjectFormPostTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectFormPostTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index fe0c994..878bf6d 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -26,7 +26,11 @@
 
 class ObjectFormPostNegativeTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectFormPostNegativeTest, cls).setUpClass()
         cls.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index b057698..06e63a4 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -14,8 +14,10 @@
 #    under the License.
 
 import hashlib
+import random
 import re
 from six import moves
+import time
 
 from tempest.api.object_storage import base
 from tempest.common import custom_matchers
@@ -308,10 +310,7 @@
         # retrieve object's data (in response body)
 
         # create object
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        resp, _ = self.object_client.create_object(self.container_name,
-                                                   object_name, data)
+        object_name, data = self._create_object()
         # get object
         resp, body = self.object_client.get_object(self.container_name,
                                                    object_name)
@@ -321,6 +320,183 @@
         self.assertEqual(body, data)
 
     @test.attr(type='smoke')
+    def test_get_object_with_metadata(self):
+        # get object with metadata
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        metadata = {'X-Object-Meta-test-meta': 'Meta'}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=metadata)
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=None)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertIn('x-object-meta-test-meta', resp)
+        self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_range(self):
+        # get object with range
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(100)
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=None)
+        rand_num = random.randint(3, len(data) - 1)
+        metadata = {'Range': 'bytes=%s-%s' % (rand_num - 3, rand_num - 1)}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data[rand_num - 3: rand_num])
+
+    @test.attr(type='smoke')
+    def test_get_object_with_x_object_manifest(self):
+        # get object with x_object_manifest
+
+        # uploading segments
+        object_name, data_segments = self._upload_segments()
+        # creating a manifest file
+        object_prefix = '%s/%s' % (self.container_name, object_name)
+        metadata = {'X-Object-Manifest': object_prefix}
+        data_empty = ''
+        resp, body = self.object_client.create_object(
+            self.container_name,
+            object_name,
+            data_empty,
+            metadata=metadata)
+
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=None)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+        # Check only the existence of common headers with custom matcher
+        self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+                        'Object', 'GET'))
+        self.assertIn('x-object-manifest', resp)
+
+        # Etag value of a large object is enclosed in double-quotations.
+        # This is a special case, therefore the formats of response headers
+        # are checked without a custom matcher.
+        self.assertTrue(resp['etag'].startswith('\"'))
+        self.assertTrue(resp['etag'].endswith('\"'))
+        self.assertTrue(resp['etag'].strip('\"').isalnum())
+        self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+        self.assertNotEqual(len(resp['content-type']), 0)
+        self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+                                 resp['x-trans-id']))
+        self.assertNotEqual(len(resp['date']), 0)
+        self.assertEqual(resp['accept-ranges'], 'bytes')
+        self.assertEqual(resp['x-object-manifest'],
+                         '%s/%s' % (self.container_name, object_name))
+
+        self.assertEqual(''.join(data_segments), body)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_match(self):
+        # get object with if_match
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(10)
+        create_md5 = hashlib.md5(data).hexdigest()
+        create_metadata = {'Etag': create_md5}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        list_metadata = {'If-Match': create_md5}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_modified_since(self):
+        # get object with if_modified_since
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string()
+        time_now = time.time()
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=None)
+
+        http_date = time.ctime(time_now - 86400)
+        list_metadata = {'If-Modified-Since': http_date}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    def test_get_object_with_if_none_match(self):
+        # get object with if_none_match
+        object_name = data_utils.rand_name(name='TestObject')
+        data = data_utils.arbitrary_string(10)
+        create_md5 = hashlib.md5(data).hexdigest()
+        create_metadata = {'Etag': create_md5}
+        self.object_client.create_object(self.container_name,
+                                         object_name,
+                                         data,
+                                         metadata=create_metadata)
+
+        list_data = data_utils.arbitrary_string(15)
+        list_md5 = hashlib.md5(list_data).hexdigest()
+        list_metadata = {'If-None-Match': list_md5}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_if_unmodified_since(self):
+        # get object with if_unmodified_since
+        object_name, data = self._create_object()
+
+        time_now = time.time()
+        http_date = time.ctime(time_now + 86400)
+        list_metadata = {'If-Unmodified-Since': http_date}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
+    def test_get_object_with_x_newest(self):
+        # get object with x_newest
+        object_name, data = self._create_object()
+
+        list_metadata = {'X-Newest': 'true'}
+        resp, body = self.object_client.get_object(
+            self.container_name,
+            object_name,
+            metadata=list_metadata)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, data)
+
+    @test.attr(type='smoke')
     def test_copy_object_in_same_container(self):
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
@@ -498,10 +674,7 @@
         # Make a conditional request for an object using the If-None-Match
         # header, it should get downloaded only if the local file is different,
         # otherwise the response code should be 304 Not Modified
-        object_name = data_utils.rand_name(name='TestObject')
-        data = data_utils.arbitrary_string()
-        self.object_client.create_object(self.container_name,
-                                         object_name, data)
+        object_name, data = self._create_object()
         # local copy is identical, no download
         md5 = hashlib.md5(data).hexdigest()
         headers = {'If-None-Match': md5}
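
The conditional download case above relies on standard HTTP validators: send the MD5 of the local copy as If-None-Match and Swift answers 304 Not Modified when nothing changed. A hedged sketch of the same exchange using the plain requests library; the URL and token are placeholders, not values from this test environment:

    import hashlib

    import requests


    def fetch_if_changed(object_url, token, local_copy):
        """Download a Swift object only if it differs from local_copy."""
        etag = hashlib.md5(local_copy).hexdigest()
        resp = requests.get(object_url,
                            headers={'X-Auth-Token': token,
                                     'If-None-Match': etag})
        if resp.status_code == 304:
            # Local copy is identical; Swift sent no body.
            return local_copy
        resp.raise_for_status()
        return resp.content


    # Example call (placeholder endpoint and credentials):
    # data = fetch_if_changed(
    #     'http://swift.example.com/v1/AUTH_tenant/cont/obj',
    #     'placeholder-token', b'contents of the local copy')
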
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index cf24f66..7d26433 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -27,7 +27,11 @@
 
 class ObjectTempUrlNegativeTest(base.BaseObjectTest):
 
+    metadata = {}
+    containers = []
+
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(ObjectTempUrlNegativeTest, cls).setUpClass()
 
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 7656ff3..4e40de9 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,7 +30,7 @@
     @classmethod
     def setUpClass(cls):
         super(BaseOrchestrationTest, cls).setUpClass()
-        cls.os = clients.OrchestrationManager()
+        cls.os = clients.Manager()
         if not CONF.service_available.heat:
             raise cls.skipException("Heat support is required")
         cls.build_timeout = CONF.orchestration.build_timeout
@@ -41,6 +41,7 @@
         cls.servers_client = cls.os.servers_client
         cls.keypairs_client = cls.os.keypairs_client
         cls.network_client = cls.os.network_client
+        cls.volumes_client = cls.os.volumes_client
         cls.stacks = []
         cls.keypairs = []
 
@@ -101,9 +102,8 @@
 
     @classmethod
     def load_template(cls, name, ext='yaml'):
-        loc = ["tempest", "api", "orchestration",
-               "stacks", "templates", "%s.%s" % (name, ext)]
-        fullpath = os.path.join(*loc)
+        loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+        fullpath = os.path.join(os.path.dirname(__file__), *loc)
 
         with open(fullpath, "r") as f:
             content = f.read()
@@ -137,3 +137,8 @@
 
         return dict((r['resource_name'], r['resource_type'])
                     for r in resources)
+
+    def get_stack_output(self, stack_identifier, output_key):
+        resp, body = self.client.get_stack(stack_identifier)
+        self.assertEqual('200', resp['status'])
+        return self.stack_output(body, output_key)
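
load_template now resolves templates relative to the test package rather than the process working directory, and get_stack_output wraps get_stack so tests can read a single output value. A self-contained sketch of the path resolution; the directory layout below is illustrative:

    import os


    def load_template(name, ext='yaml',
                      base_dir=os.path.dirname(os.path.abspath(__file__))):
        # Resolve stacks/templates/<name>.<ext> next to this module, so the
        # tests keep working no matter where the test runner was started.
        loc = ["stacks", "templates", "%s.%s" % (name, ext)]
        fullpath = os.path.join(base_dir, *loc)
        with open(fullpath, "r") as f:
            return f.read()


    # e.g. template = load_template('cinder_basic')
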
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..3e03a30
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2013-05-23
+
+resources:
+    volume:
+        type: OS::Cinder::Volume
+        properties:
+            size: 1
+            description: a descriptive description
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
new file mode 100644
index 0000000..08e3da4
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -0,0 +1,25 @@
+heat_template_version: 2013-05-23
+
+resources:
+    volume:
+        deletion_policy: 'Retain'
+        type: OS::Cinder::Volume
+        properties:
+            size: 1
+            description: a descriptive description
+
+outputs:
+  status:
+    description: status
+    value: { get_attr: ['volume', 'status'] }
+
+  size:
+    description: size
+    value: { get_attr: ['volume', 'size'] }
+
+  display_description:
+    description: display_description
+    value: { get_attr: ['volume', 'display_description'] }
+
+  volume_id:
+    value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
index 275d040..63b03f4 100644
--- a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -36,7 +36,6 @@
       admin_state_up: false
       external_gateway_info:
         network: {get_param: ExternalNetworkId}
-        enable_snat: false
   RouterInterface:
     type: OS::Neutron::RouterInterface
     properties:
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index b96f6ce..3086d78 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -137,8 +137,6 @@
         self.assertEqual('NewRouter', router['name'])
         self.assertEqual(self.external_network_id,
                          router['external_gateway_info']['network_id'])
-        self.assertEqual(False,
-                         router['external_gateway_info']['enable_snat'])
         self.assertEqual(False, router['admin_state_up'])
 
     @test.attr(type='slow')
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index cb5d941..4b845b1 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -110,14 +110,6 @@
         # wait for create to complete.
         self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
 
-        # fetch the stack
-        resp, body = self.client.get_stack(sid)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
-        # fetch the stack
-        resp, body = self.client.get_stack(sid)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
         # This is an assert of great significance, as it means the following
         # has happened:
         # - cfn-init read the provided metadata and wrote out a file
@@ -125,5 +117,5 @@
         # - a cfn-signal was built which was signed with provided credentials
         # - the wait condition was fulfilled and the stack has changed state
         wait_status = json.loads(
-            self.stack_output(body, 'WaitConditionStatus'))
+            self.get_stack_output(sid, 'WaitConditionStatus'))
         self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..2544c41
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,101 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(CinderResourcesTest, cls).setUpClass()
+        if not CONF.service_available.cinder:
+            raise cls.skipException('Cinder support is required')
+
+    def _cinder_verify(self, volume_id):
+        self.assertIsNotNone(volume_id)
+        resp, volume = self.volumes_client.get_volume(volume_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual('available', volume.get('status'))
+        self.assertEqual(1, volume.get('size'))
+        self.assertEqual('a descriptive description',
+                         volume.get('display_description'))
+
+    def _outputs_verify(self, stack_identifier):
+        self.assertEqual('available',
+                         self.get_stack_output(stack_identifier, 'status'))
+        self.assertEqual('1',
+                         self.get_stack_output(stack_identifier, 'size'))
+        self.assertEqual('a descriptive description',
+                         self.get_stack_output(stack_identifier,
+                                               'display_description'))
+
+    @test.attr(type='gate')
+    def test_cinder_volume_create_delete(self):
+        """Create and delete a volume via OS::Cinder::Volume."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('cinder_basic')
+        stack_identifier = self.create_stack(stack_name, template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+        self._cinder_verify(volume_id)
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack_identifier)
+
+        # Delete the stack and ensure the volume is gone
+        self.client.delete_stack(stack_identifier)
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+        self.assertRaises(exceptions.NotFound,
+                          self.volumes_client.get_volume,
+                          volume_id)
+
+    def _cleanup_volume(self, volume_id):
+        """Clean up the volume directly with cinder."""
+        resp = self.volumes_client.delete_volume(volume_id)
+        self.assertEqual(202, resp[0].status)
+        self.volumes_client.wait_for_resource_deletion(volume_id)
+
+    @test.attr(type='gate')
+    def test_cinder_volume_create_delete_retain(self):
+        """Ensure the 'Retain' deletion policy is respected."""
+        stack_name = data_utils.rand_name('heat')
+        template = self.load_template('cinder_basic_delete_retain')
+        stack_identifier = self.create_stack(stack_name, template)
+        self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+        # Verify with cinder that the volume exists, with matching details
+        volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+        self.addCleanup(self._cleanup_volume, volume_id)
+        self._cinder_verify(volume_id)
+
+        # Verify the stack outputs are as expected
+        self._outputs_verify(stack_identifier)
+
+        # Delete the stack and ensure the volume is *not* gone
+        self.client.delete_stack(stack_identifier)
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+        self._cinder_verify(volume_id)
+
+        # Volume cleanup happens via addCleanup calling _cleanup_volume
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
new file mode 100644
index 0000000..012c231
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.volume import base
+from tempest import test
+
+
+class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
+    """
+    Tests Volume Services API.
+    volume service list requires admin privileges.
+    """
+    _interface = "json"
+
+    @classmethod
+    def setUpClass(cls):
+        super(VolumesServicesTestJSON, cls).setUpClass()
+        cls.client = cls.os_adm.volume_services_client
+        resp, cls.services = cls.client.list_services()
+        cls.host_name = cls.services[0]['host']
+        cls.binary_name = cls.services[0]['binary']
+
+    @test.attr(type='gate')
+    def test_list_services(self):
+        resp, services = self.client.list_services()
+        self.assertEqual(200, resp.status)
+        self.assertNotEqual(0, len(services))
+
+    @test.attr(type='gate')
+    def test_get_service_by_service_binary_name(self):
+        params = {'binary': self.binary_name}
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertNotEqual(0, len(services))
+        for service in services:
+            self.assertEqual(self.binary_name, service['binary'])
+
+    @test.attr(type='gate')
+    def test_get_service_by_host_name(self):
+        services_on_host = [service for service in self.services if
+                            service['host'] == self.host_name]
+        params = {'host': self.host_name}
+
+        resp, services = self.client.list_services(params)
+
+        # we could have a periodic job check in between the two service
+        # lookups, so only compare binary lists.
+        s1 = map(lambda x: x['binary'], services)
+        s2 = map(lambda x: x['binary'], services_on_host)
+        # sort the lists before comparing, to remove any dependency
+        # on ordering.
+        self.assertEqual(sorted(s1), sorted(s2))
+
+    @test.attr(type='gate')
+    def test_get_service_by_service_and_host_name(self):
+        params = {'host': self.host_name, 'binary': self.binary_name}
+
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(1, len(services))
+        self.assertEqual(self.host_name, services[0]['host'])
+        self.assertEqual(self.binary_name, services[0]['binary'])
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4496f18..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -85,24 +85,6 @@
             self.volume['id'])
         self.assertEqual('error', volume_get['status'])
 
-    @test.attr(type='gate')
-    def test_volume_begin_detaching(self):
-        # test volume begin detaching : available -> detaching -> available
-        resp, body = self.client.volume_begin_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp_get, volume_get = self.client.get_volume(self.volume['id'])
-        self.assertEqual('detaching', volume_get['status'])
-
-    @test.attr(type='gate')
-    def test_volume_roll_detaching(self):
-        # test volume roll detaching : detaching -> in-use -> available
-        resp, body = self.client.volume_begin_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp, body = self.client.volume_roll_detaching(self.volume['id'])
-        self.assertEqual(202, resp.status)
-        resp_get, volume_get = self.client.get_volume(self.volume['id'])
-        self.assertEqual('in-use', volume_get['status'])
-
     def test_volume_force_delete_when_volume_is_creating(self):
         # test force delete when status of volume is creating
         self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 2c6050c..ff616fc 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -107,6 +107,7 @@
         cls.snapshots_client = cls.os.snapshots_client
         cls.volumes_client = cls.os.volumes_client
         cls.backups_client = cls.os.backups_client
+        cls.volume_services_client = cls.os.volume_services_client
         cls.volumes_extension_client = cls.os.volumes_extension_client
 
     @classmethod
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
new file mode 100644
index 0000000..b04cf64
--- /dev/null
+++ b/tempest/api_schema/compute/agents.py
@@ -0,0 +1,40 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_agents = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'agents': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'agent_id': {'type': ['integer', 'string']},
+                        'hypervisor': {'type': 'string'},
+                        'os': {'type': 'string'},
+                        'architecture': {'type': 'string'},
+                        'version': {'type': 'string'},
+                        'url': {'type': 'string', 'format': 'uri'},
+                        'md5hash': {'type': 'string'}
+                    },
+                    'required': ['agent_id', 'hypervisor', 'os',
+                                 'architecture', 'version', 'url', 'md5hash']
+                }
+            }
+        },
+        'required': ['agents']
+    }
+}
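
The list_agents schema above is meant to be fed to the jsonschema-based response validation on the client side. A hedged example of exercising it directly against a made-up payload, assuming the new module is importable as tempest.api_schema.compute.agents:

    import jsonschema

    from tempest.api_schema.compute import agents

    sample_body = {
        'agents': [{
            'agent_id': 1,
            'hypervisor': 'xen',
            'os': 'linux',
            'architecture': 'x86_64',
            'version': '8.0',
            'url': 'http://example.com/agents/agent-1.tgz',
            'md5hash': 'add6bb58e139be103324d04d82d8f545',
        }]
    }

    # Only the body schema is checked here; the status code list is used
    # separately when the full response is validated.
    jsonschema.validate(sample_body, agents.list_agents['response_body'])
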
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
index a70b356..a3ab3c8 100644
--- a/tempest/api_schema/compute/aggregates.py
+++ b/tempest/api_schema/compute/aggregates.py
@@ -12,9 +12,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 aggregate = {
     'type': 'object',
-    'properties:': {
+    'properties': {
         'availability_zone': {'type': ['string', 'null']},
         'created_at': {'type': 'string'},
         'deleted': {'type': 'boolean'},
@@ -56,3 +58,9 @@
 }
 
 aggregate_set_metadata = get_aggregate
+# The 'updated_at' attribute of 'update_aggregate' can't be null.
+update_aggregate = copy.deepcopy(get_aggregate)
+update_aggregate['response_body']['properties']['aggregate']['properties'][
+    'updated_at'] = {
+        'type': 'string'
+    }
diff --git a/tempest/api_schema/compute/availability_zone.py b/tempest/api_schema/compute/availability_zone.py
new file mode 100644
index 0000000..c1abc64
--- /dev/null
+++ b/tempest/api_schema/compute/availability_zone.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# NOTE: This is the detail information for the "get az detail" API.
+# The information is the same between v2 and v3 APIs.
+detail = {
+    'type': 'object',
+    'patternProperties': {
+        # NOTE: This pattern matches a hostname
+        '^[a-zA-Z0-9-_.]+$': {
+            'type': 'object',
+            'patternProperties': {
+                # NOTE: This pattern matches a service name
+                '^.*$': {
+                    'type': 'object',
+                    'properties': {
+                        'available': {'type': 'boolean'},
+                        'active': {'type': 'boolean'},
+                        'updated_at': {'type': 'string'}
+                    },
+                    'required': ['available', 'active', 'updated_at']
+                }
+            }
+        }
+    }
+}
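
The two nested patternProperties describe a mapping of host name to service name to status fields. A small fragment that satisfies this schema, validated with jsonschema (assumed validator; the host and service names below are made up):

```python
import jsonschema

from tempest.api_schema.compute import availability_zone

# Example 'hosts' fragment: host name -> service name -> status fields.
hosts = {
    'compute-01': {
        'nova-compute': {
            'available': True,
            'active': True,
            'updated_at': '2014-03-01T00:00:00.000000',
        },
    },
}
jsonschema.validate(hosts, availability_zone.detail)
```
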
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+_common_schema = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'certificate': {
+                'type': 'object',
+                'properties': {
+                    'data': {'type': 'string'},
+                    'private_key': {'type': 'string'},
+                },
+                'required': ['data', 'private_key'],
+            }
+        },
+        'required': ['certificate'],
+    }
+}
+
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+    'properties']['private_key'].update({'type': 'null'})
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
index fd02780..aa019e4 100644
--- a/tempest/api_schema/compute/flavors.py
+++ b/tempest/api_schema/compute/flavors.py
@@ -36,6 +36,21 @@
     }
 }
 
+common_flavor_info = {
+    'type': 'object',
+    'properties': {
+        'name': {'type': 'string'},
+        'links': parameter_types.links,
+        'ram': {'type': 'integer'},
+        'vcpus': {'type': 'integer'},
+        'swap': {'type': 'integer'},
+        'disk': {'type': 'integer'},
+        'id': {'type': 'string'}
+    },
+    'required': ['name', 'links', 'ram', 'vcpus',
+                 'swap', 'disk', 'id']
+}
+
 common_flavor_list_details = {
     'status_code': [200],
     'response_body': {
@@ -43,22 +58,20 @@
         'properties': {
             'flavors': {
                 'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'name': {'type': 'string'},
-                        'links': parameter_types.links,
-                        'ram': {'type': 'integer'},
-                        'vcpus': {'type': 'integer'},
-                        'swap': {'type': 'integer'},
-                        'disk': {'type': 'integer'},
-                        'id': {'type': 'string'}
-                    },
-                    'required': ['name', 'links', 'ram', 'vcpus',
-                                 'swap', 'disk', 'id']
-                }
+                'items': common_flavor_info
             }
         },
         'required': ['flavors']
     }
 }
+
+common_flavor_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'flavor': common_flavor_info
+        },
+        'required': ['flavor']
+    }
+}
diff --git a/tempest/api_schema/compute/flavors_extra_specs.py b/tempest/api_schema/compute/flavors_extra_specs.py
new file mode 100644
index 0000000..4003d36
--- /dev/null
+++ b/tempest/api_schema/compute/flavors_extra_specs.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+flavor_extra_specs = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'extra_specs': {
+                'type': 'object',
+                'patternProperties': {
+                    '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['extra_specs']
+    }
+}
+
+flavor_extra_specs_key = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'patternProperties': {
+            '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+        }
+    }
+}
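
The key pattern admits letters, digits, underscore, hyphen, dot, space and colon, which covers the usual scoped extra-spec keys. A quick illustration (the example keys are illustrative, not taken from this change):

```python
import re

key_pattern = re.compile('^[a-zA-Z0-9_\\-\\. :]+$')

# Typical scoped extra-spec keys match the pattern:
assert key_pattern.match('hw:cpu_policy')
assert key_pattern.match('quota:disk_read_bytes_sec')
# A key containing a character outside the class does not:
assert key_pattern.match('bad/key') is None
```
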
diff --git a/tempest/api_schema/compute/hosts.py b/tempest/api_schema/compute/hosts.py
index a73e214..a9d6567 100644
--- a/tempest/api_schema/compute/hosts.py
+++ b/tempest/api_schema/compute/hosts.py
@@ -12,6 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+common_start_up_body = {
+    'type': 'object',
+    'properties': {
+        'host': {'type': 'string'},
+        'power_action': {'enum': ['startup']}
+    },
+    'required': ['host', 'power_action']
+}
+
 list_hosts = {
     'status_code': [200],
     'response_body': {
diff --git a/tempest/api_schema/compute/interfaces.py b/tempest/api_schema/compute/interfaces.py
new file mode 100644
index 0000000..1e15c18
--- /dev/null
+++ b/tempest/api_schema/compute/interfaces.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_interface = {
+    'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
new file mode 100644
index 0000000..6723869
--- /dev/null
+++ b/tempest/api_schema/compute/migrations.py
@@ -0,0 +1,56 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+list_migrations = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'migrations': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        # NOTE: The type of 'id' is currently integer, but
+                        # 'string' is also allowed here because it may be
+                        # changed to 'uuid' in the future.
+                        'id': {'type': ['integer', 'string']},
+                        'status': {'type': 'string'},
+                        'instance_uuid': {'type': 'string'},
+                        'source_node': {'type': 'string'},
+                        'source_compute': {'type': 'string'},
+                        'dest_node': {'type': 'string'},
+                        'dest_compute': {'type': 'string'},
+                        'dest_host': {'type': 'string'},
+                        'old_instance_type_id': {
+                            'type': ['integer', 'string']
+                        },
+                        'new_instance_type_id': {
+                            'type': ['integer', 'string']
+                        },
+                        'created_at': {'type': 'string'},
+                        'updated_at': {'type': ['string', 'null']}
+                    },
+                    'required': [
+                        'id', 'status', 'instance_uuid', 'source_node',
+                        'source_compute', 'dest_node', 'dest_compute',
+                        'dest_host', 'old_instance_type_id',
+                        'new_instance_type_id', 'created_at', 'updated_at'
+                    ]
+                }
+            }
+        },
+        'required': ['migrations']
+    }
+}
diff --git a/tempest/api_schema/compute/parameter_types.py b/tempest/api_schema/compute/parameter_types.py
index 95d5b92..4a1dfdd 100644
--- a/tempest/api_schema/compute/parameter_types.py
+++ b/tempest/api_schema/compute/parameter_types.py
@@ -31,3 +31,37 @@
     'type': 'string',
     'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
 }
+
+access_ip_v4 = {
+    'type': 'string',
+    'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
+}
+
+access_ip_v6 = {
+    'type': 'string',
+    'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
+}
+
+addresses = {
+    'type': 'object',
+    'patternProperties': {
+        # NOTE: This pattern matches a network name such as 'private'.
+        '^[a-zA-Z0-9-_.]+$': {
+            'type': 'array',
+            'items': {
+                'type': 'object',
+                'properties': {
+                    'version': {'type': 'integer'},
+                    'addr': {
+                        'type': 'string',
+                        'anyOf': [
+                            {'format': 'ipv4'},
+                            {'format': 'ipv6'}
+                        ]
+                    }
+                },
+                'required': ['version', 'addr']
+            }
+        }
+    }
+}
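
access_ip_v4/v6 accept either a well-formed address or the empty string Nova returns when no address is assigned. Note that 'format' keywords such as 'ipv4' are only enforced when the validator is given a format checker; the sketch below assumes the jsonschema library's behaviour:

```python
import jsonschema

from tempest.api_schema.compute import parameter_types

checker = jsonschema.FormatChecker()
# A well-formed address satisfies the 'ipv4' format branch of anyOf...
jsonschema.validate('10.0.0.1', parameter_types.access_ip_v4,
                    format_checker=checker)
# ...while the empty string returned for an unset address matches the
# explicit enum [''] branch, so it validates as well.
jsonschema.validate('', parameter_types.access_ip_v4, format_checker=checker)
```
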
diff --git a/tempest/api_schema/compute/quotas.py b/tempest/api_schema/compute/quotas.py
new file mode 100644
index 0000000..f49771e
--- /dev/null
+++ b/tempest/api_schema/compute/quotas.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+common_quota_set = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'quota_set': {
+                'type': 'object',
+                'properties': {
+                    'instances': {'type': 'integer'},
+                    'cores': {'type': 'integer'},
+                    'ram': {'type': 'integer'},
+                    'floating_ips': {'type': 'integer'},
+                    'fixed_ips': {'type': 'integer'},
+                    'metadata_items': {'type': 'integer'},
+                    'key_pairs': {'type': 'integer'},
+                    'security_groups': {'type': 'integer'},
+                    'security_group_rules': {'type': 'integer'}
+                },
+                'required': ['instances', 'cores', 'ram',
+                             'floating_ips', 'fixed_ips',
+                             'metadata_items', 'key_pairs',
+                             'security_groups', 'security_group_rules']
+            }
+        },
+        'required': ['quota_set']
+    }
+}
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index 5eb5ecb..aefb8f5 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -12,6 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
+from tempest.api_schema.compute import parameter_types
+
 get_password = {
     'status_code': [200],
     'response_body': {
@@ -43,3 +47,73 @@
         'required': ['console']
     }
 }
+
+base_update_server = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'server': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': ['integer', 'string']},
+                    'name': {'type': 'string'},
+                    'status': {'type': 'string'},
+                    'image': {
+                        'type': 'object',
+                        'properties': {
+                            'id': {'type': ['integer', 'string']},
+                            'links': parameter_types.links
+                        },
+                        'required': ['id', 'links']
+                    },
+                    'flavor': {
+                        'type': 'object',
+                        'properties': {
+                            'id': {'type': ['integer', 'string']},
+                            'links': parameter_types.links
+                        },
+                        'required': ['id', 'links']
+                    },
+                    'user_id': {'type': 'string'},
+                    'tenant_id': {'type': 'string'},
+                    'created': {'type': 'string'},
+                    'updated': {'type': 'string'},
+                    'progress': {'type': 'integer'},
+                    'metadata': {'type': 'object'},
+                    'links': parameter_types.links,
+                    'addresses': parameter_types.addresses,
+                },
+                'required': ['id', 'name', 'status', 'image', 'flavor',
+                             'user_id', 'tenant_id', 'created', 'updated',
+                             'progress', 'metadata', 'links', 'addresses']
+            }
+        }
+    }
+}
+
+delete_server = {
+    'status_code': [204],
+}
+
+set_server_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {
+                'type': 'object',
+                'patternProperties': {
+                    '^.+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['metadata']
+    }
+}
+
+list_server_metadata = copy.deepcopy(set_server_metadata)
+
+delete_server_metadata_item = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..de3e12b
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_aggregate = {
+    'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/availability_zone.py b/tempest/api_schema/compute/v2/availability_zone.py
new file mode 100644
index 0000000..d3d2787
--- /dev/null
+++ b/tempest/api_schema/compute/v2/availability_zone.py
@@ -0,0 +1,54 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'availabilityZoneInfo': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'zoneName': {'type': 'string'},
+                        'zoneState': {
+                            'type': 'object',
+                            'properties': {
+                                'available': {'type': 'boolean'}
+                            },
+                            'required': ['available']
+                        },
+                        # NOTE: Here is the difference between detail and
+                        # non-detail.
+                        'hosts': {'type': 'null'}
+                    },
+                    'required': ['zoneName', 'zoneState', 'hosts']
+                }
+            }
+        },
+        'required': ['availabilityZoneInfo']
+    }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+    'availabilityZoneInfo']['items']['properties']['hosts'] = common.detail
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
index 999ca19..bee6ecb 100644
--- a/tempest/api_schema/compute/v2/flavors.py
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -31,3 +31,27 @@
                           'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
 # 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
 # are API extensions. So they are not 'required'.
+
+unset_flavor_extra_specs = {
+    'status_code': [200]
+}
+
+create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# The 'swap' attribute comes as an integer value, but if it is empty it
+# comes as "". So its type is defined as both string and integer.
+create_get_flavor_details['response_body']['properties']['flavor'][
+    'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+create_get_flavor_details['response_body']['properties']['flavor'][
+    'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+                          'os-flavor-access:is_public': {'type': 'boolean'},
+                          'rxtx_factor': {'type': 'number'},
+                          'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+delete_flavor = {
+    'status_code': [202]
+}
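
As the comment above notes, the v2 API reports swap as "" when unset and as an integer otherwise. A quick check of the patched sub-schema (jsonschema assumed as the validator):

```python
import jsonschema

from tempest.api_schema.compute.v2 import flavors as v2_flavors

swap_schema = v2_flavors.create_get_flavor_details[
    'response_body']['properties']['flavor']['properties']['swap']

# Both forms returned by Nova v2 satisfy the ['string', 'integer'] type.
jsonschema.validate('', swap_schema)
jsonschema.validate(512, swap_schema)
```
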
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
index cd6bd7b..9ec8848 100644
--- a/tempest/api_schema/compute/v2/hosts.py
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -14,18 +14,12 @@
 
 import copy
 
-body = {
-    'type': 'object',
-    'properties': {
-        'host': {'type': 'string'},
-        'power_action': {'enum': ['startup']}
-    },
-    'required': ['host', 'power_action']
-}
+from tempest.api_schema.compute import hosts
+
 
 startup_host = {
     'status_code': [200],
-    'response_body': body
+    'response_body': hosts.common_start_up_body
 }
 
 # The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index fad6b56..d121060 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -14,43 +14,46 @@
 
 from tempest.api_schema.compute import parameter_types
 
+common_image_schema = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': 'string'},
+        'status': {'type': 'string'},
+        'updated': {'type': 'string'},
+        'links': parameter_types.links,
+        'name': {'type': 'string'},
+        'created': {'type': 'string'},
+        'minDisk': {'type': 'integer'},
+        'minRam': {'type': 'integer'},
+        'progress': {'type': 'integer'},
+        'metadata': {'type': 'object'},
+        'server': {
+            'type': 'object',
+            'properties': {
+                # NOTE: The type of 'id' is currently integer, but
+                # 'string' is also allowed here because it may be
+                # changed to 'uuid' in the future.
+                'id': {'type': ['integer', 'string']},
+                'links': parameter_types.links
+            },
+            'required': ['id', 'links']
+        },
+        'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+    },
+    # The 'server' attribute only comes in the response body if the image is
+    # associated with a server. 'OS-EXT-IMG-SIZE:size' is an API
+    # extension, so neither is defined as 'required'.
+    'required': ['id', 'status', 'updated', 'links', 'name',
+                 'created', 'minDisk', 'minRam', 'progress',
+                 'metadata']
+}
+
 get_image = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
         'properties': {
-            'image': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'status': {'type': 'string'},
-                    'updated': {'type': 'string'},
-                    'links': parameter_types.links,
-                    'name': {'type': 'string'},
-                    'created': {'type': 'string'},
-                    'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
-                    'minDisk': {'type': 'integer'},
-                    'minRam': {'type': 'integer'},
-                    'progress': {'type': 'integer'},
-                    'metadata': {'type': 'object'},
-                    'server': {
-                        'type': 'object',
-                        'properties': {
-                            # NOTE: Now the type of 'id' is integer, but here
-                            # allows 'string' also because we will be able to
-                            # change it to 'uuid' in the future.
-                            'id': {'type': ['integer', 'string']},
-                            'links': parameter_types.links
-                        },
-                        'required': ['id', 'links']
-                    }
-                },
-                # 'server' attributes only comes in response body if image is
-                # associated with any server. So it is not 'required'
-                'required': ['id', 'status', 'updated', 'links', 'name',
-                             'created', 'OS-EXT-IMG-SIZE:size', 'minDisk',
-                             'minRam', 'progress', 'metadata']
-            }
+            'image': common_image_schema
         },
         'required': ['image']
     }
@@ -67,20 +70,7 @@
                     'type': 'object',
                     'properties': {
                         'id': {'type': 'string'},
-                        'links': {
-                            'type': 'array',
-                            'items': {
-                                'type': 'object',
-                                'properties': {
-                                    'href': {
-                                        'type': 'string',
-                                        'format': 'uri'
-                                    },
-                                    'rel': {'type': 'string'}
-                                },
-                                'required': ['href', 'rel']
-                            }
-                        },
+                        'links': parameter_types.links,
                         'name': {'type': 'string'}
                     },
                     'required': ['id', 'links', 'name']
@@ -92,7 +82,17 @@
 }
 
 create_image = {
-    'status_code': [202]
+    'status_code': [202],
+    'response_header': {
+        'type': 'object',
+        'properties': {
+            'location': {
+                'type': 'string',
+                'format': 'uri'
+            }
+        },
+        'required': ['location']
+    }
 }
 
 delete = {
@@ -120,3 +120,17 @@
         'required': ['meta']
     }
 }
+
+list_images_details = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'images': {
+                'type': 'array',
+                'items': common_image_schema
+            }
+        },
+        'required': ['images']
+    }
+}
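
create_image now also describes the response headers, requiring a Location header that points at the new image. A hedged sketch of checking both parts of the schema (Tempest's actual validation helper may differ; the header value below is made up):

```python
import jsonschema

from tempest.api_schema.compute.v2 import images


def check_create_image(status, headers):
    # Validate the status code and the (lower-cased) response headers
    # against the new 'response_header' part of the schema.
    assert status in images.create_image['status_code']
    jsonschema.validate(headers, images.create_image['response_header'])


check_create_image(202, {'location': 'http://nova.example.com/v2/images/1'})
```
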
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
index 17dc4dd..31c0458 100644
--- a/tempest/api_schema/compute/v2/quotas.py
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -12,39 +12,36 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'injected_files': {'type': 'integer'},
-                    'injected_file_content_bytes': {'type': 'integer'},
-                    'injected_file_path_bytes': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'}
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'injected_files',
-                             'injected_file_content_bytes',
-                             'injected_file_path_bytes', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
-}
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_files'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['required'].extend([
+    'id',
+    'injected_files',
+    'injected_file_content_bytes',
+    'injected_file_path_bytes'])
+
+quota_set_update = copy.deepcopy(quotas.common_quota_set)
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_files'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set'][
+    'required'].extend(['injected_files',
+                        'injected_file_content_bytes',
+                        'injected_file_path_bytes'])
 
 delete_quota = {
     'status_code': [202]
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 6dd44cd..8b4bead 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -12,6 +12,49 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+common_security_group_rule = {
+    'from_port': {'type': ['integer', 'null']},
+    'to_port': {'type': ['integer', 'null']},
+    'group': {
+        'type': 'object',
+        'properties': {
+            'tenant_id': {'type': 'string'},
+            'name': {'type': 'string'}
+        }
+    },
+    'ip_protocol': {'type': ['string', 'null']},
+    # 'parent_group_id' can be a UUID, so it is also defined as 'string'.
+    'parent_group_id': {'type': ['string', 'integer', 'null']},
+    'ip_range': {
+        'type': 'object',
+        'properties': {
+            'cidr': {'type': 'string'}
+        }
+        # When an optional argument such as 'group_id' is provided
+        # in the request body, the 'cidr' attribute does not come
+        # back in the response body. So it is not 'required'.
+    },
+    'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': ['integer', 'string']},
+        'name': {'type': 'string'},
+        'tenant_id': {'type': 'string'},
+        'rules': {
+            'type': 'array',
+            'items': {
+                'type': ['object', 'null'],
+                'properties': common_security_group_rule
+            }
+        },
+        'description': {'type': 'string'},
+    },
+    'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
 list_security_groups = {
     'status_code': [200],
     'response_body': {
@@ -19,24 +62,24 @@
         'properties': {
             'security_groups': {
                 'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'id': {'type': ['integer', 'string']},
-                        'name': {'type': 'string'},
-                        'tenant_id': {'type': 'string'},
-                        'rules': {'type': 'array'},
-                        'description': {'type': 'string'},
-                    },
-                    'required': ['id', 'name', 'tenant_id', 'rules',
-                                 'description'],
-                }
+                'items': common_security_group
             }
         },
         'required': ['security_groups']
     }
 }
 
+get_security_group = create_security_group = update_security_group = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'security_group': common_security_group
+        },
+        'required': ['security_group']
+    }
+}
+
 create_security_group_rule = {
     'status_code': [200],
     'response_body': {
@@ -44,25 +87,7 @@
         'properties': {
             'security_group_rule': {
                 'type': 'object',
-                'properties': {
-                    'from_port': {'type': 'integer'},
-                    'to_port': {'type': 'integer'},
-                    'group': {'type': 'object'},
-                    'ip_protocol': {'type': 'string'},
-                    # 'parent_group_id' can be UUID so defining it
-                    # as 'string' also.
-                    'parent_group_id': {'type': ['integer', 'string']},
-                    'id': {'type': ['integer', 'string']},
-                    'ip_range': {
-                        'type': 'object',
-                        'properties': {
-                            'cidr': {'type': 'string'}
-                        }
-                        # When optional argument is provided in request body
-                        # like 'group_id' then, attribute 'cidr' does not
-                        # comes in response body. So it is not 'required'.
-                    }
-                },
+                'properties': common_security_group_rule,
                 'required': ['from_port', 'to_port', 'group', 'ip_protocol',
                              'parent_group_id', 'id', 'ip_range']
             }
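
common_security_group_rule is a bare 'properties' mapping shared by the group and rule schemas, and 'cidr' is deliberately left optional. A sketch that wraps the mapping into a schema and shows that rules created from a CIDR and from a source group both validate (jsonschema assumed; the rule bodies are illustrative):

```python
import jsonschema

from tempest.api_schema.compute.v2 import security_groups

rule_schema = {
    'type': 'object',
    'properties': security_groups.common_security_group_rule,
}

# A rule created from a CIDR carries 'ip_range': {'cidr': ...}; a rule
# created from a source group carries 'group' details and an empty
# 'ip_range'. Both validate because 'cidr' is deliberately optional.
cidr_rule = {'id': 1, 'parent_group_id': 2, 'ip_protocol': 'tcp',
             'from_port': 22, 'to_port': 22, 'group': {},
             'ip_range': {'cidr': '10.0.0.0/24'}}
group_rule = dict(cidr_rule, ip_range={},
                  group={'tenant_id': 'demo', 'name': 'default'})
jsonschema.validate(cidr_rule, rule_schema)
jsonschema.validate(group_rule, rule_schema)
```
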
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index 81d2ef7..05d37af 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -12,7 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
 
 create_server = {
     'status_code': [202],
@@ -43,6 +46,20 @@
     }
 }
 
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+    'hostId': {'type': 'string'},
+    'OS-DCF:diskConfig': {'type': 'string'},
+    'accessIPv4': parameter_types.access_ip_v4,
+    'accessIPv6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+    # NOTE: OS-DCF:diskConfig and accessIPv4/v6 are API
+    # extensions, and some environments return a response
+    # without these attributes. So they are not 'required'.
+    'hostId'
+)
+
 list_virtual_interfaces = {
     'status_code': [200],
     'response_body': {
@@ -66,3 +83,43 @@
         'required': ['virtual_interfaces']
     }
 }
+
+attach_volume = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volumeAttachment': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string'},
+                    'device': {'type': 'string'},
+                    'volumeId': {'type': 'string'},
+                    'serverId': {'type': ['integer', 'string']}
+                },
+                'required': ['id', 'device', 'volumeId', 'serverId']
+            }
+        },
+        'required': ['volumeAttachment']
+    }
+}
+
+detach_volume = {
+    'status_code': [202]
+}
+
+set_get_server_metadata_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'meta': {
+                'type': 'object',
+                'patternProperties': {
+                    '^.+$': {'type': 'string'}
+                }
+            }
+        },
+        'required': ['meta']
+    }
+}
diff --git a/tempest/api_schema/compute/v2/tenant_usages.py b/tempest/api_schema/compute/v2/tenant_usages.py
new file mode 100644
index 0000000..0b824a1
--- /dev/null
+++ b/tempest/api_schema/compute/v2/tenant_usages.py
@@ -0,0 +1,92 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+_server_usages = {
+    'type': 'array',
+    'items': {
+        'type': 'object',
+        'properties': {
+            'ended_at': {
+                'oneOf': [
+                    {'type': 'string'},
+                    {'type': 'null'}
+                ]
+            },
+            'flavor': {'type': 'string'},
+            'hours': {'type': 'number'},
+            'instance_id': {'type': 'string'},
+            'local_gb': {'type': 'integer'},
+            'memory_mb': {'type': 'integer'},
+            'name': {'type': 'string'},
+            'started_at': {'type': 'string'},
+            'state': {'type': 'string'},
+            'tenant_id': {'type': 'string'},
+            'uptime': {'type': 'integer'},
+            'vcpus': {'type': 'integer'},
+        },
+        'required': ['ended_at', 'flavor', 'hours', 'instance_id', 'local_gb',
+                     'memory_mb', 'name', 'started_at', 'state', 'tenant_id',
+                     'uptime', 'vcpus']
+    }
+}
+
+_tenant_usage_list = {
+    'type': 'object',
+    'properties': {
+        'server_usages': _server_usages,
+        'start': {'type': 'string'},
+        'stop': {'type': 'string'},
+        'tenant_id': {'type': 'string'},
+        'total_hours': {'type': 'number'},
+        'total_local_gb_usage': {'type': 'number'},
+        'total_memory_mb_usage': {'type': 'number'},
+        'total_vcpus_usage': {'type': 'number'},
+    },
+    'required': ['start', 'stop', 'tenant_id',
+                 'total_hours', 'total_local_gb_usage',
+                 'total_memory_mb_usage', 'total_vcpus_usage']
+}
+
+# 'required' of get_tenant is different from list_tenant's.
+_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
+_tenant_usage_get['required'] = ['server_usages', 'start', 'stop', 'tenant_id',
+                                 'total_hours', 'total_local_gb_usage',
+                                 'total_memory_mb_usage', 'total_vcpus_usage']
+
+list_tenant = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'tenant_usages': {
+                'type': 'array',
+                'items': _tenant_usage_list
+            }
+        },
+        'required': ['tenant_usages']
+    }
+}
+
+get_tenant = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'tenant_usage': _tenant_usage_get
+        },
+        'required': ['tenant_usage']
+    }
+}
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..358e455
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+delete_aggregate = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/availability_zone.py b/tempest/api_schema/compute/v3/availability_zone.py
new file mode 100644
index 0000000..5f36c33
--- /dev/null
+++ b/tempest/api_schema/compute/v3/availability_zone.py
@@ -0,0 +1,53 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'availability_zone_info': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'zone_name': {'type': 'string'},
+                        'zone_state': {
+                            'type': 'object',
+                            'properties': {
+                                'available': {'type': 'boolean'}
+                            },
+                            'required': ['available']
+                        },
+                        # NOTE: Here is the difference between detail and
+                        # non-detail
+                        'hosts': {'type': 'null'}
+                    },
+                    'required': ['zone_name', 'zone_state', 'hosts']
+                }
+            }
+        },
+        'required': ['availability_zone_info']
+    }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+    'availability_zone_info']['items']['properties']['hosts'] = common.detail
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
index 542d2b1..52010f5 100644
--- a/tempest/api_schema/compute/v3/flavors.py
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -15,6 +15,7 @@
 import copy
 
 from tempest.api_schema.compute import flavors
+from tempest.api_schema.compute import flavors_extra_specs
 
 list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
 
@@ -31,3 +32,37 @@
 # So they are not 'required'.
 list_flavors_details['response_body']['properties']['flavors']['items'][
     'required'].extend(['disabled', 'ephemeral'])
+
+set_flavor_extra_specs = copy.deepcopy(flavors_extra_specs.flavor_extra_specs)
+set_flavor_extra_specs['status_code'] = [201]
+
+unset_flavor_extra_specs = {
+    'status_code': [204]
+}
+
+get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# NOTE: In the v3 API, 'swap' comes as '0', not the empty string "" as in
+# the v2 API, so 'swap' is left as an integer type only.
+
+# Defining extra attributes for V3 flavor schema
+get_flavor_details['response_body']['properties']['flavor'][
+    'properties'].update({'disabled': {'type': 'boolean'},
+                          'ephemeral': {'type': 'integer'},
+                          'flavor-access:is_public': {'type': 'boolean'},
+                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+get_flavor_details['response_body']['properties']['flavor'][
+    'required'].extend(['disabled', 'ephemeral'])
+
+
+create_flavor_details = copy.deepcopy(get_flavor_details)
+
+# Overriding the status code for create flavor V3 API.
+create_flavor_details['status_code'] = [201]
+
+delete_flavor = {
+    'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
index 2cf8f9b..575a6e2 100644
--- a/tempest/api_schema/compute/v3/hosts.py
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -13,14 +13,16 @@
 #    under the License.
 
 import copy
-from tempest.api_schema.compute.v2 import hosts
+
+from tempest.api_schema.compute import hosts
+
 
 startup_host = {
     'status_code': [200],
     'response_body': {
         'type': 'object',
         'properties': {
-            'host': hosts.body
+            'host': hosts.common_start_up_body
         },
         'required': ['host']
     }
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
index aec1e80..a3212ed 100644
--- a/tempest/api_schema/compute/v3/quotas.py
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -12,34 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'}
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
-}
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set'][
+    'required'].extend(['id'])
 
 quota_common_info = {
     'type': 'object',
@@ -51,34 +32,27 @@
     'required': ['reserved', 'limit', 'in_use']
 }
 
-quota_set_detail = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': quota_common_info,
-                    'cores': quota_common_info,
-                    'ram': quota_common_info,
-                    'floating_ips': quota_common_info,
-                    'fixed_ips': quota_common_info,
-                    'metadata_items': quota_common_info,
-                    'key_pairs': quota_common_info,
-                    'security_groups': quota_common_info,
-                    'security_group_rules': quota_common_info
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
-}
+quota_set_detail = copy.deepcopy(quotas.common_quota_set)
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'instances'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'cores'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'ram'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'floating_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'fixed_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'metadata_items'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'key_pairs'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'security_groups'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'security_group_rules'] = quota_common_info
 
 delete_quota = {
     'status_code': [204]
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 390962e..6926dbd 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -12,7 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
 
 create_server = {
     'status_code': [202],
@@ -29,8 +32,8 @@
                     'os-security-groups:security_groups': {'type': 'array'},
                     'links': parameter_types.links,
                     'admin_password': {'type': 'string'},
-                    'os-access-ips:access_ip_v4': {'type': 'string'},
-                    'os-access-ips:access_ip_v6': {'type': 'string'}
+                    'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+                    'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
                 },
                 # NOTE: os-access-ips:access_ip_v4/v6 are API extension,
                 # and some environments return a response without these
@@ -42,3 +45,34 @@
         'required': ['server']
     }
 }
+
+addresses_v3 = copy.deepcopy(parameter_types.addresses)
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+    'properties'].update({
+        'type': {'type': 'string'},
+        'mac_addr': {'type': 'string'}
+    })
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+    'required'].extend(
+        ['type', 'mac_addr']
+    )
+
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+    'addresses': addresses_v3,
+    'host_id': {'type': 'string'},
+    'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+    'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+    # NOTE: os-access-ips:access_ip_v4/v6 are API extension,
+    # and some environments return a response without these
+    # attributes. So they are not 'required'.
+    'host_id'
+)
+
+attach_detach_volume = {
+    'status_code': [202]
+}
+
+set_get_server_metadata_item = copy.deepcopy(servers.set_server_metadata)
diff --git a/tempest/auth.py b/tempest/auth.py
index 5fc923f..9c51edb 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
         :param client_type: 'tempest' or 'official'
         :param interface: 'json' or 'xml'. Applicable for tempest client only
         """
+        credentials = self._convert_credentials(credentials)
         if self.check_credentials(credentials):
             self.credentials = credentials
         else:
             raise TypeError("Invalid credentials")
-        self.credentials = credentials
         self.client_type = client_type
         self.interface = interface
         if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
         self.alt_auth_data = None
         self.alt_part = None
 
+    def _convert_credentials(self, credentials):
+        # Support dict credentials for backwards compatibility
+        if isinstance(credentials, dict):
+            return get_credentials(**credentials)
+        else:
+            return credentials
+
     def __str__(self):
         return "Creds :{creds}, client type: {client_type}, interface: " \
                "{interface}, cached auth data: {cache}".format(
@@ -73,29 +80,55 @@
     def _get_auth(self):
         raise NotImplementedError
 
+    def _fill_credentials(self, auth_data_body):
+        raise NotImplementedError
+
+    def fill_credentials(self):
+        """
+        Fill credentials object with data from auth
+        """
+        auth_data = self.get_auth()
+        self._fill_credentials(auth_data[1])
+        return self.credentials
+
     @classmethod
     def check_credentials(cls, credentials):
         """
-        Verify credentials are valid. Subclasses can do a better check.
+        Verify credentials are valid.
         """
-        return isinstance(credentials, dict)
+        return isinstance(credentials, Credentials) and credentials.is_valid()
 
     @property
     def auth_data(self):
-        if self.cache is None or self.is_expired(self.cache):
-            self.cache = self._get_auth()
-        return self.cache
+        return self.get_auth()
 
     @auth_data.deleter
     def auth_data(self):
         self.clear_auth()
 
+    def get_auth(self):
+        """
+        Returns auth from the cache if available, otherwise authenticates first
+        """
+        if self.cache is None or self.is_expired(self.cache):
+            self.set_auth()
+        return self.cache
+
+    def set_auth(self):
+        """
+        Forces (re-)authentication, ignoring the cache if it exists.
+        Refills the credentials from the fresh auth data.
+        """
+        self.cache = self._get_auth()
+        self._fill_credentials(self.cache[1])
+
     def clear_auth(self):
         """
         Can be called to clear the access cache so that next request
         will fetch a new token and base_url.
         """
         self.cache = None
+        self.credentials.reset()
 
     def is_expired(self, auth_data):
         raise NotImplementedError
@@ -218,16 +251,6 @@
 
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
 
-    @classmethod
-    def check_credentials(cls, credentials, scoped=True):
-        # tenant_name is optional if not scoped
-        valid = super(KeystoneV2AuthProvider, cls).check_credentials(
-            credentials) and 'username' in credentials and \
-            'password' in credentials
-        if scoped:
-            valid = valid and 'tenant_name' in credentials
-        return valid
-
     def _auth_client(self):
         if self.client_type == 'tempest':
             if self.interface == 'json':
@@ -240,13 +263,25 @@
     def _auth_params(self):
         if self.client_type == 'tempest':
             return dict(
-                user=self.credentials['username'],
-                password=self.credentials['password'],
-                tenant=self.credentials.get('tenant_name', None),
+                user=self.credentials.username,
+                password=self.credentials.password,
+                tenant=self.credentials.tenant_name,
                 auth_data=True)
         else:
             raise NotImplementedError
 
+    def _fill_credentials(self, auth_data_body):
+        tenant = auth_data_body['token']['tenant']
+        user = auth_data_body['user']
+        if self.credentials.tenant_name is None:
+            self.credentials.tenant_name = tenant['name']
+        if self.credentials.tenant_id is None:
+            self.credentials.tenant_id = tenant['id']
+        if self.credentials.username is None:
+            self.credentials.username = user['name']
+        if self.credentials.user_id is None:
+            self.credentials.user_id = user['id']
+
     def base_url(self, filters, auth_data=None):
         """
         Filters can be:
@@ -303,16 +338,6 @@
 
     EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
 
-    @classmethod
-    def check_credentials(cls, credentials, scoped=True):
-        # tenant_name is optional if not scoped
-        valid = super(KeystoneV3AuthProvider, cls).check_credentials(
-            credentials) and 'username' in credentials and \
-            'password' in credentials and 'domain_name' in credentials
-        if scoped:
-            valid = valid and 'tenant_name' in credentials
-        return valid
-
     def _auth_client(self):
         if self.client_type == 'tempest':
             if self.interface == 'json':
@@ -325,14 +350,47 @@
     def _auth_params(self):
         if self.client_type == 'tempest':
             return dict(
-                user=self.credentials['username'],
-                password=self.credentials['password'],
-                tenant=self.credentials.get('tenant_name', None),
-                domain=self.credentials['domain_name'],
+                user=self.credentials.username,
+                password=self.credentials.password,
+                tenant=self.credentials.tenant_name,
+                domain=self.credentials.user_domain_name,
                 auth_data=True)
         else:
             raise NotImplementedError
 
+    def _fill_credentials(self, auth_data_body):
+        # project or domain, depending on the scope
+        project = auth_data_body.get('project', None)
+        domain = auth_data_body.get('domain', None)
+        # user is always there
+        user = auth_data_body['user']
+        # Set project fields
+        if project is not None:
+            if self.credentials.project_name is None:
+                self.credentials.project_name = project['name']
+            if self.credentials.project_id is None:
+                self.credentials.project_id = project['id']
+            if self.credentials.project_domain_id is None:
+                self.credentials.project_domain_id = project['domain']['id']
+            if self.credentials.project_domain_name is None:
+                self.credentials.project_domain_name = \
+                    project['domain']['name']
+        # Set domain fields
+        if domain is not None:
+            if self.credentials.domain_id is None:
+                self.credentials.domain_id = domain['id']
+            if self.credentials.domain_name is None:
+                self.credentials.domain_name = domain['name']
+        # Set user fields
+        if self.credentials.username is None:
+            self.credentials.username = user['name']
+        if self.credentials.user_id is None:
+            self.credentials.user_id = user['id']
+        if self.credentials.user_domain_id is None:
+            self.credentials.user_domain_id = user['domain']['id']
+        if self.credentials.user_domain_name is None:
+            self.credentials.user_domain_name = user['domain']['name']
+
     def base_url(self, filters, auth_data=None):
         """
         Filters can be:
@@ -398,3 +456,248 @@
                                             self.EXPIRY_DATE_FORMAT)
         return expiry - self.token_expiry_threshold <= \
             datetime.datetime.utcnow()
+
+
+def get_default_credentials(credential_type, fill_in=True):
+    """
+    Returns configured credentials of the specified type
+    based on the configured auth_version
+    """
+    return get_credentials(fill_in=fill_in, credential_type=credential_type)
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+    """
+    Builds a credentials object based on the configured auth_version
+
+    :param credential_type (string): request credentials of this type
+           from the tempest configuration file. Valid values are
+           defined in Credentials.TYPES.
+    :param kwargs (dict): taken into account only if credential_type
+           is not specified or None. Dict of credential key/value
+           pairs.
+    Examples:
+
+        Returns credentials from the provided parameters:
+        >>> get_credentials(username='foo', password='bar')
+
+        Returns credentials from tempest configuration:
+        >>> get_credentials(credential_type='user')
+    """
+    if CONF.identity.auth_version == 'v2':
+        credential_class = KeystoneV2Credentials
+        auth_provider_class = KeystoneV2AuthProvider
+    elif CONF.identity.auth_version == 'v3':
+        credential_class = KeystoneV3Credentials
+        auth_provider_class = KeystoneV3AuthProvider
+    else:
+        raise exceptions.InvalidConfiguration('Unsupported auth version')
+    if credential_type is not None:
+        creds = credential_class.get_default(credential_type)
+    else:
+        creds = credential_class(**kwargs)
+    # Fill in the credentials fields that were not specified
+    if fill_in:
+        auth_provider = auth_provider_class(creds)
+        creds = auth_provider.fill_credentials()
+    return creds
+
+
+class Credentials(object):
+    """
+    Set of credentials for accessing OpenStack services
+
+    ATTRIBUTES: list of valid class attributes representing credentials.
+
+    TYPES: types of credentials available in the configuration file.
+           For each key there's a tuple (section, prefix) to match the
+           configuration options.
+    """
+
+    ATTRIBUTES = []
+    TYPES = {
+        'identity_admin': ('identity', 'admin'),
+        'compute_admin': ('compute_admin', None),
+        'user': ('identity', None),
+        'alt_user': ('identity', 'alt')
+    }
+
+    def __init__(self, **kwargs):
+        """
+        Enforce the available attributes at init time (only).
+        Additional attributes can still be set afterwards if tests need
+        to do so.
+        """
+        self._initial = kwargs
+        self._apply_credentials(kwargs)
+
+    def _apply_credentials(self, attr):
+        for key in attr.keys():
+            if key in self.ATTRIBUTES:
+                setattr(self, key, attr[key])
+            else:
+                raise exceptions.InvalidCredentials
+
+    def __str__(self):
+        """
+        Represent only attributes included in self.ATTRIBUTES
+        """
+        _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+        return str(_repr)
+
+    def __eq__(self, other):
+        """
+        Credentials are equal if attributes in self.ATTRIBUTES are equal
+        """
+        return str(self) == str(other)
+
+    def __getattr__(self, key):
+        # If an attribute is set, __getattr__ is not invoked
+        # If an attribute is not set, and it is a known one, return None
+        if key in self.ATTRIBUTES:
+            return None
+        else:
+            raise AttributeError
+
+    def __delitem__(self, key):
+        # For backwards compatibility, support dict behaviour
+        if key in self.ATTRIBUTES:
+            delattr(self, key)
+        else:
+            raise AttributeError
+
+    def get(self, item, default):
+        # In this patch, act as a dict for backward compatibility
+        try:
+            return getattr(self, item)
+        except AttributeError:
+            return default
+
+    @classmethod
+    def get_default(cls, credentials_type):
+        if credentials_type not in cls.TYPES:
+            raise exceptions.InvalidCredentials()
+        creds = cls._get_default(credentials_type)
+        if not creds.is_valid():
+            raise exceptions.InvalidConfiguration()
+        return creds
+
+    @classmethod
+    def _get_default(cls, credentials_type):
+        raise NotImplementedError
+
+    def is_valid(self):
+        raise NotImplementedError
+
+    def reset(self):
+        # First delete all known attributes
+        for key in self.ATTRIBUTES:
+            if getattr(self, key) is not None:
+                delattr(self, key)
+        # Then re-apply initial setup
+        self._apply_credentials(self._initial)
+
+
+class KeystoneV2Credentials(Credentials):
+
+    CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+    ATTRIBUTES = ['user_id', 'tenant_id']
+    ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+    @classmethod
+    def _get_default(cls, credentials_type='user'):
+        params = {}
+        section, prefix = cls.TYPES[credentials_type]
+        for attr in cls.CONF_ATTRIBUTES:
+            _section = getattr(CONF, section)
+            if prefix is None:
+                params[attr] = getattr(_section, attr)
+            else:
+                params[attr] = getattr(_section, prefix + "_" + attr)
+        return cls(**params)
+
+    def is_valid(self):
+        """
+        The minimum set of valid credentials is username and password;
+        tenant is optional.
+        """
+        return None not in (self.username, self.password)
+
+
+class KeystoneV3Credentials(KeystoneV2Credentials):
+    """
+    Credentials suitable for the Keystone Identity V3 API
+    """
+
+    CONF_ATTRIBUTES = ['domain_name', 'password', 'tenant_name', 'username']
+    ATTRIBUTES = ['project_domain_id', 'project_domain_name', 'project_id',
+                  'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
+                  'user_domain_name', 'user_id']
+    ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+    def __init__(self, **kwargs):
+        """
+        If domain is not specified, load the one configured for the
+        identity manager.
+        """
+        domain_fields = set(x for x in self.ATTRIBUTES if 'domain' in x)
+        if not domain_fields.intersection(kwargs.keys()):
+            kwargs['user_domain_name'] = CONF.identity.admin_domain_name
+        super(KeystoneV3Credentials, self).__init__(**kwargs)
+
+    def __setattr__(self, key, value):
+        parent = super(KeystoneV3Credentials, self)
+        # for tenant_* set both project and tenant
+        if key == 'tenant_id':
+            parent.__setattr__('project_id', value)
+        elif key == 'tenant_name':
+            parent.__setattr__('project_name', value)
+        # for project_* set both project and tenant
+        if key == 'project_id':
+            parent.__setattr__('tenant_id', value)
+        elif key == 'project_name':
+            parent.__setattr__('tenant_name', value)
+        # for *_domain_* set both user and project if not set yet
+        if key == 'user_domain_id':
+            if self.project_domain_id is None:
+                parent.__setattr__('project_domain_id', value)
+        if key == 'project_domain_id':
+            if self.user_domain_id is None:
+                parent.__setattr__('user_domain_id', value)
+        if key == 'user_domain_name':
+            if self.project_domain_name is None:
+                parent.__setattr__('project_domain_name', value)
+        if key == 'project_domain_name':
+            if self.user_domain_name is None:
+                parent.__setattr__('user_domain_name', value)
+        # support domain_name coming from config
+        if key == 'domain_name':
+            parent.__setattr__('user_domain_name', value)
+            parent.__setattr__('project_domain_name', value)
+        # finally trigger default behaviour for all attributes
+        parent.__setattr__(key, value)
+
+    def is_valid(self):
+        """
+        Valid combinations of v3 credentials (excluding token, scope)
+        - User id, password (optional domain)
+        - User name, password and its domain id/name
+        For the scope, valid combinations are:
+        - None
+        - Project id (optional domain)
+        - Project name and its domain id/name
+        """
+        valid_user_domain = any(
+            [self.user_domain_id is not None,
+             self.user_domain_name is not None])
+        valid_project_domain = any(
+            [self.project_domain_id is not None,
+             self.project_domain_name is not None])
+        valid_user = any(
+            [self.user_id is not None,
+             self.username is not None and valid_user_domain])
+        valid_project = any(
+            [self.project_name is None and self.project_id is None,
+             self.project_id is not None,
+             self.project_name is not None and valid_project_domain])
+        return all([self.password is not None, valid_user, valid_project])
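A brief usage sketch of the new credentials interface (illustrative only: it assumes a loaded tempest configuration with auth_version set to v2, and the names and values below are made up):

# Illustrative only; fill_in=False skips the Keystone round-trip.
from tempest import auth

creds = auth.get_credentials(username='demo', password='secret',
                             tenant_name='demo', fill_in=False)
assert creds.is_valid()
# Known-but-unset attributes read as None instead of raising AttributeError.
assert creds.user_id is None
# With fill_in=True (the default), the matching auth provider authenticates
# against Keystone and populates the missing ids from the token response.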
diff --git a/tempest/clients.py b/tempest/clients.py
index 0ebbd7c..646a2d9 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -161,6 +161,8 @@
     VolumeHostsClientJSON
 from tempest.services.volume.json.admin.volume_quotas_client import \
     VolumeQuotasClientJSON
+from tempest.services.volume.json.admin.volume_services_client import \
+    VolumesServicesClientJSON
 from tempest.services.volume.json.admin.volume_types_client import \
     VolumeTypesClientJSON
 from tempest.services.volume.json.backups_client import BackupsClientJSON
@@ -174,6 +176,8 @@
     VolumeHostsClientXML
 from tempest.services.volume.xml.admin.volume_quotas_client import \
     VolumeQuotasClientXML
+from tempest.services.volume.xml.admin.volume_services_client import \
+    VolumesServicesClientXML
 from tempest.services.volume.xml.admin.volume_types_client import \
     VolumeTypesClientXML
 from tempest.services.volume.xml.backups_client import BackupsClientXML
@@ -240,6 +244,8 @@
             self.availability_zone_client = AvailabilityZoneClientXML(
                 self.auth_provider)
             self.service_client = ServiceClientXML(self.auth_provider)
+            self.volume_services_client = VolumesServicesClientXML(
+                self.auth_provider)
             self.aggregates_client = AggregatesClientXML(self.auth_provider)
             self.services_client = ServicesClientXML(self.auth_provider)
             self.tenant_usages_client = TenantUsagesClientXML(
@@ -315,6 +321,8 @@
             self.services_v3_client = ServicesV3ClientJSON(
                 self.auth_provider)
             self.service_client = ServiceClientJSON(self.auth_provider)
+            self.volume_services_client = VolumesServicesClientJSON(
+                self.auth_provider)
             self.agents_v3_client = AgentsV3ClientJSON(self.auth_provider)
             self.aggregates_v3_client = AggregatesV3ClientJSON(
                 self.auth_provider)
@@ -432,24 +440,6 @@
                       service=service)
 
 
-class OrchestrationManager(Manager):
-    """
-    Manager object that uses the admin credentials for its
-    so that heat templates can create users
-    """
-    def __init__(self, interface='json', service=None):
-        base = super(OrchestrationManager, self)
-        # heat currently needs an admin user so that stacks can create users
-        # however the tests need the demo tenant so that the neutron
-        # private network is the default. DO NOT change this auth combination
-        # until heat can run with the demo user.
-        base.__init__(CONF.identity.admin_username,
-                      CONF.identity.admin_password,
-                      CONF.identity.tenant_name,
-                      interface=interface,
-                      service=service)
-
-
 class OfficialClientManager(manager.Manager):
     """
     Manager that provides access to the official python clients for
diff --git a/tempest/cmd/__init__.py b/tempest/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cmd/__init__.py
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
new file mode 100755
index 0000000..7b2e60b
--- /dev/null
+++ b/tempest/cmd/verify_tempest_config.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import argparse
+import json
+import os
+import sys
+import urlparse
+
+import httplib2
+from six.moves import configparser
+
+from tempest import clients
+from tempest import config
+
+
+CONF = config.CONF
+RAW_HTTP = httplib2.Http()
+CONF_FILE = None
+OUTFILE = sys.stdout
+
+
+def _get_config_file():
+    default_config_dir = os.path.join(os.path.abspath(
+        os.path.dirname(os.path.dirname(__file__))), "etc")
+    default_config_file = "tempest.conf"
+
+    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
+    conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
+    path = os.path.join(conf_dir, conf_file)
+    fd = open(path, 'rw')
+    return fd
+
+
+def change_option(option, group, value):
+    config_parse = configparser.SafeConfigParser()
+    config_parse.optionxform = str
+    config_parse.readfp(CONF_FILE)
+    if not config_parse.has_section(group):
+        config_parse.add_section(group)
+    config_parse.set(group, option, str(value))
+    global OUTFILE
+    config_parse.write(OUTFILE)
+
+
+def print_and_or_update(option, group, value, update):
+    print('Config option %s in group %s should be changed to: %s'
+          % (option, group, value))
+    if update:
+        change_option(option, group, value)
+
+
+def verify_glance_api_versions(os, update):
+    # Check glance api versions
+    __, versions = os.image_client.get_versions()
+    if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
+                                             versions):
+        print_and_or_update('api_v1', 'image_feature_enabled',
+                            not CONF.image_feature_enabled.api_v1, update)
+    if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'image_feature_enabled',
+                            not CONF.image_feature_enabled.api_v2, update)
+
+
+def _get_unversioned_endpoint(base_url):
+    endpoint_parts = urlparse.urlparse(base_url)
+    endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
+    return endpoint
+
+
+def _get_api_versions(os, service):
+    client_dict = {
+        'nova': os.servers_client,
+        'keystone': os.identity_client,
+        'cinder': os.volumes_client,
+    }
+    client_dict[service].skip_path()
+    endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
+    __, body = RAW_HTTP.request(endpoint, 'GET')
+    client_dict[service].reset_path()
+    body = json.loads(body)
+    if service == 'keystone':
+        versions = map(lambda x: x['id'], body['versions']['values'])
+    else:
+        versions = map(lambda x: x['id'], body['versions'])
+    return versions
+
+
+def verify_keystone_api_versions(os, update):
+    # Check keystone api versions
+    versions = _get_api_versions(os, 'keystone')
+    if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'identity_feature_enabled',
+                            not CONF.identity_feature_enabled.api_v2, update)
+    if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+        print_and_or_update('api_v3', 'identity_feature_enabled',
+                            not CONF.identity_feature_enabled.api_v3, update)
+
+
+def verify_nova_api_versions(os, update):
+    versions = _get_api_versions(os, 'nova')
+    if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
+        print_and_or_update('api_v3', 'compute_feature_enabled',
+                            not CONF.compute_feature_enabled.api_v3, update)
+
+
+def verify_cinder_api_versions(os, update):
+    # Check cinder api versions
+    versions = _get_api_versions(os, 'cinder')
+    if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+        print_and_or_update('api_v1', 'volume_feature_enabled',
+                            not CONF.volume_feature_enabled.api_v1, update)
+    if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+        print_and_or_update('api_v2', 'volume_feature_enabled',
+                            not CONF.volume_feature_enabled.api_v2, update)
+
+
+def get_extension_client(os, service):
+    extensions_client = {
+        'nova': os.extensions_client,
+        'nova_v3': os.extensions_v3_client,
+        'cinder': os.volumes_extension_client,
+        'neutron': os.network_client,
+        'swift': os.account_client,
+    }
+    if service not in extensions_client:
+        print('No tempest extensions client for %s' % service)
+        exit(1)
+    return extensions_client[service]
+
+
+def get_enabled_extensions(service):
+    extensions_options = {
+        'nova': CONF.compute_feature_enabled.api_extensions,
+        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
+        'cinder': CONF.volume_feature_enabled.api_extensions,
+        'neutron': CONF.network_feature_enabled.api_extensions,
+        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
+    }
+    if service not in extensions_options:
+        print('No supported extensions list option for %s' % service)
+        exit(1)
+    return extensions_options[service]
+
+
+def verify_extensions(os, service, results):
+    extensions_client = get_extension_client(os, service)
+    __, resp = extensions_client.list_extensions()
+    if isinstance(resp, dict):
+        # Neutron's extension 'name' field is not a single word (it has
+        # spaces in the string). Since that can't be used for a list
+        # option, the api_extension option in the network-feature-enabled
+        # group uses the alias instead of the name.
+        if service == 'neutron':
+            extensions = map(lambda x: x['alias'], resp['extensions'])
+        elif service == 'swift':
+            # Remove Swift general information from extensions list
+            resp.pop('swift')
+            extensions = resp.keys()
+        else:
+            extensions = map(lambda x: x['name'], resp['extensions'])
+
+    else:
+        extensions = map(lambda x: x['name'], resp)
+    if not results.get(service):
+        results[service] = {}
+    extensions_opt = get_enabled_extensions(service)
+    if extensions_opt[0] == 'all':
+        results[service]['extensions'] = extensions
+        return results
+    # Verify that all configured extensions are actually enabled
+    for extension in extensions_opt:
+        results[service][extension] = extension in extensions
+    # Verify that there aren't additional extensions enabled that aren't
+    # specified in the config list
+    for extension in extensions:
+        if extension not in extensions_opt:
+            results[service][extension] = False
+    return results
+
+
+def display_results(results, update, replace):
+    update_dict = {
+        'swift': 'object-storage-feature-enabled',
+        'nova': 'compute-feature-enabled',
+        'nova_v3': 'compute-feature-enabled',
+        'cinder': 'volume-feature-enabled',
+        'neutron': 'network-feature-enabled',
+    }
+    for service in results:
+        # If all extensions are specified as being enabled there is no way
+        # to verify this, so we just assume it to be true
+        if results[service].get('extensions'):
+            if replace:
+                output_list = results[service].get('extensions')
+            else:
+                output_list = ['all']
+        else:
+            extension_list = get_enabled_extensions(service)
+            output_list = []
+            for extension in results[service]:
+                if not results[service][extension]:
+                    if extension in extension_list:
+                        print("%s extension: %s should not be included in the "
+                              "list of enabled extensions" % (service,
+                                                              extension))
+                    else:
+                        print("%s extension: %s should be included in the list"
+                              " of enabled extensions" % (service, extension))
+                        output_list.append(extension)
+                else:
+                    output_list.append(extension)
+        if update:
+            # Sort List
+            output_list.sort()
+            # Convert list to a string
+            output_string = ', '.join(output_list)
+            if service == 'swift':
+                change_option('discoverable_apis', update_dict[service],
+                              output_string)
+            elif service == 'nova_v3':
+                change_option('api_v3_extensions', update_dict[service],
+                              output_string)
+            else:
+                change_option('api_extensions', update_dict[service],
+                              output_string)
+
+
+def check_service_availability(os, update):
+    services = []
+    avail_services = []
+    codename_match = {
+        'volume': 'cinder',
+        'network': 'neutron',
+        'image': 'glance',
+        'object_storage': 'swift',
+        'compute': 'nova',
+        'orchestration': 'heat',
+        'metering': 'ceilometer',
+        'telemetry': 'ceilometer',
+        'data_processing': 'sahara',
+        'baremetal': 'ironic',
+        'identity': 'keystone',
+        'queuing': 'marconi',
+        'database': 'trove'
+    }
+    # Get catalog list for endpoints to use for validation
+    __, endpoints = os.endpoints_client.list_endpoints()
+    for endpoint in endpoints:
+        __, service = os.service_client.get_service(endpoint['service_id'])
+        services.append(service['type'])
+    # Pull all catalog types from config file and compare against endpoint list
+    for cfgname in dir(CONF._config):
+        cfg = getattr(CONF, cfgname)
+        catalog_type = getattr(cfg, 'catalog_type', None)
+        if not catalog_type:
+            continue
+        else:
+            if cfgname == 'identity':
+                # Keystone is a required service for tempest
+                continue
+            if catalog_type not in services:
+                if getattr(CONF.service_available, codename_match[cfgname]):
+                    print('Endpoint type %s not found; either disable service '
+                          '%s or fix the catalog_type in the config file' % (
+                          catalog_type, codename_match[cfgname]))
+                    if update:
+                        change_option(codename_match[cfgname],
+                                      'service_available', False)
+            else:
+                if not getattr(CONF.service_available,
+                               codename_match[cfgname]):
+                    print('Endpoint type %s is available, service %s should be'
+                          ' set as available in the config file.' % (
+                          catalog_type, codename_match[cfgname]))
+                    if update:
+                        change_option(codename_match[cfgname],
+                                      'service_available', True)
+                else:
+                    avail_services.append(codename_match[cfgname])
+    return avail_services
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-u', '--update', action='store_true',
+                        help='Update the config file with results from api '
+                             'queries. This assumes whatever is set in the '
+                             'config file is incorrect. For endpoint checks, '
+                             'where either the catalog type or the '
+                             'service_available option could be wrong, the '
+                             'service_available option is assumed to be '
+                             'incorrect and is changed')
+    parser.add_argument('-o', '--output',
+                        help="Output file to write an updated config file to. "
+                             "This has to be a separate file from the "
+                             "original config file. If one isn't specified "
+                             "with -u the new config file will be printed to "
+                             "STDOUT")
+    parser.add_argument('-r', '--replace-ext', action='store_true',
+                        help="If specified the all option will be replaced "
+                             "with a full list of extensions")
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    print('Running config verification...')
+    opts = parse_args()
+    update = opts.update
+    replace = opts.replace_ext
+    global CONF_FILE
+    global OUTFILE
+    if update:
+        CONF_FILE = _get_config_file()
+        if opts.output:
+            OUTFILE = open(opts.output, 'w+')
+    os = clients.ComputeAdminManager(interface='json')
+    services = check_service_availability(os, update)
+    results = {}
+    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
+        if service == 'nova_v3' and 'nova' not in services:
+            continue
+        elif service not in services:
+            continue
+        results = verify_extensions(os, service, results)
+    verify_keystone_api_versions(os, update)
+    verify_glance_api_versions(os, update)
+    verify_nova_api_versions(os, update)
+    verify_cinder_api_versions(os, update)
+    display_results(results, update, replace)
+    if CONF_FILE:
+        CONF_FILE.close()
+    OUTFILE.close()
+
+
+if __name__ == "__main__":
+    main()
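The version checks above issue a raw GET against the unversioned endpoint root. A small sketch of the URL reduction done by _get_unversioned_endpoint (the endpoint value is made up):

# Sketch only: reducing a versioned endpoint to its root, as done above.
import urlparse

base_url = 'http://192.0.2.10:8774/v2/3b2f11e59f0c4b648f4a90b9b9b8a6c1'
parts = urlparse.urlparse(base_url)
print(parts.scheme + '://' + parts.netloc)  # -> http://192.0.2.10:8774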
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 87d65d0..57b98f7 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -123,7 +123,7 @@
                 raise Exception("non-integer list types not supported")
         result = []
         if schema_type not in self.types_dict:
-            raise Exception("generator (%s) doesn't support type: %s"
+            raise TypeError("generator (%s) doesn't support type: %s"
                             % (self.__class__.__name__, schema_type))
         for generator in self.types_dict[schema_type]:
             ret = generator(schema)
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index c54a8e8..9a50c0b 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -17,6 +17,7 @@
 import keystoneclient.v2_0.client as keystoneclient
 import neutronclient.v2_0.client as neutronclient
 
+from tempest import auth
 from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import config
@@ -33,6 +34,7 @@
                  password='pass', network_resources=None):
         self.network_resources = network_resources
         self.isolated_creds = {}
+        self.isolated_creds_old_style = {}
         self.isolated_net_resources = {}
         self.ports = []
         self.name = name
@@ -185,22 +187,19 @@
                 self._assign_user_role(tenant['id'], user['id'], role['id'])
             else:
                 self._assign_user_role(tenant.id, user.id, role.id)
-        return user, tenant
+        return self._get_credentials(user, tenant), user, tenant
 
-    def _get_cred_names(self, user, tenant):
+    def _get_credentials(self, user, tenant):
         if self.tempest_client:
-            username = user.get('name')
-            tenant_name = tenant.get('name')
+            user_get = user.get
+            tenant_get = tenant.get
         else:
-            username = user.name
-            tenant_name = tenant.name
-        return username, tenant_name
-
-    def _get_tenant_id(self, tenant):
-        if self.tempest_client:
-            return tenant.get('id')
-        else:
-            return tenant.id
+            user_get = user.__dict__.get
+            tenant_get = tenant.__dict__.get
+        return auth.get_credentials(
+            username=user_get('name'), user_id=user_get('id'),
+            tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
+            password=self.password)
 
     def _create_network_resources(self, tenant_id):
         network = None
@@ -315,22 +314,28 @@
             self.network_admin_client.add_interface_router(router_id, body)
 
     def get_primary_tenant(self):
-        return self.isolated_creds.get('primary')[1]
+        # Deprecated. Maintained until all tests are ported
+        return self.isolated_creds_old_style.get('primary')[1]
 
     def get_primary_user(self):
-        return self.isolated_creds.get('primary')[0]
+        # Deprecated. Maintained until all tests are ported
+        return self.isolated_creds_old_style.get('primary')[0]
 
     def get_alt_tenant(self):
-        return self.isolated_creds.get('alt')[1]
+        # Deprecated. Maintained until all tests are ported
+        return self.isolated_creds_old_style.get('alt')[1]
 
     def get_alt_user(self):
-        return self.isolated_creds.get('alt')[0]
+        # Deprecated. Maintained until all tests are ported
+        return self.isolated_creds_old_style.get('alt')[0]
 
     def get_admin_tenant(self):
-        return self.isolated_creds.get('admin')[1]
+        # Deprecated. Maintained until all tests are ported
+        return self.isolated_creds_old_style.get('admin')[1]
 
     def get_admin_user(self):
-        return self.isolated_creds.get('admin')[0]
+        # Deprecated. Maintained until all tests are ported
+        return self.isolated_creds_old_style.get('admin')[0]
 
     def get_primary_network(self):
         return self.isolated_net_resources.get('primary')[0]
@@ -359,62 +364,38 @@
     def get_alt_router(self):
         return self.isolated_net_resources.get('alt')[2]
 
-    def get_primary_creds(self):
-        if self.isolated_creds.get('primary'):
-            user, tenant = self.isolated_creds['primary']
-            username, tenant_name = self._get_cred_names(user, tenant)
+    def get_credentials(self, credential_type, old_style):
+        if self.isolated_creds.get(credential_type):
+            credentials = self.isolated_creds[credential_type]
         else:
-            user, tenant = self._create_creds()
-            username, tenant_name = self._get_cred_names(user, tenant)
-            self.isolated_creds['primary'] = (user, tenant)
-            LOG.info("Acquired isolated creds:\n user: %s, tenant: %s"
-                     % (username, tenant_name))
+            is_admin = (credential_type == 'admin')
+            credentials, user, tenant = self._create_creds(admin=is_admin)
+            self.isolated_creds[credential_type] = credentials
+            # Maintained until tests are ported
+            self.isolated_creds_old_style[credential_type] = (user, tenant)
+            LOG.info("Acquired isolated creds:\n credentials: %s"
+                     % credentials)
             if CONF.service_available.neutron:
                 network, subnet, router = self._create_network_resources(
-                    self._get_tenant_id(tenant))
-                self.isolated_net_resources['primary'] = (
+                    credentials.tenant_id)
+                self.isolated_net_resources[credential_type] = (
                     network, subnet, router,)
                 LOG.info("Created isolated network resources for : \n"
-                         + " user: %s, tenant: %s" % (username, tenant_name))
-        return username, tenant_name, self.password
+                         + " credentials: %s" % credentials)
+        if old_style:
+            return (credentials.username, credentials.tenant_name,
+                    credentials.password)
+        else:
+            return credentials
 
-    def get_admin_creds(self):
-        if self.isolated_creds.get('admin'):
-            user, tenant = self.isolated_creds['admin']
-            username, tenant_name = self._get_cred_names(user, tenant)
-        else:
-            user, tenant = self._create_creds(admin=True)
-            username, tenant_name = self._get_cred_names(user, tenant)
-            self.isolated_creds['admin'] = (user, tenant)
-            LOG.info("Acquired admin isolated creds:\n user: %s, tenant: %s"
-                     % (username, tenant_name))
-            if CONF.service_available.neutron:
-                network, subnet, router = self._create_network_resources(
-                    self._get_tenant_id(tenant))
-                self.isolated_net_resources['admin'] = (
-                    network, subnet, router,)
-                LOG.info("Created isolated network resources for : \n"
-                         + " user: %s, tenant: %s" % (username, tenant_name))
-        return username, tenant_name, self.password
+    def get_primary_creds(self, old_style=True):
+        return self.get_credentials('primary', old_style)
 
-    def get_alt_creds(self):
-        if self.isolated_creds.get('alt'):
-            user, tenant = self.isolated_creds['alt']
-            username, tenant_name = self._get_cred_names(user, tenant)
-        else:
-            user, tenant = self._create_creds()
-            username, tenant_name = self._get_cred_names(user, tenant)
-            self.isolated_creds['alt'] = (user, tenant)
-            LOG.info("Acquired alt isolated creds:\n user: %s, tenant: %s"
-                     % (username, tenant_name))
-            if CONF.service_available.neutron:
-                network, subnet, router = self._create_network_resources(
-                    self._get_tenant_id(tenant))
-                self.isolated_net_resources['alt'] = (
-                    network, subnet, router,)
-                LOG.info("Created isolated network resources for : \n"
-                         + " user: %s, tenant: %s" % (username, tenant_name))
-        return username, tenant_name, self.password
+    def get_admin_creds(self, old_style=True):
+        return self.get_credentials('admin', old_style)
+
+    def get_alt_creds(self, old_style=True):
+        return self.get_credentials('alt', old_style)
 
     def _clear_isolated_router(self, router_id, router_name):
         net_client = self.network_admin_client
@@ -505,29 +486,16 @@
         if not self.isolated_creds:
             return
         self._clear_isolated_net_resources()
-        for cred in self.isolated_creds:
-            user, tenant = self.isolated_creds.get(cred)
+        for creds in self.isolated_creds.itervalues():
             try:
-                if self.tempest_client:
-                    self._delete_user(user['id'])
-                else:
-                    self._delete_user(user.id)
+                self._delete_user(creds.user_id)
             except exceptions.NotFound:
-                if self.tempest_client:
-                    name = user['name']
-                else:
-                    name = user.name
-                LOG.warn("user with name: %s not found for delete" % name)
+                LOG.warn("user with name: %s not found for delete" %
+                         creds.username)
                 pass
             try:
-                if self.tempest_client:
-                    self._delete_tenant(tenant['id'])
-                else:
-                    self._delete_tenant(tenant.id)
+                self._delete_tenant(creds.tenant_id)
             except exceptions.NotFound:
-                if self.tempest_client:
-                    name = tenant['name']
-                else:
-                    name = tenant.name
-                LOG.warn("tenant with name: %s not found for delete" % name)
+                LOG.warn("tenant with name: %s not found for delete" %
+                         creds.tenant_name)
                 pass
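A sketch of the two call styles the refactored interface now supports (class and method names come from the hunk above; actually creating credentials still requires admin credentials in the tempest configuration, so this is illustrative only):

# Illustrative only.
from tempest.common import isolated_creds

creds_mgr = isolated_creds.IsolatedCreds('MyTestCase')

# Legacy behaviour (default): a (username, tenant_name, password) tuple.
username, tenant_name, password = creds_mgr.get_primary_creds()

# New behaviour: a full auth.Credentials object.
creds = creds_mgr.get_primary_creds(old_style=False)
print(creds.username)
print(creds.tenant_id)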
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 8c07d4f..8667445 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -282,7 +282,7 @@
         return ""
 
     def _log_request(self, method, req_url, resp,
-                     secs="", req_headers=None,
+                     secs="", req_headers={},
                      req_body=None, resp_body=None):
         # if we have the request id, put it in the right part of the log
         extra = dict(request_id=self._get_request_id(resp))
@@ -306,6 +306,8 @@
         # world this is important to match
         trace_regex = CONF.debug.trace_requests
         if trace_regex and re.search(trace_regex, caller_name):
+            if 'X-Auth-Token' in req_headers:
+                req_headers['X-Auth-Token'] = '<omitted>'
             log_fmt = """Request (%s): %s %s %s%s
     Request - Headers: %s
         Body: %s
@@ -369,7 +371,7 @@
             # Parse one-item-like xmls (user, role, etc)
             return common.xml_to_json(element)
 
-    def response_checker(self, method, url, headers, body, resp, resp_body):
+    def response_checker(self, method, resp, resp_body):
         if (resp.status in set((204, 205, 304)) or resp.status < 200 or
                 method.upper() == 'HEAD') and resp_body:
             raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
@@ -413,8 +415,7 @@
                           resp_body=resp_body)
 
         # Verify HTTP response codes
-        self.response_checker(method, url, req_headers, req_body, resp,
-                              resp_body)
+        self.response_checker(method, resp, resp_body)
 
         return resp, resp_body
 
@@ -593,10 +594,12 @@
                 msg = ("The status code(%s) is different than the expected "
                        "one(%s)") % (resp.status, response_code)
                 raise exceptions.InvalidHttpSuccessCode(msg)
-            response_schema = schema.get('response_body')
-            if response_schema:
+
+            # Check the body of a response
+            body_schema = schema.get('response_body')
+            if body_schema:
                 try:
-                    jsonschema.validate(body, response_schema)
+                    jsonschema.validate(body, body_schema)
                 except jsonschema.ValidationError as ex:
                     msg = ("HTTP response body is invalid (%s)") % ex
                     raise exceptions.InvalidHTTPResponseBody(msg)
@@ -605,6 +608,15 @@
                     msg = ("HTTP response body should not exist (%s)") % body
                     raise exceptions.InvalidHTTPResponseBody(msg)
 
+            # Check the header of a response
+            header_schema = schema.get('response_header')
+            if header_schema:
+                try:
+                    jsonschema.validate(resp, header_schema)
+                except jsonschema.ValidationError as ex:
+                    msg = ("HTTP response header is invalid (%s)") % ex
+                    raise exceptions.InvalidHTTPResponseHeader(msg)
+
 
 class NegativeRestClient(RestClient):
     """
diff --git a/tempest/config.py b/tempest/config.py
index b9fe572..634634d 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -72,6 +72,10 @@
                default=None,
                help="API key to use when authenticating.",
                secret=True),
+    cfg.StrOpt('domain_name',
+               default=None,
+               help="Domain name for authentication (Keystone V3)."
+                    "The same domain applies to user and project"),
     cfg.StrOpt('alt_username',
                default=None,
                help="Username of alternate user to use for Nova API "
@@ -84,6 +88,10 @@
                default=None,
                help="API key to use when authenticating as alternate user.",
                secret=True),
+    cfg.StrOpt('alt_domain_name',
+               default=None,
+               help="Alternate domain name for authentication (Keystone V3)."
+                    "The same domain applies to user and project"),
     cfg.StrOpt('admin_username',
                default=None,
                help="Administrative Username to use for "
@@ -96,6 +104,10 @@
                default=None,
                help="API key to use when authenticating as admin.",
                secret=True),
+    cfg.StrOpt('admin_domain_name',
+               default=None,
+               help="Admin domain name for authentication (Keystone V3)."
+                    "The same domain applies to user and project"),
 ]
 
 identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
@@ -159,6 +171,19 @@
     cfg.BoolOpt('run_ssh',
                 default=False,
                 help="Should the tests ssh to instances?"),
+    cfg.StrOpt('ssh_auth_method',
+               default='keypair',
+               help="Auth method used for authenticate to the instance. "
+                    "Valid choices are: keypair, configured, adminpass. "
+                    "keypair: start the servers with an ssh keypair. "
+                    "configured: use the configured user and password. "
+                    "adminpass: use the injected adminPass. "
+                    "disabled: avoid using ssh when it is an option."),
+    cfg.StrOpt('ssh_connect_method',
+               default='fixed',
+               help="How to connect to the instance? "
+                    "fixed: using the first ip belongs the fixed network "
+                    "floating: creating and using a floating ip"),
     cfg.StrOpt('ssh_user',
                default='root',
                help="User name used to authenticate to an instance."),
@@ -289,6 +314,10 @@
                default=None,
                help="API key to use when authenticating as admin.",
                secret=True),
+    cfg.StrOpt('domain_name',
+               default=None,
+               help="Domain name for authentication as admin (Keystone V3)."
+                    "The same domain applies to user and project"),
 ]
 
 image_group = cfg.OptGroup(name='image',
@@ -983,6 +1012,13 @@
             self.compute_admin.username = self.identity.admin_username
             self.compute_admin.password = self.identity.admin_password
             self.compute_admin.tenant_name = self.identity.admin_tenant_name
+        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+                             group='identity')
+        cfg.CONF.set_default('alt_domain_name',
+                             self.identity.admin_domain_name,
+                             group='identity')
+        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+                             group='compute-admin')
 
     def __init__(self, parse_conf=True):
         """Initialize a configuration from a conf directory and conf file."""
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 857e1e8..4eb1cea 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -118,7 +118,7 @@
 
 
 class StackResourceBuildErrorException(TempestException):
-    message = ("Resource %(resource_name) in stack %(stack_identifier)s is "
+    message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
                "in %(resource_status)s status due to "
                "'%(resource_status_reason)s'")
 
@@ -197,6 +197,10 @@
     message = "HTTP response body is invalid json or xml"
 
 
+class InvalidHTTPResponseHeader(RestClientException):
+    message = "HTTP response header is invalid"
+
+
 class InvalidContentType(RestClientException):
     message = "Invalid content type provided"
 
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 270851d..183d422 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -12,8 +12,11 @@
 #   License for the specific language governing permissions and limitations
 #   under the License.
 
+import os
 import re
 
+import pep8
+
 
 PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
                   'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
@@ -22,7 +25,7 @@
 PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
 TEST_DEFINITION = re.compile(r'^\s*def test.*')
 SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
-SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
 VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
 
 
@@ -55,6 +58,10 @@
 
 
 def no_setupclass_for_unit_tests(physical_line, filename):
+
+    if pep8.noqa(physical_line):
+        return
+
     if 'tempest/tests' in filename:
         if SETUPCLASS_DEFINITION.match(physical_line):
             return (physical_line.find('def'),
@@ -75,8 +82,32 @@
             return 0, "T106: Don't put vi configuration in source files"
 
 
+def service_tags_not_in_module_path(physical_line, filename):
+    """Check that a service tag isn't in the module path
+
+    A service tag should only be added if the service name isn't already in
+    the module path.
+
+    T107
+    """
+    # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
+    # created for services like heat which would cause false negatives for
+    # those tests, so just exclude the scenario tests.
+    if 'tempest/scenario' not in filename:
+        matches = SCENARIO_DECORATOR.match(physical_line)
+        if matches:
+            services = matches.group(1).split(',')
+            for service in services:
+                service_name = service.strip().strip("'")
+                modulepath = os.path.split(filename)[0]
+                if service_name in modulepath:
+                    return (physical_line.find(service_name),
+                            "T107: service tag should not be in path")
+
+
 def factory(register):
     register(import_no_clients_in_api)
     register(scenario_tests_need_service_tags)
     register(no_setupclass_for_unit_tests)
     register(no_vi_headers)
+    register(service_tags_not_in_module_path)
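A small, self-contained sketch of what the new T107 check does (the file path and decorator line below are made up for illustration):

# Illustrative only: extracting service tags and comparing them to the path.
import os
import re

SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')

line = "    @test.services('compute', 'image')"
filename = 'tempest/api/compute/servers/test_servers.py'

match = SCENARIO_DECORATOR.match(line)
for service in match.group(1).split(','):
    name = service.strip().strip("'")
    if name in os.path.split(filename)[0]:
        print("T107: service tag should not be in path (%s)" % name)
# -> flags 'compute', because the test already lives under tempest/api/compute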
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b7a30f8..0210c56 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -31,7 +31,7 @@
     Test large operations.
 
     This test below:
-    * Spin up multiple instances in one nova call
+    * Spin up multiple instances in one nova call, and repeat three times
     * as a regular user
     * TODO: same thing for cinder
 
@@ -69,3 +69,5 @@
             return
         self.glance_image_create()
         self.nova_boot()
+        self.nova_boot()
+        self.nova_boot()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 5f71461..d771aed 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -118,7 +118,7 @@
             self.floating_ips[floating_ip] = server
             self.server_ips[server.id] = floating_ip.floating_ip_address
         else:
-            self.server_ips[server.id] = server.networks[net.name][0]
+            self.server_ips[server.id] = server.networks[net['name']][0]
         self.assertTrue(self.servers_keypairs)
         return server
 
@@ -302,6 +302,7 @@
                     "http://{0}/".format(vip_ip)).read())
         return resp
 
+    @test.skip_because(bug='1295165')
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index b9ee040..c76a117 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -335,8 +335,6 @@
         if should_succeed:
             msg = "Timed out waiting for %s to become reachable" % ip
         else:
-            # todo(yfried): remove this line when bug 1252620 is fixed
-            return True
             msg = "%s is reachable" % ip
         try:
             self.assertTrue(self._check_remote_connectivity(access_point, ip,
@@ -422,11 +420,15 @@
         access_point_ssh = self._connect_to_access_point(tenant)
         mac_addr = access_point_ssh.get_mac_address()
         mac_addr = mac_addr.strip().lower()
-        port_list = self.network_client.list_ports()['ports']
+        # Get the fixed_ips and mac_address fields of all ports. Select
+        # only those two columns to reduce the size of the response.
+        port_list = self.network_client.list_ports(
+            fields=['fixed_ips', 'mac_address'])['ports']
         port_detail_list = [
             (port['fixed_ips'][0]['subnet_id'],
              port['fixed_ips'][0]['ip_address'],
-             port['mac_address'].lower()) for port in port_list
+             port['mac_address'].lower())
+            for port in port_list if port['fixed_ips']
         ]
         server_ip = self._get_server_ip(tenant.access_point)
         subnet_id = tenant.subnet.id
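A sketch of the new port handling with a made-up ports response: ports without fixed IPs, which would otherwise make port['fixed_ips'][0] raise an IndexError, are now skipped by the comprehension.

# Made-up response data, for illustration only.
port_list = [
    {'fixed_ips': [{'subnet_id': 'sub-1', 'ip_address': '10.0.0.3'}],
     'mac_address': 'FA:16:3E:AA:BB:CC'},
    {'fixed_ips': [], 'mac_address': 'fa:16:3e:dd:ee:ff'},  # no fixed ip
]
port_detail_list = [
    (port['fixed_ips'][0]['subnet_id'],
     port['fixed_ips'][0]['ip_address'],
     port['mac_address'].lower())
    for port in port_list if port['fixed_ips']
]
print(port_detail_list)  # -> [('sub-1', '10.0.0.3', 'fa:16:3e:aa:bb:cc')]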
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
index 19821e7..98d8896 100644
--- a/tempest/services/compute/json/agents_client.py
+++ b/tempest/services/compute/json/agents_client.py
@@ -15,6 +15,7 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import agents as common_schema
 from tempest.api_schema.compute.v2 import agents as schema
 from tempest.common import rest_client
 from tempest import config
@@ -37,7 +38,9 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
-        return resp, json.loads(body).get('agents')
+        body = json.loads(body)
+        self.validate_response(common_schema.list_agents, resp, body)
+        return resp, body['agents']
 
     def create_agent(self, **kwargs):
         """Create an agent build."""
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index fe67102..5c0b5d3 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -16,6 +16,7 @@
 import json
 
 from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -61,11 +62,14 @@
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
 
         body = json.loads(body)
+        self.validate_response(schema.update_aggregate, resp, body)
         return resp, body['aggregate']
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
-        return self.delete("os-aggregates/%s" % str(aggregate_id))
+        resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+        self.validate_response(v2_schema.delete_aggregate, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         try:
diff --git a/tempest/services/compute/json/availability_zone_client.py b/tempest/services/compute/json/availability_zone_client.py
index 9278d5b..1c067e8 100644
--- a/tempest/services/compute/json/availability_zone_client.py
+++ b/tempest/services/compute/json/availability_zone_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v2 import availability_zone as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,9 +32,12 @@
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list, resp, body)
         return resp, body['availabilityZoneInfo']
 
     def get_availability_zone_list_detail(self):
         resp, body = self.get('os-availability-zone/detail')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list_detail, resp,
+                               body)
         return resp, body['availabilityZoneInfo']
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +33,7 @@
         url = "os-certificates/%s" % (id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_certificate, resp, body)
         return resp, body['certificate']
 
     def create_certificate(self):
@@ -38,4 +41,5 @@
         url = "os-certificates"
         resp, body = self.post(url, None)
         body = json.loads(body)
+        self.validate_response(v2schema.create_certificate, resp, body)
         return resp, body['certificate']
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 0206b82..89cbe1d 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -18,6 +18,8 @@
 
 from tempest.api_schema.compute import flavors as common_schema
 from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+    as schema_extra_specs
 from tempest.api_schema.compute.v2 import flavors as v2schema
 from tempest.common import rest_client
 from tempest import config
@@ -54,6 +56,7 @@
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
         body = json.loads(body)
+        self.validate_response(v2schema.create_get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -77,11 +80,14 @@
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
+        self.validate_response(v2schema.create_get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
-        return self.delete("flavors/%s" % str(flavor_id))
+        resp, body = self.delete("flavors/{0}".format(flavor_id))
+        self.validate_response(v2schema.delete_flavor, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         # Did not use get_flavor_details(id) for verification as it gives
@@ -99,12 +105,16 @@
         resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
         resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -112,6 +122,8 @@
         resp, body = self.get('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
                               key))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -119,12 +131,16 @@
         resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                               (flavor_id, key), json.dumps(kwargs))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def unset_flavor_extra_spec(self, flavor_id, key):
         """Unsets extra Specs from the mentioned flavor."""
-        return self.delete('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
-                           key))
+        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
+                                 (str(flavor_id), key))
+        self.validate_response(v2schema.unset_flavor_extra_specs, resp, body)
+        return resp, body
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index bd39a04..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -70,6 +70,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_images_details, resp, body)
         return resp, body['images']
 
     def get_image(self, image_id):
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 9928b94..2f165a2 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -16,6 +16,7 @@
 import json
 import time
 
+from tempest.api_schema.compute import interfaces as common_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -58,6 +59,7 @@
     def delete_interface(self, server, port_id):
         resp, body = self.delete('servers/%s/os-interface/%s' % (server,
                                                                  port_id))
+        self.validate_response(common_schema.delete_interface, resp, body)
         return resp, body
 
     def wait_for_interface_status(self, server, port_id, status):
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
index a13349e..beef5d2 100644
--- a/tempest/services/compute/json/migrations_client.py
+++ b/tempest/services/compute/json/migrations_client.py
@@ -15,6 +15,7 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import migrations as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -36,4 +37,5 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_migrations, resp, body)
         return resp, body['migrations']
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 9bddf2c..7e828d8 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -110,6 +110,7 @@
                                   post_body)
 
         body = json.loads(body)
+        self.validate_response(schema.quota_set_update, resp, body)
         return resp, body['quota_set']
 
     def delete_quota_set(self, tenant_id):
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 7411fb7..c19baf3 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -47,6 +47,7 @@
         url = "os-security-groups/%s" % str(security_group_id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_security_group, resp, body)
         return resp, body['security_group']
 
     def create_security_group(self, name, description):
@@ -62,6 +63,7 @@
         post_body = json.dumps({'security_group': post_body})
         resp, body = self.post('os-security-groups', post_body)
         body = json.loads(body)
+        self.validate_response(schema.get_security_group, resp, body)
         return resp, body['security_group']
 
     def update_security_group(self, security_group_id, name=None,
@@ -81,6 +83,7 @@
         resp, body = self.put('os-security-groups/%s' % str(security_group_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_security_group, resp, body)
         return resp, body['security_group']
 
     def delete_security_group(self, security_group_id):
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index cf1dad9..f6b76e9 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -126,6 +126,7 @@
         post_body = json.dumps({'server': post_body})
         resp, body = self.put("servers/%s" % str(server_id), post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_server, resp, body)
         return resp, body['server']
 
     def get_server(self, server_id):
@@ -136,7 +137,9 @@
 
     def delete_server(self, server_id):
         """Deletes the given server."""
-        return self.delete("servers/%s" % str(server_id))
+        resp, body = self.delete("servers/%s" % str(server_id))
+        self.validate_response(common_schema.delete_server, resp, body)
+        return resp, body
 
     def list_servers(self, params=None):
         """Lists all servers for a user."""
@@ -274,6 +277,7 @@
     def list_server_metadata(self, server_id):
         resp, body = self.get("servers/%s/metadata" % str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.list_server_metadata, resp, body)
         return resp, body['metadata']
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -284,6 +288,7 @@
         resp, body = self.put('servers/%s/metadata' % str(server_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.set_server_metadata, resp, body)
         return resp, body['metadata']
 
     def update_server_metadata(self, server_id, meta):
@@ -296,6 +301,8 @@
     def get_server_metadata_item(self, server_id, key):
         resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['meta']
 
     def set_server_metadata_item(self, server_id, key, meta):
@@ -303,11 +310,15 @@
         resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['meta']
 
     def delete_server_metadata_item(self, server_id, key):
         resp, body = self.delete("servers/%s/metadata/%s" %
                                  (str(server_id), key))
+        self.validate_response(common_schema.delete_server_metadata_item,
+                               resp, body)
         return resp, body
 
     def stop(self, server_id, **kwargs):
@@ -326,12 +337,15 @@
         })
         resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
                                post_body)
+        body = json.loads(body)
+        self.validate_response(schema.attach_volume, resp, body)
         return resp, body
 
     def detach_volume(self, server_id, volume_id):
         """Detaches a volume from a server instance."""
         resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
                                  (server_id, volume_id))
+        self.validate_response(schema.detach_volume, resp, body)
         return resp, body
 
     def add_security_group(self, server_id, name):
diff --git a/tempest/services/compute/json/tenant_usages_client.py b/tempest/services/compute/json/tenant_usages_client.py
index f3a67dd..f8adae7 100644
--- a/tempest/services/compute/json/tenant_usages_client.py
+++ b/tempest/services/compute/json/tenant_usages_client.py
@@ -16,6 +16,7 @@
 import json
 import urllib
 
+from tempest.api_schema.compute.v2 import tenant_usages as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -35,6 +36,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_tenant, resp, body)
         return resp, body['tenant_usages'][0]
 
     def get_tenant_usage(self, tenant_id, params=None):
@@ -44,4 +46,5 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_tenant, resp, body)
         return resp, body['tenant_usage']
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index e1c286c..48be54c 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -15,6 +15,7 @@
 import json
 import urllib
 
+from tempest.api_schema.compute import agents as common_schema
 from tempest.api_schema.compute.v3 import agents as schema
 from tempest.common import rest_client
 from tempest import config
@@ -34,7 +35,9 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
-        return resp, self._parse_resp(body)
+        body = json.loads(body)
+        self.validate_response(common_schema.list_agents, resp, body)
+        return resp, body['agents']
 
     def create_agent(self, **kwargs):
         """Create an agent build."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 8d7440e..2487ee7 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -16,6 +16,7 @@
 import json
 
 from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -61,11 +62,14 @@
         resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
 
         body = json.loads(body)
+        self.validate_response(schema.update_aggregate, resp, body)
         return resp, body['aggregate']
 
     def delete_aggregate(self, aggregate_id):
         """Deletes the given aggregate."""
-        return self.delete("os-aggregates/%s" % str(aggregate_id))
+        resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+        self.validate_response(v3_schema.delete_aggregate, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         try:
diff --git a/tempest/services/compute/v3/json/availability_zone_client.py b/tempest/services/compute/v3/json/availability_zone_client.py
index bad2de9..bf74e68 100644
--- a/tempest/services/compute/v3/json/availability_zone_client.py
+++ b/tempest/services/compute/v3/json/availability_zone_client.py
@@ -15,6 +15,7 @@
 
 import json
 
+from tempest.api_schema.compute.v3 import availability_zone as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,9 +32,12 @@
     def get_availability_zone_list(self):
         resp, body = self.get('os-availability-zone')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list, resp, body)
         return resp, body['availability_zone_info']
 
     def get_availability_zone_list_detail(self):
         resp, body = self.get('os-availability-zone/detail')
         body = json.loads(body)
+        self.validate_response(schema.get_availability_zone_list_detail, resp,
+                               body)
         return resp, body['availability_zone_info']
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
 
 import json
 
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -31,6 +33,7 @@
         url = "os-certificates/%s" % (id)
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.get_certificate, resp, body)
         return resp, body['certificate']
 
     def create_certificate(self):
@@ -38,4 +41,5 @@
         url = "os-certificates"
         resp, body = self.post(url, None)
         body = json.loads(body)
+        self.validate_response(v3schema.create_certificate, resp, body)
         return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 189fe3f..5afab5a 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -18,6 +18,8 @@
 
 from tempest.api_schema.compute import flavors as common_schema
 from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+    as schema_extra_specs
 from tempest.api_schema.compute.v3 import flavors as v3schema
 from tempest.common import rest_client
 from tempest import config
@@ -54,6 +56,7 @@
     def get_flavor_details(self, flavor_id):
         resp, body = self.get("flavors/%s" % str(flavor_id))
         body = json.loads(body)
+        self.validate_response(v3schema.get_flavor_details, resp, body)
         return resp, body['flavor']
 
     def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -77,11 +80,14 @@
         resp, body = self.post('flavors', post_body)
 
         body = json.loads(body)
+        self.validate_response(v3schema.create_flavor_details, resp, body)
         return resp, body['flavor']
 
     def delete_flavor(self, flavor_id):
         """Deletes the given flavor."""
-        return self.delete("flavors/%s" % str(flavor_id))
+        resp, body = self.delete("flavors/{0}".format(flavor_id))
+        self.validate_response(v3schema.delete_flavor, resp, body)
+        return resp, body
 
     def is_resource_deleted(self, id):
         # Did not use get_flavor_details(id) for verification as it gives
@@ -99,12 +105,15 @@
         resp, body = self.post('flavors/%s/flavor-extra-specs' % flavor_id,
                                post_body)
         body = json.loads(body)
+        self.validate_response(v3schema.set_flavor_extra_specs, resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec(self, flavor_id):
         """Gets extra Specs details of the mentioned flavor."""
         resp, body = self.get('flavors/%s/flavor-extra-specs' % flavor_id)
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs,
+                               resp, body)
         return resp, body['extra_specs']
 
     def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -112,6 +121,8 @@
         resp, body = self.get('flavors/%s/flavor-extra-specs/%s' %
                               (str(flavor_id), key))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -119,12 +130,16 @@
         resp, body = self.put('flavors/%s/flavor-extra-specs/%s' %
                               (flavor_id, key), json.dumps(kwargs))
         body = json.loads(body)
+        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+                               resp, body)
         return resp, body
 
     def unset_flavor_extra_spec(self, flavor_id, key):
         """Unsets extra Specs from the mentioned flavor."""
-        return self.delete('flavors/%s/flavor-extra-specs/%s' %
-                           (str(flavor_id), key))
+        resp, body = self.delete('flavors/%s/flavor-extra-specs/%s' %
+                                 (str(flavor_id), key))
+        self.validate_response(v3schema.unset_flavor_extra_specs, resp, body)
+        return resp, body
 
     def list_flavor_access(self, flavor_id):
         """Gets flavor access information given the flavor id."""
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index b45426c..25c8db7 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -16,6 +16,7 @@
 import json
 import time
 
+from tempest.api_schema.compute import interfaces as common_schema
 from tempest.common import rest_client
 from tempest import config
 from tempest import exceptions
@@ -59,6 +60,7 @@
         resp, body =\
             self.delete('servers/%s/os-attach-interfaces/%s' % (server,
                                                                 port_id))
+        self.validate_response(common_schema.delete_interface, resp, body)
         return resp, body
 
     def wait_for_interface_status(self, server, port_id, status):
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
index efd39b7..c821567 100644
--- a/tempest/services/compute/v3/json/migration_client.py
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -12,8 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import json
 import urllib
 
+from tempest.api_schema.compute import migrations as schema
 from tempest.common import rest_client
 from tempest import config
 
@@ -34,4 +36,6 @@
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
-        return resp, self._parse_resp(body)
+        body = json.loads(body)
+        self.validate_response(schema.list_migrations, resp, body)
+        return resp, body['migrations']
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index caeb315..a6d49f1 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -126,6 +126,7 @@
         post_body = json.dumps({'server': post_body})
         resp, body = self.put("servers/%s" % str(server_id), post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_server, resp, body)
         return resp, body['server']
 
     def get_server(self, server_id):
@@ -136,7 +137,9 @@
 
     def delete_server(self, server_id):
         """Deletes the given server."""
-        return self.delete("servers/%s" % str(server_id))
+        resp, body = self.delete("servers/%s" % str(server_id))
+        self.validate_response(common_schema.delete_server, resp, body)
+        return resp, body
 
     def list_servers(self, params=None):
         """Lists all servers for a user."""
@@ -282,6 +285,7 @@
     def list_server_metadata(self, server_id):
         resp, body = self.get("servers/%s/metadata" % str(server_id))
         body = json.loads(body)
+        self.validate_response(common_schema.list_server_metadata, resp, body)
         return resp, body['metadata']
 
     def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -292,6 +296,7 @@
         resp, body = self.put('servers/%s/metadata' % str(server_id),
                               post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.set_server_metadata, resp, body)
         return resp, body['metadata']
 
     def update_server_metadata(self, server_id, meta):
@@ -304,6 +309,8 @@
     def get_server_metadata_item(self, server_id, key):
         resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['metadata']
 
     def set_server_metadata_item(self, server_id, key, meta):
@@ -311,11 +318,15 @@
         resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                               post_body)
         body = json.loads(body)
+        self.validate_response(schema.set_get_server_metadata_item,
+                               resp, body)
         return resp, body['metadata']
 
     def delete_server_metadata_item(self, server_id, key):
         resp, body = self.delete("servers/%s/metadata/%s" %
                                  (str(server_id), key))
+        self.validate_response(common_schema.delete_server_metadata_item,
+                               resp, body)
         return resp, body
 
     def stop(self, server_id, **kwargs):
@@ -326,12 +337,17 @@
 
     def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
         """Attaches a volume to a server instance."""
-        return self.action(server_id, 'attach', None, volume_id=volume_id,
-                           device=device)
+        resp, body = self.action(server_id, 'attach', None,
+                                 volume_id=volume_id, device=device)
+        self.validate_response(schema.attach_detach_volume, resp, body)
+        return resp, body
 
     def detach_volume(self, server_id, volume_id):
         """Detaches a volume from a server instance."""
-        return self.action(server_id, 'detach', None, volume_id=volume_id)
+        resp, body = self.action(server_id, 'detach', None,
+                                 volume_id=volume_id)
+        self.validate_response(schema.attach_detach_volume, resp, body)
+        return resp, body
 
     def live_migrate_server(self, server_id, dest_host, use_block_migration):
         """This should be called with administrator privileges ."""
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index e96b44b..c7b5f93 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -32,7 +32,6 @@
 
         It returns pair: resp and parsed resource(s) body.
         """
-
         resp, body = req_fun(uri, headers={
             'Content-Type': 'application/json'
         }, *args, **kwargs)
@@ -48,7 +47,7 @@
     def get_node_group_template(self, tmpl_id):
         """Returns the details of a single node group template."""
 
-        uri = "node-group-templates/%s" % tmpl_id
+        uri = 'node-group-templates/%s' % tmpl_id
         return self._request_and_parse(self.get, uri, 'node_group_template')
 
     def create_node_group_template(self, name, plugin_name, hadoop_version,
@@ -59,7 +58,7 @@
         It supports passing additional params using kwargs and returns created
         object.
         """
-        uri = "node-group-templates"
+        uri = 'node-group-templates'
         body = kwargs.copy()
         body.update({
             'name': name,
@@ -75,7 +74,7 @@
     def delete_node_group_template(self, tmpl_id):
         """Deletes the specified node group template by id."""
 
-        uri = "node-group-templates/%s" % tmpl_id
+        uri = 'node-group-templates/%s' % tmpl_id
         return self.delete(uri)
 
     def list_plugins(self):
@@ -87,7 +86,45 @@
     def get_plugin(self, plugin_name, plugin_version=None):
         """Returns the details of a single plugin."""
 
-        uri = "plugins/%s" % plugin_name
+        uri = 'plugins/%s' % plugin_name
         if plugin_version:
             uri += '/%s' % plugin_version
         return self._request_and_parse(self.get, uri, 'plugin')
+
+    def list_cluster_templates(self):
+        """List all cluster templates for a user."""
+
+        uri = 'cluster-templates'
+        return self._request_and_parse(self.get, uri, 'cluster_templates')
+
+    def get_cluster_template(self, tmpl_id):
+        """Returns the details of a single cluster template."""
+
+        uri = 'cluster-templates/%s' % tmpl_id
+        return self._request_and_parse(self.get, uri, 'cluster_template')
+
+    def create_cluster_template(self, name, plugin_name, hadoop_version,
+                                node_groups, cluster_configs=None,
+                                **kwargs):
+        """Creates cluster template with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object.
+        """
+        uri = 'cluster-templates'
+        body = kwargs.copy()
+        body.update({
+            'name': name,
+            'plugin_name': plugin_name,
+            'hadoop_version': hadoop_version,
+            'node_groups': node_groups,
+            'cluster_configs': cluster_configs or dict(),
+        })
+        return self._request_and_parse(self.post, uri, 'cluster_template',
+                                       body=json.dumps(body))
+
+    def delete_cluster_template(self, tmpl_id):
+        """Deletes the specified cluster template by id."""
+
+        uri = 'cluster-templates/%s' % tmpl_id
+        return self.delete(uri)
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 34c61b0..2a797b2 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -104,7 +104,7 @@
         def _list(**filters):
             uri = self.get_uri(plural_name)
             if filters:
-                uri += '?' + urllib.urlencode(filters)
+                uri += '?' + urllib.urlencode(filters, doseq=1)
             resp, body = self.get(uri)
             result = {plural_name: self.deserialize_list(body)}
             return resp, result
@@ -120,14 +120,14 @@
         return _delete
 
     def _shower(self, resource_name):
-        def _show(resource_id, field_list=[]):
-            # field_list is a sequence of two-element tuples, with the
-            # first element being 'fields'. An example:
-            # [('fields', 'id'), ('fields', 'name')]
+        def _show(resource_id, **fields):
+            # fields is a dict whose key is 'fields' and whose value
+            # is a list of field names. An example:
+            # {'fields': ['id', 'name']}
             plural = self.pluralize(resource_name)
             uri = '%s/%s' % (self.get_uri(plural), resource_id)
-            if field_list:
-                uri += '?' + urllib.urlencode(field_list)
+            if fields:
+                uri += '?' + urllib.urlencode(fields, doseq=1)
             resp, body = self.get(uri)
             body = self.deserialize_single(body)
             return resp, body
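A quick, runnable illustration of what doseq=1 changes in the encoded query string, which is why both _list and the rewritten _show now pass it:

    import urllib

    # With doseq=1 each list element becomes its own query parameter,
    # the form Neutron expects for repeated 'fields' filters.
    print(urllib.urlencode({'fields': ['id', 'name']}, doseq=1))
    # fields=id&fields=name

    # Without doseq the list is stringified and quoted as one value.
    print(urllib.urlencode({'fields': ['id', 'name']}))
    # fields=%5B%27id%27%2C+%27name%27%5D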
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 53a3325..f3f4eb6 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -77,11 +77,16 @@
         resp, body = self.head(url)
         return resp, body
 
-    def get_object(self, container, object_name):
+    def get_object(self, container, object_name, metadata=None):
         """Retrieve object's data."""
 
+        headers = {}
+        if metadata:
+            for key in metadata:
+                headers[str(key)] = metadata[key]
+
         url = "{0}/{1}".format(container, object_name)
-        resp, body = self.get(url)
+        resp, body = self.get(url, headers=headers)
         return resp, body
 
     def copy_object_in_same_container(self, container, src_object_name,
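A hedged usage sketch of the new metadata argument (the client instance and names are assumed; any header understood by a Swift GET could be passed this way):

    # Illustrative only: request a byte range of an object by passing
    # the header through the new 'metadata' parameter.
    resp, body = object_client.get_object(
        'my-container', 'my-object',
        metadata={'Range': 'bytes=0-1023'})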
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
new file mode 100644
index 0000000..d43c04a
--- /dev/null
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(VolumesServicesClientJSON, self).__init__(auth_provider)
+        self.service = CONF.volume.catalog_type
+
+    def list_services(self, params=None):
+        url = 'os-services'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['services']
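A hedged usage sketch for the new client (the instance is assumed to come from the usual client manager, and the filter key is illustrative):

    # Illustrative only: list volume services, optionally filtered via
    # the same URL parameters built above.
    resp, services = volume_services_client.list_services(
        params={'binary': 'cinder-volume'})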
diff --git a/tempest/services/volume/xml/admin/volume_services_client.py b/tempest/services/volume/xml/admin/volume_services_client.py
new file mode 100644
index 0000000..7bad16d
--- /dev/null
+++ b/tempest/services/volume/xml/admin/volume_services_client.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientXML(rest_client.RestClient):
+    TYPE = "xml"
+
+    def __init__(self, auth_provider):
+        super(VolumesServicesClientXML, self).__init__(auth_provider)
+        self.service = CONF.volume.catalog_type
+
+    def list_services(self, params=None):
+        url = 'os-services'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        node = etree.fromstring(body)
+        body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
+        return resp, body
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index c330165..478cd07 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -32,8 +32,6 @@
                                 stderr=subprocess.PIPE)
         proc.wait()
         success = proc.returncode == 0
-        self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
-                         "pong!" if success else "no pong :(")
         return success
 
     def tcp_connect_scan(self, addr, port):
@@ -58,11 +56,17 @@
             raise RuntimeError("Cannot connect to the ssh port.")
 
     def check_icmp_echo(self):
+        self.logger.info("%s(%s): Pinging..",
+                         self.server_id, self.floating['ip'])
+
         def func():
             return self.ping_ip_address(self.floating['ip'])
         if not tempest.test.call_until_true(func, self.check_timeout,
                                             self.check_interval):
-            raise RuntimeError("Cannot ping the machine.")
+            raise RuntimeError("%s(%s): Cannot ping the machine." %
+                               (self.server_id, self.floating['ip']))
+        self.logger.info("%s(%s): pong :)",
+                         self.server_id, self.floating['ip'])
 
     def _create_vm(self):
         self.name = name = data_utils.rand_name("instance")
@@ -170,6 +174,8 @@
             self._create_vm()
         if self.reboot:
             self.manager.servers_client.reboot(self.server_id, 'HARD')
+            self.manager.servers_client.wait_for_server_status(self.server_id,
+                                                               'ACTIVE')
 
         self.run_core()
 
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+    def _create_keypair(self):
+        keyname = data_utils.rand_name("key")
+        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+        assert(resp.status == 200)
+
+    def _delete_keypair(self):
+        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+        assert(resp.status == 202)
+
+    def _create_vm(self):
+        self.name = name = data_utils.rand_name("instance")
+        servers_client = self.manager.servers_client
+        self.logger.info("creating %s" % name)
+        vm_args = self.vm_extra_args.copy()
+        vm_args['security_groups'] = [self.sec_grp]
+        vm_args['key_name'] = self.key['name']
+        resp, server = servers_client.create_server(name, self.image,
+                                                    self.flavor,
+                                                    **vm_args)
+        self.server_id = server['id']
+        assert(resp.status == 202)
+        self.manager.servers_client.wait_for_server_status(self.server_id,
+                                                           'ACTIVE')
+
+    def _destroy_vm(self):
+        self.logger.info("deleting server: %s" % self.server_id)
+        resp, _ = self.manager.servers_client.delete_server(self.server_id)
+        assert(resp.status == 204)  # it would not be 204 if the delete had to wait
+        self.manager.servers_client.wait_for_server_termination(self.server_id)
+        self.logger.info("deleted server: %s" % self.server_id)
+
+    def _create_sec_group(self):
+        sec_grp_cli = self.manager.security_groups_client
+        s_name = data_utils.rand_name('sec_grp-')
+        s_description = data_utils.rand_name('desc-')
+        _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+                                                            s_description)
+        create_rule = sec_grp_cli.create_security_group_rule
+        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+        create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+    def _destroy_sec_grp(self):
+        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+    def _create_floating_ip(self):
+        floating_cli = self.manager.floating_ips_client
+        _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+    def _destroy_floating_ip(self):
+        cli = self.manager.floating_ips_client
+        cli.delete_floating_ip(self.floating['id'])
+        cli.wait_for_resource_deletion(self.floating['id'])
+        self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+    def _create_volume(self):
+        name = data_utils.rand_name("volume")
+        self.logger.info("creating volume: %s" % name)
+        volumes_client = self.manager.volumes_client
+        resp, self.volume = volumes_client.create_volume(
+            size=1,
+            display_name=name)
+        assert(resp.status == 200)
+        volumes_client.wait_for_volume_status(self.volume['id'],
+                                              'available')
+        self.logger.info("created volume: %s" % self.volume['id'])
+
+    def _delete_volume(self):
+        self.logger.info("deleting volume: %s" % self.volume['id'])
+        volumes_client = self.manager.volumes_client
+        resp, _ = volumes_client.delete_volume(self.volume['id'])
+        assert(resp.status == 202)
+        volumes_client.wait_for_resource_deletion(self.volume['id'])
+        self.logger.info("deleted volume: %s" % self.volume['id'])
+
+    def _wait_disassociate(self):
+        cli = self.manager.floating_ips_client
+
+        def func():
+            _, floating = cli.get_floating_ip_details(self.floating['id'])
+            return floating['instance_id'] is None
+
+        if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+                                            CONF.compute.build_interval):
+            raise RuntimeError("IP disassociate timeout!")
+
+    def new_server_ops(self):
+        self._create_vm()
+        cli = self.manager.floating_ips_client
+        cli.associate_floating_ip_to_server(self.floating['ip'],
+                                            self.server_id)
+        if self.ssh_test_before_attach and self.enable_ssh_verify:
+            self.logger.info("Scanning for block devices via ssh on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+
+    def setUp(self, **kwargs):
+        """Note able configuration combinations:
+            Closest options to the test_stamp_pattern:
+                new_server = True
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = False
+            Just attaching:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+            Mostly API load by repeated attachment:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = False
+                ssh_test_before_attach = False
+            Minimal Nova load, but cinder load not decreased:
+                new_server = False
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+        """
+        self.image = CONF.compute.image_ref
+        self.flavor = CONF.compute.flavor_ref
+        self.vm_extra_args = kwargs.get('vm_extra_args', {})
+        self.floating_pool = kwargs.get('floating_pool', None)
+        self.new_volume = kwargs.get('new_volume', True)
+        self.new_server = kwargs.get('new_server', False)
+        self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+        self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+                                                 False)
+        self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+        self.detach_match_count = kwargs.get('detach_match_count', 1)
+        self.attach_match_count = kwargs.get('attach_match_count', 2)
+        self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+        self._create_floating_ip()
+        self._create_sec_group()
+        self._create_keypair()
+        private_key = self.key['private_key']
+        username = CONF.compute.image_ssh_user
+        self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+                                                        username,
+                                                        pkey=private_key)
+        if not self.new_volume:
+            self._create_volume()
+        if not self.new_server:
+            self.new_server_ops()
+
+    # Now we just test whether the number of partitions increased or decreased.
+    def part_wait(self, num_match):
+        def _part_state():
+            self.partitions = self.remote_client.get_partitions().split('\n')
+            matching = 0
+            for part_line in self.partitions[1:]:
+                if self.part_line_re.match(part_line):
+                    matching += 1
+            return matching == num_match
+        if tempest.test.call_until_true(_part_state,
+                                        CONF.compute.build_timeout,
+                                        CONF.compute.build_interval):
+            return
+        else:
+            raise RuntimeError("Unexpected partitions: %s" %
+                               str(self.partitions))
+
+    def run(self):
+        if self.new_server:
+            self.new_server_ops()
+        if self.new_volume:
+            self._create_volume()
+        servers_client = self.manager.servers_client
+        self.logger.info("attach volume (%s) to vm %s" %
+                        (self.volume['id'], self.server_id))
+        resp, body = servers_client.attach_volume(self.server_id,
+                                                  self.volume['id'],
+                                                  self.part_name)
+        assert(resp.status == 200)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'in-use')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for new block device on %s"
+                             % self.server_id)
+            self.part_wait(self.attach_match_count)
+
+        resp, body = servers_client.detach_volume(self.server_id,
+                                                  self.volume['id'])
+        assert(resp.status == 202)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'available')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for block device disapperance on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+        if self.new_volume:
+            self._delete_volume()
+        if self.new_server:
+            self._destroy_vm()
+
+    def tearDown(self):
+        cli = self.manager.floating_ips_client
+        cli.disassociate_floating_ip_from_server(self.floating['ip'],
+                                                 self.server_id)
+        self._wait_disassociate()
+        if not self.new_server:
+            self._destroy_vm()
+        self._delete_keypair()
+        self._destroy_floating_ip()
+        self._destroy_sec_grp()
+        if not self.new_volume:
+            self._delete_volume()
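A self-contained sketch (fabricated, /proc/partitions-style sample output) of how part_wait() counts matching block devices against the default part_line_re and the attach/detach match counts:

    import re

    # Stand-in for remote_client.get_partitions(); real output comes
    # from the guest over ssh.  With only the root disk present this
    # would match once (detach_match_count); after attaching /dev/vdc
    # it matches twice (attach_match_count).
    partitions = ("major minor  #blocks  name\n"
                  " 253        0   10485760 vda\n"
                  " 253       32    1048576 vdc")
    part_line_re = re.compile('.*vd.*')
    matching = 0
    for part_line in partitions.split('\n')[1:]:
        if part_line_re.match(part_line):
            matching += 1
    print(matching)  # 2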
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..517cfd5 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -80,17 +80,23 @@
     return ret
 
 
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
     """
     Signal handler (only active if stop_on_error is True).
     """
-    terminate_all_processes()
+    for process in processes:
+        if (not process['process'].is_alive() and
+                process['process'].exitcode != 0):
+            signal.signal(signalnum, signal.SIG_DFL)
+            terminate_all_processes()
+            break
 
 
 def terminate_all_processes(check_interval=20):
     """
     Goes through the process list and terminates all child processes.
     """
+    LOG.info("Stopping all processes.")
     for process in processes:
         if process['process'].is_alive():
             try:
@@ -174,34 +180,39 @@
         signal.signal(signal.SIGCHLD, sigchld_handler)
     end_time = time.time() + duration
     had_errors = False
-    while True:
-        if max_runs is None:
-            remaining = end_time - time.time()
-            if remaining <= 0:
-                break
-        else:
-            remaining = log_check_interval
-            all_proc_term = True
-            for process in processes:
-                if process['process'].is_alive():
-                    all_proc_term = False
+    try:
+        while True:
+            if max_runs is None:
+                remaining = end_time - time.time()
+                if remaining <= 0:
                     break
-            if all_proc_term:
-                break
-
-        time.sleep(min(remaining, log_check_interval))
-        if stop_on_error:
-            for process in processes:
-                if process['statistic']['fails'] > 0:
+            else:
+                remaining = log_check_interval
+                all_proc_term = True
+                for process in processes:
+                    if process['process'].is_alive():
+                        all_proc_term = False
+                        break
+                if all_proc_term:
                     break
 
-        if not logfiles:
-            continue
-        if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
-                              stop_on_error):
-            had_errors = True
-            break
+            time.sleep(min(remaining, log_check_interval))
+            if stop_on_error:
+                if any(proc['statistic']['fails'] > 0
+                       for proc in processes):
+                    break
 
+            if not logfiles:
+                continue
+            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
+                                  stop_on_error):
+                had_errors = True
+                break
+    except KeyboardInterrupt:
+        LOG.warning("Interrupted, going to print statistics and exit ...")
+
+    if stop_on_error:
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
     terminate_all_processes()
 
     sum_fails = 0
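To see why the handler now checks the child's exit code, a small POSIX-only demonstration (hypothetical, not part of the patch) that SIGCHLD is delivered even when a worker exits cleanly:

    import multiprocessing
    import signal
    import time


    def child():
        pass  # exits immediately with exitcode 0


    def handler(signalnum, frame):
        # Delivered for every child exit, clean or not; the stress
        # driver's handler therefore only tears the run down when it
        # finds a process with a non-zero exitcode.
        print('got SIGCHLD')


    if __name__ == '__main__':
        signal.signal(signal.SIGCHLD, handler)
        proc = multiprocessing.Process(target=child)
        proc.start()
        time.sleep(1)         # interrupted early by the signal
        proc.join()
        print(proc.exitcode)  # 0 -> no reason to stop the whole run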
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+  "threads": 1,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {"vm_extra_args": {},
+             "new_volume": true,
+             "new_server": false,
+             "ssh_test_before_attach": false,
+             "enable_ssh_verify": true}
+}
+]
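For reference, a small sketch (a subset of the defaults copied from VolumeVerifyStress.setUp above; the merge shown here is illustrative, since the driver passes kwargs straight to setUp) of the effective options this file selects, assuming it is run from the repository root:

    import json

    # Subset of the setUp() defaults; keys present in the JSON
    # 'kwargs' block override them.
    defaults = {'new_volume': True, 'new_server': False,
                'enable_ssh_verify': True, 'ssh_test_before_attach': False,
                'part_name': '/dev/vdc', 'attach_match_count': 2,
                'detach_match_count': 1}
    with open('tempest/stress/etc/volume-attach-verify.json') as f:
        kwargs = json.load(f)[0]['kwargs']
    effective = defaults.copy()
    effective.update(kwargs)
    print(effective['enable_ssh_verify'])       # True
    print(effective['ssh_test_before_attach'])  # False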
diff --git a/tempest/test.py b/tempest/test.py
index 8df405c..254fffa 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -75,12 +75,16 @@
             try:
                 f(cls)
             except Exception as se:
+                etype, value, trace = sys.exc_info()
                 LOG.exception("setUpClass failed: %s" % se)
                 try:
                     cls.tearDownClass()
                 except Exception as te:
                     LOG.exception("tearDownClass failed: %s" % te)
-                raise se
+                try:
+                    raise etype(value), None, trace
+                finally:
+                    del trace  # for avoiding circular refs
 
     return decorator
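For readers less used to the three-argument raise above, a self-contained sketch of the Python 2 idiom for re-raising after cleanup while keeping the original traceback:

    import sys


    def flaky_setup():
        raise ValueError("boom")


    try:
        flaky_setup()
    except Exception:
        etype, value, trace = sys.exc_info()
        # ... best-effort cleanup would run here ...
        try:
            raise etype, value, trace   # Python 2 three-argument raise
        finally:
            del trace  # break the traceback <-> frame reference cycle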
 
diff --git a/tempest/tests/cmd/__init__.py b/tempest/tests/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/cmd/__init__.py
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
new file mode 100644
index 0000000..40caf30
--- /dev/null
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -0,0 +1,397 @@
+# Copyright 2014 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+import mock
+
+from tempest.cmd import verify_tempest_config
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestGetAPIVersions(base.TestCase):
+
+    def test_url_grab_versioned_nova_nossl(self):
+        base_url = 'http://127.0.0.1:8774/v2/'
+        endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+        self.assertEqual('http://127.0.0.1:8774', endpoint)
+
+    def test_url_grab_versioned_nova_ssl(self):
+        base_url = 'https://127.0.0.1:8774/v3/'
+        endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+        self.assertEqual('https://127.0.0.1:8774', endpoint)
+
+
+class TestDiscovery(base.TestCase):
+
+    def setUp(self):
+        super(TestDiscovery, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def test_get_keystone_api_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
+        self.assertIn('v2.0', versions)
+        self.assertIn('v3.0', versions)
+
+    def test_get_cinder_api_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
+        self.assertIn('v1.0', versions)
+        self.assertIn('v2.0', versions)
+
+    def test_get_nova_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
+        self.assertIn('v2.0', versions)
+        self.assertIn('v3.0', versions)
+
+    def test_verify_keystone_api_versions_no_v3(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v3',
+                                           'identity_feature_enabled',
+                                           False, True)
+
+    def test_verify_keystone_api_versions_no_v2(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2',
+                                           'identity_feature_enabled',
+                                           False, True)
+
+    def test_verify_cinder_api_versions_no_v2(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v1.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
+                                           False, True)
+
+    def test_verify_cinder_api_versions_no_v1(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v2.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
+                                           False, True)
+
+    def test_verify_nova_versions(self):
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint',
+            return_value='http://fake_endpoint:5000'))
+        fake_resp = {'versions': [{'id': 'v2.0'}]}
+        fake_resp = json.dumps(fake_resp)
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.RAW_HTTP, 'request',
+            return_value=(None, fake_resp)))
+        fake_os = mock.MagicMock()
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_nova_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v3', 'compute_feature_enabled',
+                                           False, True)
+
+    def test_verify_glance_version_no_v2_with_v1_1(self):
+        def fake_get_versions():
+            return (None, ['v1.1'])
+        fake_os = mock.MagicMock()
+        fake_os.image_client.get_versions = fake_get_versions
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_glance_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+                                           False, True)
+
+    def test_verify_glance_version_no_v2_with_v1_0(self):
+        def fake_get_versions():
+            return (None, ['v1.0'])
+        fake_os = mock.MagicMock()
+        fake_os.image_client.get_versions = fake_get_versions
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_glance_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+                                           False, True)
+
+    def test_verify_glance_version_no_v1(self):
+        def fake_get_versions():
+            return (None, ['v2.0'])
+        fake_os = mock.MagicMock()
+        fake_os.image_client.get_versions = fake_get_versions
+        with mock.patch.object(verify_tempest_config,
+                               'print_and_or_update') as print_mock:
+            verify_tempest_config.verify_glance_api_versions(fake_os, True)
+        print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
+                                           False, True)
+
+    def test_verify_extensions_neutron(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.network_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
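+        # 'fake3' is configured but not advertised by the fake API, and
+        # 'not_fake' is advertised but not configured, so both should be
+        # reported as False below.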
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'neutron', {})
+        self.assertIn('neutron', results)
+        self.assertIn('fake1', results['neutron'])
+        self.assertTrue(results['neutron']['fake1'])
+        self.assertIn('fake2', results['neutron'])
+        self.assertTrue(results['neutron']['fake2'])
+        self.assertIn('fake3', results['neutron'])
+        self.assertFalse(results['neutron']['fake3'])
+        self.assertIn('not_fake', results['neutron'])
+        self.assertFalse(results['neutron']['not_fake'])
+
+    def test_verify_extensions_neutron_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'alias': 'fake1'},
+                                          {'alias': 'fake2'},
+                                          {'alias': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.network_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'neutron', {})
+        self.assertIn('neutron', results)
+        self.assertIn('extensions', results['neutron'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['neutron']['extensions'])
+
+    def test_verify_extensions_cinder(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'cinder', {})
+        self.assertIn('cinder', results)
+        self.assertIn('fake1', results['cinder'])
+        self.assertTrue(results['cinder']['fake1'])
+        self.assertIn('fake2', results['cinder'])
+        self.assertTrue(results['cinder']['fake2'])
+        self.assertIn('fake3', results['cinder'])
+        self.assertFalse(results['cinder']['fake3'])
+        self.assertIn('not_fake', results['cinder'])
+        self.assertFalse(results['cinder']['not_fake'])
+
+    def test_verify_extensions_cinder_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'cinder', {})
+        self.assertIn('cinder', results)
+        self.assertIn('extensions', results['cinder'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['cinder']['extensions'])
+
+    def test_verify_extensions_nova(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova', {})
+        self.assertIn('nova', results)
+        self.assertIn('fake1', results['nova'])
+        self.assertTrue(results['nova']['fake1'])
+        self.assertIn('fake2', results['nova'])
+        self.assertTrue(results['nova']['fake2'])
+        self.assertIn('fake3', results['nova'])
+        self.assertFalse(results['nova']['fake3'])
+        self.assertIn('not_fake', results['nova'])
+        self.assertFalse(results['nova']['not_fake'])
+
+    def test_verify_extensions_nova_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova', {})
+        self.assertIn('nova', results)
+        self.assertIn('extensions', results['nova'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['nova']['extensions'])
+
+    def test_verify_extensions_nova_v3(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova_v3', {})
+        self.assertIn('nova_v3', results)
+        self.assertIn('fake1', results['nova_v3'])
+        self.assertTrue(results['nova_v3']['fake1'])
+        self.assertIn('fake2', results['nova_v3'])
+        self.assertTrue(results['nova_v3']['fake2'])
+        self.assertIn('fake3', results['nova_v3'])
+        self.assertFalse(results['nova_v3']['fake3'])
+        self.assertIn('not_fake', results['nova_v3'])
+        self.assertFalse(results['nova_v3']['not_fake'])
+
+    def test_verify_extensions_nova_v3_all(self):
+        def fake_list_extensions():
+            return (None, {'extensions': [{'name': 'fake1'},
+                                          {'name': 'fake2'},
+                                          {'name': 'not_fake'}]})
+        fake_os = mock.MagicMock()
+        fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'nova_v3', {})
+        self.assertIn('nova_v3', results)
+        self.assertIn('extensions', results['nova_v3'])
+        self.assertEqual(['fake1', 'fake2', 'not_fake'],
+                         results['nova_v3']['extensions'])
+
+    def test_verify_extensions_swift(self):
+        def fake_list_extensions():
+            return (None, {'fake1': 'metadata',
+                           'fake2': 'metadata',
+                           'not_fake': 'metadata',
+                           'swift': 'metadata'})
+        fake_os = mock.MagicMock()
+        fake_os.account_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['fake1', 'fake2', 'fake3'])))
+        results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
+        self.assertIn('swift', results)
+        self.assertIn('fake1', results['swift'])
+        self.assertTrue(results['swift']['fake1'])
+        self.assertIn('fake2', results['swift'])
+        self.assertTrue(results['swift']['fake2'])
+        self.assertIn('fake3', results['swift'])
+        self.assertFalse(results['swift']['fake3'])
+        self.assertIn('not_fake', results['swift'])
+        self.assertFalse(results['swift']['not_fake'])
+
+    def test_verify_extensions_swift_all(self):
+        def fake_list_extensions():
+            return (None, {'fake1': 'metadata',
+                           'fake2': 'metadata',
+                           'not_fake': 'metadata',
+                           'swift': 'metadata'})
+        fake_os = mock.MagicMock()
+        fake_os.account_client.list_extensions = fake_list_extensions
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, 'get_enabled_extensions',
+            return_value=(['all'])))
+        results = verify_tempest_config.verify_extensions(fake_os,
+                                                          'swift', {})
+        self.assertIn('swift', results)
+        self.assertIn('extensions', results['swift'])
+        self.assertEqual(['not_fake', 'fake1', 'fake2'],
+                         results['swift']['extensions'])
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..44c331e 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.tests import fake_credentials
+
+
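+# These helpers mirror the signatures of auth.get_default_credentials and
+# auth.get_credentials so tests can stub them in without touching Keystone.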
+def get_default_credentials(credential_type, fill_in=True):
+    return fake_credentials.FakeCredentials()
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+    return fake_credentials.FakeCredentials()
+
 
 class FakeAuthProvider(object):
 
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4676cbd..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -45,6 +45,16 @@
             os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
         self.conf.set_default('lock_path',
                               str(os.environ.get('OS_TEST_LOCK_PATH')))
+        self.conf.set_default('auth_version', 'v2', group='identity')
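+        # Pre-populate fake credentials for the identity and compute-admin
+        # groups so credential classes can fill their defaults from config.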
+        for config_option in ['username', 'password', 'tenant_name']:
+            # Identity group items
+            for prefix in ['', 'alt_', 'admin_']:
+                self.conf.set_default(prefix + config_option,
+                                      'fake_' + config_option,
+                                      group='identity')
+            # Compute Admin group items
+            self.conf.set_default(config_option, 'fake_' + config_option,
+                                  group='compute-admin')
 
 
 class FakePrivate(config.TempestConfigPrivate):
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..48f67d2
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,62 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+    def is_valid(self):
+        return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            tenant_name='fake_tenant_name'
+        )
+        super(FakeKeystoneV2Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
+    """
+    Fake credentials suitable for the Keystone Identity V3 API
+    """
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            user_domain_name='fake_domain_name',
+            project_name='fake_tenant_name'
+        )
+        super(FakeKeystoneV3Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
+    """
+    Fake credentials suitable for the Keystone Identity V3 API, with no scope
+    """
+
+    def __init__(self):
+        creds = dict(
+            username='fake_username',
+            password='fake_password',
+            user_domain_name='fake_domain_name'
+        )
+        super(FakeKeystoneV3DomainCredentials, self).__init__(**creds)
diff --git a/tempest/tests/fake_http.py b/tempest/tests/fake_http.py
index a09d5ba..7b878af 100644
--- a/tempest/tests/fake_http.py
+++ b/tempest/tests/fake_http.py
@@ -12,6 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
 import httplib2
 
 
@@ -44,3 +45,29 @@
         else:
             msg = "unsupported return type %s" % self.return_type
             raise TypeError(msg)
+
+
+class fake_httplib(object):
+    def __init__(self, headers, body=None,
+                 version=1.0, status=200, reason="Ok"):
+        """
+        :param headers: dict representing HTTP response headers
+        :param body: file-like object
+        :param version: HTTP Version
+        :param status: Response status code
+        :param reason: Status code related message.
+        """
+        self.body = body
+        self.status = status
+        self.reason = reason
+        self.version = version
+        self.headers = headers
+
+    def getheaders(self):
+        return copy.deepcopy(self.headers).items()
+
+    def getheader(self, key, default):
+        return self.headers.get(key, default)
+
+    def read(self, amt):
+        return self.body.read(amt)
diff --git a/tempest/tests/fake_identity.py b/tempest/tests/fake_identity.py
index 058c9c2..1900fc9 100644
--- a/tempest/tests/fake_identity.py
+++ b/tempest/tests/fake_identity.py
@@ -113,7 +113,7 @@
         "expires_at": "2020-01-01T00:00:10.000123Z",
         "project": {
             "domain": {
-                "id": "fake_id",
+                "id": "fake_domain_id",
                 "name": "fake"
             },
             "id": "project_id",
@@ -121,7 +121,7 @@
         },
         "user": {
             "domain": {
-                "id": "domain_id",
+                "id": "fake_domain_id",
                 "name": "domain_name"
             },
             "id": "fake_user_id",
diff --git a/tempest/tests/negative/test_generate_json.py b/tempest/tests/negative/test_generate_json.py
deleted file mode 100644
index e09fcdf..0000000
--- a/tempest/tests/negative/test_generate_json.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.generator import negative_generator
-import tempest.test
-
-
-class TestNegativeGenerator(tempest.test.BaseTestCase):
-
-    fake_input_str = {"type": "string",
-                      "minLength": 2,
-                      "maxLength": 8,
-                      'results': {'gen_number': 404}}
-
-    fake_input_int = {"type": "integer",
-                      "maximum": 255,
-                      "minimum": 1}
-
-    fake_input_obj = {"type": "object",
-                      "properties": {"minRam": {"type": "integer"},
-                                     "diskName": {"type": "string"},
-                                     "maxRam": {"type": "integer", }
-                                     }
-                      }
-
-    def setUp(self):
-        super(TestNegativeGenerator, self).setUp()
-        self.negative = negative_generator.NegativeTestGenerator()
-
-    def _validate_result(self, data):
-        self.assertTrue(isinstance(data, list))
-        for t in data:
-            self.assertTrue(isinstance(t, tuple))
-
-    def test_generate_invalid_string(self):
-        result = self.negative.generate(self.fake_input_str)
-        self._validate_result(result)
-
-    def test_generate_invalid_integer(self):
-        result = self.negative.generate(self.fake_input_int)
-        self._validate_result(result)
-
-    def test_generate_invalid_obj(self):
-        result = self.negative.generate(self.fake_input_obj)
-        self._validate_result(result)
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index f2ed999..c77faca 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -16,7 +16,9 @@
 import jsonschema
 import mock
 
-import tempest.common.generator.base_generator as base_generator
+from tempest.common.generator import base_generator
+from tempest.common.generator import negative_generator
+from tempest.common.generator import valid_generator
 from tempest.tests import base
 
 
@@ -79,3 +81,73 @@
         self.assertRaises(jsonschema.SchemaError,
                           self.generator.validate_schema,
                           self.invalid_json_schema_desc)
+
+
+class BaseNegativeGenerator(object):
+    types = ['string', 'integer', 'object']
+
+    fake_input_str = {"type": "string",
+                      "minLength": 2,
+                      "maxLength": 8,
+                      'results': {'gen_int': 404}}
+
+    fake_input_int = {"type": "integer",
+                      "maximum": 255,
+                      "minimum": 1}
+
+    fake_input_obj = {"type": "object",
+                      "properties": {"minRam": {"type": "integer"},
+                                     "diskName": {"type": "string"},
+                                     "maxRam": {"type": "integer", }
+                                     }
+                      }
+
+    unknown_type_schema = {
+        "type": "not_defined"
+    }
+
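+    # Every generator is expected to return a list of 3-tuples whose first
+    # element is a string.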
+    def _validate_result(self, data):
+        self.assertTrue(isinstance(data, list))
+        for t in data:
+            self.assertIsInstance(t, tuple)
+            self.assertEqual(3, len(t))
+            self.assertIsInstance(t[0], str)
+
+    def test_generate_string(self):
+        result = self.generator.generate(self.fake_input_str)
+        self._validate_result(result)
+
+    def test_generate_integer(self):
+        result = self.generator.generate(self.fake_input_int)
+        self._validate_result(result)
+
+    def test_generate_obj(self):
+        result = self.generator.generate(self.fake_input_obj)
+        self._validate_result(result)
+
+    def test_generator_mandatory_functions(self):
+        for data_type in self.types:
+            self.assertIn(data_type, self.generator.types_dict)
+
+    def test_generate_with_unknown_type(self):
+        self.assertRaises(TypeError, self.generator.generate,
+                          self.unknown_type_schema)
+
+
+class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
+    def setUp(self):
+        super(TestNegativeValidGenerator, self).setUp()
+        self.generator = valid_generator.ValidTestGenerator()
+
+    def test_generate_valid(self):
+        result = self.generator.generate_valid(self.fake_input_obj)
+        self.assertIn("minRam", result)
+        self.assertIsInstance(result["minRam"], int)
+        self.assertIn("diskName", result)
+        self.assertIsInstance(result["diskName"], str)
+
+
+class TestNegativeNegativeGenerator(base.TestCase, BaseNegativeGenerator):
+    def setUp(self):
+        super(TestNegativeNegativeGenerator, self).setUp()
+        self.generator = negative_generator.NegativeTestGenerator()
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 62c20e3..03333be 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -22,18 +22,16 @@
 from tempest import exceptions
 from tempest.openstack.common.fixture import mockpatch
 from tempest.tests import base
+from tempest.tests import fake_auth_provider
 from tempest.tests import fake_config
+from tempest.tests import fake_credentials
 from tempest.tests import fake_http
 from tempest.tests import fake_identity
 
 
 class BaseAuthTestsSetUp(base.TestCase):
     _auth_provider_class = None
-    credentials = {
-        'username': 'fake_user',
-        'password': 'fake_pwd',
-        'tenant_name': 'fake_tenant'
-    }
+    credentials = fake_credentials.FakeCredentials()
 
     def _auth(self, credentials, **params):
         """
@@ -47,6 +45,10 @@
         self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
         self.fake_http = fake_http.fake_httplib2(return_type=200)
         self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
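+        # Replace the credentials factories with fakes so building an auth
+        # provider does not require a fully populated configuration.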
+        self.stubs.Set(auth, 'get_credentials',
+                       fake_auth_provider.get_credentials)
+        self.stubs.Set(auth, 'get_default_credentials',
+                       fake_auth_provider.get_default_credentials)
         self.auth_provider = self._auth(self.credentials)
 
 
@@ -58,12 +60,19 @@
     """
     _auth_provider_class = auth.AuthProvider
 
-    def test_check_credentials_is_dict(self):
-        self.assertTrue(self.auth_provider.check_credentials({}))
+    def test_check_credentials_class(self):
+        self.assertRaises(NotImplementedError,
+                          self.auth_provider.check_credentials,
+                          auth.Credentials())
 
     def test_check_credentials_bad_type(self):
         self.assertFalse(self.auth_provider.check_credentials([]))
 
+    def test_instantiate_with_dict(self):
+        # Dict credentials are only supported for backward compatibility
+        auth_provider = self._auth(credentials={})
+        self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
     def test_instantiate_with_bad_credentials_type(self):
         """
         Assure that credentials with bad type fail with TypeError
@@ -100,10 +109,15 @@
         self.assertIsNone(self.auth_provider.alt_part)
         self.assertIsNone(self.auth_provider.alt_auth_data)
 
+    def test_fill_credentials(self):
+        self.assertRaises(NotImplementedError,
+                          self.auth_provider.fill_credentials)
+
 
 class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
     _endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
     _auth_provider_class = auth.KeystoneV2AuthProvider
+    credentials = fake_credentials.FakeKeystoneV2Credentials()
 
     def setUp(self):
         super(TestKeystoneV2AuthProvider, self).setUp()
@@ -123,6 +137,13 @@
     def _get_token_from_fake_identity(self):
         return fake_identity.TOKEN
 
+    def _get_from_fake_identity(self, attr):
+        access = fake_identity.IDENTITY_V2_RESPONSE['access']
+        if attr == 'user_id':
+            return access['user']['id']
+        elif attr == 'tenant_id':
+            return access['token']['tenant']['id']
+
     def _test_request_helper(self, filters, expected):
         url, headers, body = self.auth_provider.auth_request('GET',
                                                              self.target_url,
@@ -210,16 +231,12 @@
             del cred[attr]
             self.assertFalse(self.auth_provider.check_credentials(cred))
 
-    def test_check_credentials_not_scoped_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['tenant_name']
-        self.assertTrue(self.auth_provider.check_credentials(cred,
-                                                             scoped=False))
-
-    def test_check_credentials_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['tenant_name']
-        self.assertFalse(self.auth_provider.check_credentials(cred))
+    def test_fill_credentials(self):
+        self.auth_provider.fill_credentials()
+        creds = self.auth_provider.credentials
+        for attr in ['user_id', 'tenant_id']:
+            self.assertEqual(self._get_from_fake_identity(attr),
+                             getattr(creds, attr))
 
     def _test_base_url_helper(self, expected_url, filters,
                               auth_data=None):
@@ -321,12 +338,7 @@
 class TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):
     _endpoints = fake_identity.IDENTITY_V3_RESPONSE['token']['catalog']
     _auth_provider_class = auth.KeystoneV3AuthProvider
-    credentials = {
-        'username': 'fake_user',
-        'password': 'fake_pwd',
-        'tenant_name': 'fake_tenant',
-        'domain_name': 'fake_domain_name',
-    }
+    credentials = fake_credentials.FakeKeystoneV3Credentials()
 
     def setUp(self):
         super(TestKeystoneV3AuthProvider, self).setUp()
@@ -346,10 +358,44 @@
         access['expires_at'] = date_as_string
         return token, access
 
-    def test_check_credentials_missing_tenant_name(self):
-        cred = copy.copy(self.credentials)
-        del cred['domain_name']
-        self.assertFalse(self.auth_provider.check_credentials(cred))
+    def _get_from_fake_identity(self, attr):
+        token = fake_identity.IDENTITY_V3_RESPONSE['token']
+        if attr == 'user_id':
+            return token['user']['id']
+        elif attr == 'project_id':
+            return token['project']['id']
+        elif attr == 'user_domain_id':
+            return token['user']['domain']['id']
+        elif attr == 'project_domain_id':
+            return token['project']['domain']['id']
+
+    def test_check_credentials_missing_attribute(self):
+        # reset credentials to fresh ones
+        self.credentials.reset()
+        for attr in ['username', 'password', 'user_domain_name',
+                     'project_domain_name']:
+            cred = copy.copy(self.credentials)
+            del cred[attr]
+            self.assertFalse(self.auth_provider.check_credentials(cred),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_check_domain_credentials_missing_attribute(self):
+        # reset credentials to fresh ones
+        self.credentials.reset()
+        domain_creds = fake_credentials.FakeKeystoneV3DomainCredentials()
+        for attr in ['username', 'password', 'user_domain_name']:
+            cred = copy.copy(domain_creds)
+            del cred[attr]
+            self.assertFalse(self.auth_provider.check_credentials(cred),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_fill_credentials(self):
+        self.auth_provider.fill_credentials()
+        creds = self.auth_provider.credentials
+        for attr in ['user_id', 'project_id', 'user_domain_id',
+                     'project_domain_id']:
+            self.assertEqual(self._get_from_fake_identity(attr),
+                             getattr(creds, attr))
 
     # Overwrites v2 test
     def test_base_url_to_get_admin_endpoint(self):
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..9da5f92
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,229 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from oslo.config import cfg
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+    attributes = {}
+    credentials_class = auth.Credentials
+
+    def _get_credentials(self, attributes=None):
+        if attributes is None:
+            attributes = self.attributes
+        return self.credentials_class(**attributes)
+
+    def setUp(self):
+        super(CredentialsTests, self).setUp()
+        self.fake_http = fake_http.fake_httplib2(return_type=200)
+        self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+        self.useFixture(fake_config.ConfigFixture())
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def test_create(self):
+        creds = self._get_credentials()
+        self.assertEqual(self.attributes, creds._initial)
+
+    def test_create_invalid_attr(self):
+        self.assertRaises(exceptions.InvalidCredentials,
+                          self._get_credentials,
+                          attributes=dict(invalid='fake'))
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            self.assertRaises(NotImplementedError,
+                              self.credentials_class.get_default,
+                              credentials_type=ctype)
+
+    def test_invalid_default(self):
+        self.assertRaises(exceptions.InvalidCredentials,
+                          auth.Credentials.get_default,
+                          credentials_type='invalid_type')
+
+    def test_is_valid(self):
+        creds = self._get_credentials()
+        self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+    attributes = {
+        'username': 'fake_username',
+        'password': 'fake_password',
+        'tenant_name': 'fake_tenant_name'
+    }
+
+    identity_response = fake_identity._fake_v2_response
+    credentials_class = auth.KeystoneV2Credentials
+
+    def setUp(self):
+        super(KeystoneV2CredentialsTests, self).setUp()
+        self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+    def _verify_credentials(self, credentials_class, filled=True,
+                            creds_dict=None):
+
+        def _check(credentials):
+            # Check the right version of credentials has been returned
+            self.assertIsInstance(credentials, credentials_class)
+            # Check the id attributes are filled in
+            attributes = [x for x in credentials.ATTRIBUTES if (
+                '_id' in x and x != 'domain_id')]
+            for attr in attributes:
+                if filled:
+                    self.assertIsNotNone(getattr(credentials, attr))
+                else:
+                    self.assertIsNone(getattr(credentials, attr))
+
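+        # Without an explicit dict, exercise get_default_credentials for
+        # every credential type; with one, exercise get_credentials directly.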
+        if creds_dict is None:
+            for ctype in auth.Credentials.TYPES:
+                creds = auth.get_default_credentials(credential_type=ctype,
+                                                     fill_in=filled)
+                _check(creds)
+        else:
+            creds = auth.get_credentials(fill_in=filled, **creds_dict)
+            _check(creds)
+
+    def test_get_default_credentials(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(credentials_class=self.credentials_class)
+
+    def test_get_credentials(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(credentials_class=self.credentials_class,
+                                 creds_dict=self.attributes)
+
+    def test_get_credentials_not_filled(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        self._verify_credentials(credentials_class=self.credentials_class,
+                                 filled=False,
+                                 creds_dict=self.attributes)
+
+    def test_is_valid(self):
+        creds = self._get_credentials()
+        self.assertTrue(creds.is_valid())
+
+    def test_is_not_valid(self):
+        creds = self._get_credentials()
+        for attr in self.attributes.keys():
+            delattr(creds, attr)
+            self.assertFalse(creds.is_valid(),
+                             "Credentials should be invalid without %s" % attr)
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            creds = self.credentials_class.get_default(credentials_type=ctype)
+            for attr in self.attributes.keys():
+                # Default configuration values related to credentials
+                # are defined as fake_* in fake_config.py
+                self.assertEqual(getattr(creds, attr), 'fake_' + attr)
+
+    def test_reset_all_attributes(self):
+        creds = self._get_credentials()
+        initial_creds = copy.deepcopy(creds)
+        set_attr = creds.__dict__.keys()
+        missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+        # Set all unset attributes, then reset
+        for attr in missing_attr:
+            setattr(creds, attr, 'fake' + attr)
+        creds.reset()
+        # Check reset credentials are same as initial ones
+        self.assertEqual(creds, initial_creds)
+
+    def test_reset_single_attribute(self):
+        creds = self._get_credentials()
+        initial_creds = copy.deepcopy(creds)
+        set_attr = creds.__dict__.keys()
+        missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+        # Set each unset attribute in turn, then reset
+        for attr in missing_attr:
+            setattr(creds, attr, 'fake' + attr)
+            creds.reset()
+            # Check reset credentials are same as initial ones
+            self.assertEqual(creds, initial_creds)
+
+
+class KeystoneV3CredentialsTests(KeystoneV2CredentialsTests):
+    attributes = {
+        'username': 'fake_username',
+        'password': 'fake_password',
+        'project_name': 'fake_project_name',
+        'user_domain_name': 'fake_domain_name'
+    }
+
+    credentials_class = auth.KeystoneV3Credentials
+    identity_response = fake_identity._fake_v3_response
+
+    def setUp(self):
+        super(KeystoneV3CredentialsTests, self).setUp()
+        # Additional config items reset by cfg fixture after each test
+        cfg.CONF.set_default('auth_version', 'v3', group='identity')
+        # Identity group items
+        for prefix in ['', 'alt_', 'admin_']:
+            cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
+                                 group='identity')
+        # Compute Admin group items
+        cfg.CONF.set_default('domain_name', 'fake_domain_name',
+                             group='compute-admin')
+
+    def test_default(self):
+        self.useFixture(fixtures.LockFixture('auth_version'))
+        for ctype in self.credentials_class.TYPES:
+            creds = self.credentials_class.get_default(credentials_type=ctype)
+            for attr in self.attributes.keys():
+                if attr == 'project_name':
+                    config_value = 'fake_tenant_name'
+                elif attr == 'user_domain_name':
+                    config_value = 'fake_domain_name'
+                else:
+                    config_value = 'fake_' + attr
+                self.assertEqual(getattr(creds, attr), config_value)
+
+    def test_synced_attributes(self):
+        attributes = self.attributes
+        # Create V3 credentials with project and user_domain set, then check
+        # that the tenant and project_domain aliases are kept in sync
+        for attr in ['project_id', 'user_domain_id']:
+            attributes[attr] = 'fake_' + attr
+        creds = self._get_credentials(attributes)
+        self.assertEqual(creds.project_name, creds.tenant_name)
+        self.assertEqual(creds.project_id, creds.tenant_id)
+        self.assertEqual(creds.user_domain_name, creds.project_domain_name)
+        self.assertEqual(creds.user_domain_id, creds.project_domain_id)
+        # Replace user_domain with project_domain and project with tenant
+        del attributes['user_domain_name']
+        del attributes['user_domain_id']
+        del attributes['project_name']
+        del attributes['project_id']
+        for attr in ['project_domain_name', 'project_domain_id',
+                     'tenant_name', 'tenant_id']:
+            attributes[attr] = 'fake_' + attr
+        creds = self._get_credentials(attributes)
+        self.assertEqual(creds.tenant_name, creds.project_name)
+        self.assertEqual(creds.tenant_id, creds.project_id)
+        self.assertEqual(creds.project_domain_name, creds.user_domain_name)
+        self.assertEqual(creds.project_domain_id, creds.user_domain_id)
diff --git a/tempest/tests/test_glance_http.py b/tempest/tests/test_glance_http.py
new file mode 100644
index 0000000..bb2df43
--- /dev/null
+++ b/tempest/tests/test_glance_http.py
@@ -0,0 +1,200 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib
+import json
+import mock
+import six
+import socket
+
+from tempest.common import glance_http
+from tempest import exceptions
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+from tempest.tests import fake_http
+
+
+class TestGlanceHTTPClient(base.TestCase):
+
+    def setUp(self):
+        super(TestGlanceHTTPClient, self).setUp()
+        self.fake_http = fake_http.fake_httplib2(return_type=200)
+        # NOTE(maurosr): using http here implies that we will be using httplib
+        # directly. With https glance_client would use an https version, but
+        # the real backend would still be httplib anyway, and since we mock it
+        # there is no reason to care.
+        self.endpoint = 'http://fake_url.com'
+        self.fake_auth = fake_auth_provider.FakeAuthProvider()
+
+        self.fake_auth.base_url = mock.MagicMock(return_value=self.endpoint)
+
+        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+                        'request',
+                        side_effect=self.fake_http.request(self.endpoint)[1]))
+        self.client = glance_http.HTTPClient(self.fake_auth, {})
+
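+    # Build a canned httplib-style response and patch getresponse so the
+    # client under test never opens a real connection.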
+    def _set_response_fixture(self, header, status, resp_body):
+        resp = fake_http.fake_httplib(header, status=status,
+                                      body=six.StringIO(resp_body))
+        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+                        'getresponse',
+                        return_value=resp))
+        return resp
+
+    def test_json_request_without_content_type_header(self):
+        self._set_response_fixture({}, 200, 'fake_response_body')
+        resp, body = self.client.json_request('GET', '/images')
+        self.assertEqual(200, resp.status)
+        self.assertIsNone(body)
+
+    def test_json_request_with_xml_content_type_header(self):
+        self._set_response_fixture({'content-type': 'application/xml'},
+                                   200, 'fake_response_body')
+        resp, body = self.client.json_request('GET', '/images')
+        self.assertEqual(200, resp.status)
+        self.assertIsNone(body)
+
+    def test_json_request_with_content_type_header(self):
+        self._set_response_fixture({'content-type': 'application/json'},
+                                   200, 'fake_response_body')
+        resp, body = self.client.json_request('GET', '/images')
+        self.assertEqual(200, resp.status)
+        self.assertEqual('fake_response_body', body)
+
+    def test_json_request_fails_to_json_loads(self):
+        self._set_response_fixture({'content-type': 'application/json'},
+                                   200, 'fake_response_body')
+        self.useFixture(mockpatch.PatchObject(json, 'loads',
+                        side_effect=ValueError()))
+        resp, body = self.client.json_request('GET', '/images')
+        self.assertEqual(200, resp.status)
+        self.assertEqual(body, 'fake_response_body')
+
+    def test_json_request_socket_timeout(self):
+        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+                                              'request',
+                                              side_effect=socket.timeout()))
+        self.assertRaises(exceptions.TimeoutException,
+                          self.client.json_request, 'GET', '/images')
+
+    def test_json_request_endpoint_not_found(self):
+        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+                                              'request',
+                                              side_effect=socket.gaierror()))
+        self.assertRaises(exceptions.EndpointNotFound,
+                          self.client.json_request, 'GET', '/images')
+
+    def test_raw_request(self):
+        self._set_response_fixture({}, 200, 'fake_response_body')
+        resp, body = self.client.raw_request('GET', '/images')
+        self.assertEqual(200, resp.status)
+        self.assertEqual('fake_response_body', body.read())
+
+    def test_raw_request_with_response_chunked(self):
+        self._set_response_fixture({}, 200, 'fake_response_body')
+        self.useFixture(mockpatch.PatchObject(glance_http,
+                                              'CHUNKSIZE', 1))
+        resp, body = self.client.raw_request('GET', '/images')
+        self.assertEqual(200, resp.status)
+        self.assertEqual('fake_response_body', body.read())
+
+    def test_raw_request_chunked(self):
+        self.useFixture(mockpatch.PatchObject(glance_http,
+                                              'CHUNKSIZE', 1))
+        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+                        'endheaders'))
+        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+                        'send'))
+
+        self._set_response_fixture({}, 200, 'fake_response_body')
+        req_body = six.StringIO('fake_request_body')
+        resp, body = self.client.raw_request('PUT', '/images', body=req_body)
+        self.assertEqual(200, resp.status)
+        self.assertEqual('fake_response_body', body.read())
+        httplib.HTTPConnection.send.assert_call_count(req_body.len)
+
+    def test_get_connection_class_for_https(self):
+        conn_class = self.client.get_connection_class('https')
+        self.assertEqual(glance_http.VerifiedHTTPSConnection, conn_class)
+
+    def test_get_connection_class_for_http(self):
+        conn_class = (self.client.get_connection_class('http'))
+        self.assertEqual(httplib.HTTPConnection, conn_class)
+
+    def test_get_connection_http(self):
+        self.assertTrue(isinstance(self.client.get_connection(),
+                                   httplib.HTTPConnection))
+
+    def test_get_connection_https(self):
+        endpoint = 'https://fake_url.com'
+        self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
+        self.client = glance_http.HTTPClient(self.fake_auth, {})
+        self.assertTrue(isinstance(self.client.get_connection(),
+                                   glance_http.VerifiedHTTPSConnection))
+
+    def test_get_connection_url_not_found(self):
+        self.useFixture(mockpatch.PatchObject(self.client, 'connection_class',
+                                              side_effect=httplib.InvalidURL()
+                                              ))
+        self.assertRaises(exceptions.EndpointNotFound,
+                          self.client.get_connection)
+
+    def test_get_connection_kwargs_default_for_http(self):
+        kwargs = self.client.get_connection_kwargs('http')
+        self.assertEqual(600, kwargs['timeout'])
+        self.assertEqual(1, len(kwargs.keys()))
+
+    def test_get_connection_kwargs_set_timeout_for_http(self):
+        kwargs = self.client.get_connection_kwargs('http', timeout=10,
+                                                   cacert='foo')
+        self.assertEqual(10, kwargs['timeout'])
+        # nothing more than timeout is evaluated for http connections
+        self.assertEqual(1, len(kwargs.keys()))
+
+    def test_get_connection_kwargs_default_for_https(self):
+        kwargs = self.client.get_connection_kwargs('https')
+        self.assertEqual(600, kwargs['timeout'])
+        self.assertEqual(None, kwargs['cacert'])
+        self.assertEqual(None, kwargs['cert_file'])
+        self.assertEqual(None, kwargs['key_file'])
+        self.assertEqual(False, kwargs['insecure'])
+        self.assertEqual(True, kwargs['ssl_compression'])
+        self.assertEqual(6, len(kwargs.keys()))
+
+    def test_get_connection_kwargs_set_params_for_https(self):
+        kwargs = self.client.get_connection_kwargs('https', timeout=10,
+                                                   cacert='foo',
+                                                   cert_file='/foo/bar.cert',
+                                                   key_file='/foo/key.pem',
+                                                   insecure=True,
+                                                   ssl_compression=False)
+        self.assertEqual(10, kwargs['timeout'])
+        self.assertEqual('foo', kwargs['cacert'])
+        self.assertEqual('/foo/bar.cert', kwargs['cert_file'])
+        self.assertEqual('/foo/key.pem', kwargs['key_file'])
+        self.assertEqual(True, kwargs['insecure'])
+        self.assertEqual(False, kwargs['ssl_compression'])
+        self.assertEqual(6, len(kwargs.keys()))
+
+
+class TestResponseBodyIterator(base.TestCase):
+
+    def test_iter_default_chunk_size_64k(self):
+        resp = fake_http.fake_httplib({}, six.StringIO(
+            'X' * (glance_http.CHUNKSIZE + 1)))
+        iterator = glance_http.ResponseBodyIterator(resp)
+        chunks = list(iterator)
+        self.assertEqual(chunks, ['X' * glance_http.CHUNKSIZE, 'X'])
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
new file mode 100644
index 0000000..f584cb9
--- /dev/null
+++ b/tempest/tests/test_hacking.py
@@ -0,0 +1,26 @@
+# Copyright 2014 Matthew Treinish
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.hacking import checks
+from tempest.tests import base
+
+
+class HackingTestCase(base.TestCase):
+    def test_no_setupclass_for_unit_tests(self):
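+        # The check should flag setUpClass inside tempest/tests, honour the
+        # noqa marker, and ignore files outside the unit test tree.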
+        self.assertTrue(checks.no_setupclass_for_unit_tests(
+            "  def setUpClass(cls):", './tempest/tests/fake_test.py'))
+        self.assertIsNone(checks.no_setupclass_for_unit_tests(
+            "  def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
+        self.assertFalse(checks.no_setupclass_for_unit_tests(
+            "  def setUpClass(cls):", './tempest/api/fake_test.py'))
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index ae2e57d..084b6d2 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -17,6 +17,7 @@
 import neutronclient.v2_0.client as neutronclient
 from oslo.config import cfg
 
+from tempest.common import http
 from tempest.common import isolated_creds
 from tempest import config
 from tempest import exceptions
@@ -27,6 +28,8 @@
 from tempest.services.network.xml import network_client as xml_network_client
 from tempest.tests import base
 from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
 
 
 class TestTenantIsolation(base.TestCase):
@@ -35,6 +38,9 @@
         super(TestTenantIsolation, self).setUp()
         self.useFixture(fake_config.ConfigFixture())
         self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+        self.fake_http = fake_http.fake_httplib2(return_type=200)
+        self.stubs.Set(http.ClosingHttp, 'request',
+                       fake_identity._fake_v2_response)
 
     def test_tempest_client(self):
         iso_creds = isolated_creds.IsolatedCreds('test class')
@@ -108,9 +114,9 @@
                                                  password='fake_password')
         self._mock_tenant_create('1234', 'fake_prim_tenant')
         self._mock_user_create('1234', 'fake_prim_user')
-        username, tenant_name, password = iso_creds.get_primary_creds()
-        self.assertEqual(username, 'fake_prim_user')
-        self.assertEqual(tenant_name, 'fake_prim_tenant')
+        primary_creds = iso_creds.get_primary_creds(old_style=False)
+        self.assertEqual(primary_creds.username, 'fake_prim_user')
+        self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
         # Verify helper methods
         tenant = iso_creds.get_primary_tenant()
         user = iso_creds.get_primary_user()
@@ -136,10 +142,10 @@
         self.addCleanup(user_mock.stop)
         with patch.object(json_iden_client.IdentityClientJSON,
                           'assign_user_role') as user_mock:
-            username, tenant_name, password = iso_creds.get_admin_creds()
+            admin_creds = iso_creds.get_admin_creds(old_style=False)
         user_mock.assert_called_once_with('1234', '1234', '1234')
-        self.assertEqual(username, 'fake_admin_user')
-        self.assertEqual(tenant_name, 'fake_admin_tenant')
+        self.assertEqual(admin_creds.username, 'fake_admin_user')
+        self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
         # Verify helper methods
         tenant = iso_creds.get_admin_tenant()
         user = iso_creds.get_admin_user()
@@ -153,12 +159,12 @@
                                                  password='fake_password')
         tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
         user_fix = self._mock_user_create('1234', 'fake_prim_user')
-        username, tenant_name, password = iso_creds.get_primary_creds()
+        iso_creds.get_primary_creds(old_style=False)
         tenant_fix.cleanUp()
         user_fix.cleanUp()
         tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
         user_fix = self._mock_user_create('12345', 'fake_alt_user')
-        alt_username, alt_tenant, alt_password = iso_creds.get_alt_creds()
+        iso_creds.get_alt_creds(old_style=False)
         tenant_fix.cleanUp()
         user_fix.cleanUp()
         tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
@@ -170,8 +176,7 @@
                           [{'id': '123456', 'name': 'admin'}])))
         with patch.object(json_iden_client.IdentityClientJSON,
                           'assign_user_role'):
-            admin_username, admin_tenant, admin_pass = \
-                iso_creds.get_admin_creds()
+            iso_creds.get_admin_creds(old_style=False)
         user_mock = self.patch(
             'tempest.services.identity.json.identity_client.'
             'IdentityClientJSON.delete_user')
@@ -201,9 +206,9 @@
                                                  password='fake_password')
         self._mock_user_create('1234', 'fake_alt_user')
         self._mock_tenant_create('1234', 'fake_alt_tenant')
-        username, tenant_name, password = iso_creds.get_alt_creds()
-        self.assertEqual(username, 'fake_alt_user')
-        self.assertEqual(tenant_name, 'fake_alt_tenant')
+        alt_creds = iso_creds.get_alt_creds(old_style=False)
+        self.assertEqual(alt_creds.username, 'fake_alt_user')
+        self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
         # Verify helper methods
         tenant = iso_creds.get_alt_tenant()
         user = iso_creds.get_alt_user()
@@ -222,7 +227,7 @@
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClientJSON.'
             'add_router_interface_with_subnet_id')
-        username, tenant_name, password = iso_creds.get_primary_creds()
+        iso_creds.get_primary_creds(old_style=False)
         router_interface_mock.called_once_with('1234', '1234')
         network = iso_creds.get_primary_network()
         subnet = iso_creds.get_primary_subnet()
@@ -247,7 +252,7 @@
         router_interface_mock = self.patch(
             'tempest.services.network.json.network_client.NetworkClientJSON.'
             'add_router_interface_with_subnet_id')
-        username, tenant_name, password = iso_creds.get_primary_creds()
+        iso_creds.get_primary_creds(old_style=False)
         router_interface_mock.called_once_with('1234', '1234')
         router_interface_mock.reset_mock()
         tenant_fix.cleanUp()
@@ -262,7 +267,7 @@
         subnet_fix = self._mock_subnet_create(iso_creds, '12345',
                                               'fake_alt_subnet')
         router_fix = self._mock_router_create('12345', 'fake_alt_router')
-        alt_username, alt_tenant_name, password = iso_creds.get_alt_creds()
+        iso_creds.get_alt_creds(old_style=False)
         router_interface_mock.called_once_with('12345', '12345')
         router_interface_mock.reset_mock()
         tenant_fix.cleanUp()
@@ -285,7 +290,7 @@
                           [{'id': '123456', 'name': 'admin'}])))
         with patch.object(json_iden_client.IdentityClientJSON,
                           'assign_user_role'):
-            admin_user, admin_tenant, password = iso_creds.get_admin_creds()
+            iso_creds.get_admin_creds(old_style=False)
         self.patch('tempest.services.identity.json.identity_client.'
                    'IdentityClientJSON.delete_user')
         self.patch('tempest.services.identity.json.identity_client.'
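The pattern throughout this file is the same: old_style=False makes the isolated-credentials getters return an object with named attributes instead of a (username, tenant_name, password) tuple. A hedged illustration of the two calling conventions, using a stand-in class rather than the real tempest credentials type:

```python
class FakeCredentials(object):
    """Stand-in with the attributes the updated assertions rely on."""

    def __init__(self, username, tenant_name, password):
        self.username = username
        self.tenant_name = tenant_name
        self.password = password


# Old style: callers unpacked a positional triple.
username, tenant_name, password = ('fake_prim_user', 'fake_prim_tenant', 'pw')

# New style: callers read named attributes from a single object.
creds = FakeCredentials('fake_prim_user', 'fake_prim_tenant', 'pw')
assert creds.username == 'fake_prim_user'
assert creds.tenant_name == 'fake_prim_tenant'
```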
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index e8610d3..33b8d6e 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -218,10 +218,8 @@
         else:
             self.assertNotEqual(instance.state, "running")
 
-    # NOTE(afazekas): doctored test case,
-    # with normal validation it would fail
     @test.attr(type='smoke')
-    def test_integration_1(self):
+    def test_compute_with_volumes(self):
         # EC2 1. integration test (not strict)
         image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
         sec_group_name = data_utils.rand_name("securitygroup-")
@@ -249,14 +247,20 @@
                                     instance_type=self.instance_type,
                                     key_name=self.keypair_name,
                                     security_groups=(sec_group_name,))
+
+        LOG.debug("Instance booted - state: %s",
+                  reservation.instances[0].state)
+
         self.addResourceCleanUp(self.destroy_reservation,
                                 reservation)
         volume = self.ec2_client.create_volume(1, self.zone)
+        LOG.debug("Volume created - status: %s", volume.status)
+
         self.addResourceCleanUp(self.destroy_volume_wait, volume)
         instance = reservation.instances[0]
-        LOG.info("state: %s", instance.state)
         if instance.state != "running":
             self.assertInstanceStateWait(instance, "running")
+        LOG.debug("Instance now running - state: %s", instance.state)
 
         address = self.ec2_client.allocate_address()
         rcuk_a = self.addResourceCleanUp(address.delete)
@@ -284,10 +288,21 @@
         volume.attach(instance.id, "/dev/vdh")
 
         def _volume_state():
+            """Return volume state realizing that 'in-use' is overloaded."""
             volume.update(validate=True)
-            return volume.status
+            status = volume.status
+            attached = volume.attach_data.status
+            LOG.debug("Volume %s is in status: %s, attach_status: %s",
+                      volume.id, status, attached)
+            # Nova reports 'in-use' even while a volume is still attaching,
+            # because it exposes a single volume status where EC2 has two.
+            # If we are not actually attached yet, return something other
+            # than 'in-use' so the waiter below does not finish early.
+            if status == 'in-use' and attached != 'attached':
+                return 'attaching'
+            else:
+                return status
 
-        self.assertVolumeStatusWait(_volume_state, "in-use")
         wait.re_search_wait(_volume_state, "in-use")
 
         # NOTE(afazekas):  Different Hypervisor backends names
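The remapped _volume_state above matters because wait.re_search_wait keeps polling the callable until its string result matches the given pattern; without the remap, 'in-use' would match while the volume is still only attaching. A simplified poller of that shape, for illustration only (the real wait helper's signature and timeouts may differ):

```python
import re
import time


def poll_until_match(get_state, pattern, timeout=300, interval=5):
    # Poll get_state() until its result matches pattern or we time out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        state = get_state()
        if re.search(pattern, state):
            return state
        time.sleep(interval)
    raise AssertionError("state never matched %r within %ss"
                         % (pattern, timeout))
```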
@@ -296,6 +311,7 @@
 
         def _part_state():
             current = ssh.get_partitions().split('\n')
+            LOG.debug("Partition map for instance: %s", current)
             if current > part_lines:
                 return 'INCREASE'
             if current < part_lines:
@@ -311,7 +327,6 @@
 
         self.assertVolumeStatusWait(_volume_state, "available")
         wait.re_search_wait(_volume_state, "available")
-        LOG.info("Volume %s state: %s", volume.id, volume.status)
 
         wait.state_wait(_part_state, 'DECREASE')
 
@@ -323,7 +338,7 @@
         self.assertAddressReleasedWait(address)
         self.cancelResourceCleanUp(rcuk_a)
 
-        LOG.info("state: %s", instance.state)
+        LOG.debug("Instance %s state: %s", instance.id, instance.state)
         if instance.state != "stopped":
             self.assertInstanceStateWait(instance, "stopped")
         # TODO(afazekas): move steps from teardown to the test case
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
deleted file mode 100755
index 30785c4..0000000
--- a/tools/verify_tempest_config.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import json
-import sys
-import urlparse
-
-import httplib2
-
-from tempest import clients
-from tempest import config
-
-
-CONF = config.CONF
-RAW_HTTP = httplib2.Http()
-
-
-def verify_glance_api_versions(os):
-    # Check glance api versions
-    __, versions = os.image_client.get_versions()
-    if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
-                                             versions):
-        print('Config option image api_v1 should be change to: %s' % (
-            not CONF.image_feature_enabled.api_v1))
-    if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
-        print('Config option image api_v2 should be change to: %s' % (
-            not CONF.image_feature_enabled.api_v2))
-
-
-def _get_api_versions(os, service):
-    client_dict = {
-        'nova': os.servers_client,
-        'keystone': os.identity_client,
-    }
-    client_dict[service].skip_path()
-    endpoint_parts = urlparse.urlparse(client_dict[service])
-    endpoint = endpoint_parts.scheme + '//' + endpoint_parts.netloc
-    __, body = RAW_HTTP.request(endpoint, 'GET')
-    client_dict[service].reset_path()
-    body = json.loads(body)
-    if service == 'keystone':
-        versions = map(lambda x: x['id'], body['versions']['values'])
-    else:
-        versions = map(lambda x: x['id'], body['versions'])
-    return versions
-
-
-def verify_keystone_api_versions(os):
-    # Check keystone api versions
-    versions = _get_api_versions(os, 'keystone')
-    if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
-        print('Config option identity api_v2 should be change to %s' % (
-            not CONF.identity_feature_enabled.api_v2))
-    if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
-        print('Config option identity api_v3 should be change to %s' % (
-            not CONF.identity_feature_enabled.api_v3))
-
-
-def verify_nova_api_versions(os):
-    versions = _get_api_versions(os, 'nova')
-    if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
-        print('Config option compute api_v3 should be change to: %s' % (
-              not CONF.compute_feature_enabled.api_v3))
-
-
-def get_extension_client(os, service):
-    extensions_client = {
-        'nova': os.extensions_client,
-        'nova_v3': os.extensions_v3_client,
-        'cinder': os.volumes_extension_client,
-        'neutron': os.network_client,
-        'swift': os.account_client,
-    }
-    if service not in extensions_client:
-        print('No tempest extensions client for %s' % service)
-        exit(1)
-    return extensions_client[service]
-
-
-def get_enabled_extensions(service):
-    extensions_options = {
-        'nova': CONF.compute_feature_enabled.api_extensions,
-        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
-        'cinder': CONF.volume_feature_enabled.api_extensions,
-        'neutron': CONF.network_feature_enabled.api_extensions,
-        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
-    }
-    if service not in extensions_options:
-        print('No supported extensions list option for %s' % service)
-        exit(1)
-    return extensions_options[service]
-
-
-def verify_extensions(os, service, results):
-    extensions_client = get_extension_client(os, service)
-    __, resp = extensions_client.list_extensions()
-    if isinstance(resp, dict):
-        # Neutron's extension 'name' field is not a single word (it contains
-        # spaces), so it can't be used as a list option value. The
-        # api_extension option in the network-feature-enabled group therefore
-        # uses alias instead of name.
-        if service == 'neutron':
-            extensions = map(lambda x: x['alias'], resp['extensions'])
-        elif service == 'swift':
-            # Remove Swift general information from extensions list
-            resp.pop('swift')
-            extensions = resp.keys()
-        else:
-            extensions = map(lambda x: x['name'], resp['extensions'])
-
-    else:
-        extensions = map(lambda x: x['name'], resp)
-    if not results.get(service):
-        results[service] = {}
-    extensions_opt = get_enabled_extensions(service)
-    if extensions_opt[0] == 'all':
-        results[service]['extensions'] = 'all'
-        return results
-    # Verify that all configured extensions are actually enabled
-    for extension in extensions_opt:
-        results[service][extension] = extension in extensions
-    # Verify that there aren't additional extensions enabled that aren't
-    # specified in the config list
-    for extension in extensions:
-        if extension not in extensions_opt:
-            results[service][extension] = False
-    return results
-
-
-def display_results(results):
-    for service in results:
-        # If all extensions are specified as being enabled there is no way to
-        # verify this so we just assume this to be true
-        if results[service].get('extensions'):
-            continue
-        extension_list = get_enabled_extensions(service)
-        for extension in results[service]:
-            if not results[service][extension]:
-                if extension in extension_list:
-                    print("%s extension: %s should not be included in the list"
-                          " of enabled extensions" % (service, extension))
-                else:
-                    print("%s extension: %s should be included in the list of "
-                          "enabled extensions" % (service, extension))
-
-
-def check_service_availability(os):
-    services = []
-    avail_services = []
-    codename_match = {
-        'volume': 'cinder',
-        'network': 'neutron',
-        'image': 'glance',
-        'object_storage': 'swift',
-        'compute': 'nova',
-        'orchestration': 'heat',
-        'metering': 'ceilometer',
-        'telemetry': 'ceilometer',
-        'data_processing': 'savanna',
-        'baremetal': 'ironic',
-        'identity': 'keystone'
-
-    }
-    # Get catalog list for endpoints to use for validation
-    __, endpoints = os.endpoints_client.list_endpoints()
-    for endpoint in endpoints:
-        __, service = os.service_client.get_service(endpoint['service_id'])
-        services.append(service['type'])
-    # Pull all catalog types from config file and compare against endpoint list
-    for cfgname in dir(CONF._config):
-        cfg = getattr(CONF, cfgname)
-        catalog_type = getattr(cfg, 'catalog_type', None)
-        if not catalog_type:
-            continue
-        else:
-            if cfgname == 'identity':
-                # Keystone is a required service for tempest
-                continue
-            if catalog_type not in services:
-                if getattr(CONF.service_available, codename_match[cfgname]):
-                    print('Endpoint type %s not found either disable service '
-                          '%s or fix the catalog_type in the config file' % (
-                          catalog_type, codename_match[cfgname]))
-            else:
-                if not getattr(CONF.service_available,
-                               codename_match[cfgname]):
-                    print('Endpoint type %s is available, service %s should be'
-                          ' set as available in the config file.' % (
-                          catalog_type, codename_match[cfgname]))
-                else:
-                    avail_services.append(codename_match[cfgname])
-    return avail_services
-
-
-def main(argv):
-    print('Running config verification...')
-    os = clients.ComputeAdminManager(interface='json')
-    services = check_service_availability(os)
-    results = {}
-    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
-        if service == 'nova_v3' and 'nova' not in services:
-            continue
-        elif service not in services:
-            continue
-        results = verify_extensions(os, service, results)
-    verify_keystone_api_versions(os)
-    verify_glance_api_versions(os)
-    verify_nova_api_versions(os)
-    display_results(results)
-
-
-if __name__ == "__main__":
-    main(sys.argv)
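For reference, the extension verification this removed script performed reduces to a two-way comparison: every configured extension must be discovered on the API, and every discovered extension must be configured. A condensed, illustrative version of that comparison (function and variable names here are hypothetical):

```python
def compare_extensions(configured, discovered):
    # 'all' means the config makes no claim that can be verified.
    if configured and configured[0] == 'all':
        return {'extensions': 'all'}
    results = {}
    # Configured extensions missing from the API show up as False...
    for ext in configured:
        results[ext] = ext in discovered
    # ...and so do API extensions missing from the config.
    for ext in discovered:
        if ext not in configured:
            results[ext] = False
    return results


# Example: 'os-fake' is configured but absent, 'os-extra' is present but
# unconfigured; both are flagged False.
print(compare_extensions(['os-agents', 'os-fake'], ['os-agents', 'os-extra']))
```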