Merge "Full response for v3 ServiceClient methods"
diff --git a/HACKING.rst b/HACKING.rst
index 45c35df..6ddb8ac 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -275,7 +275,7 @@
 Test Documentation
 ------------------
 For tests being added we need to require inline documentation in the form of
-docstings to explain what is being tested. In API tests for a new API a class
+docstrings to explain what is being tested. In API tests for a new API a class
 level docstring should be added to an API reference doc. If one doesn't exist
 a TODO comment should be put indicating that the reference needs to be added.
 For individual API test cases a method level docstring should be used to
diff --git a/requirements.txt b/requirements.txt
index f00de0d..cc2a187 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-pbr<2.0,>=1.3
-cliff>=1.13.0 # Apache-2.0
+pbr<2.0,>=1.4
+cliff>=1.14.0 # Apache-2.0
 anyjson>=0.3.3
 httplib2>=0.7.5
 jsonschema!=2.5.0,<3.0.0,>=2.0.0
@@ -13,11 +13,11 @@
 testrepository>=0.0.18
 pyOpenSSL>=0.14
 oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=1.11.0 # Apache-2.0
+oslo.config>=2.1.0 # Apache-2.0
 oslo.i18n>=1.5.0 # Apache-2.0
 oslo.log>=1.8.0 # Apache-2.0
 oslo.serialization>=1.4.0 # Apache-2.0
-oslo.utils>=1.9.0 # Apache-2.0
+oslo.utils>=2.0.0 # Apache-2.0
 six>=1.9.0
 iso8601>=0.1.9
 fixtures>=1.3.1
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index 4830dcd..b6dee18 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -20,7 +20,7 @@
 
 
 class TestNodes(base.BaseBaremetalTest):
-    '''Tests for baremetal nodes.'''
+    """Tests for baremetal nodes."""
 
     def setUp(self):
         super(TestNodes, self).setUp()
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 6ffa4e9..79c2ac9 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -32,6 +32,7 @@
         super(LiveBlockMigrationTestJSON, cls).setup_clients()
         cls.admin_hosts_client = cls.os_adm.hosts_client
         cls.admin_servers_client = cls.os_adm.servers_client
+        cls.admin_migration_client = cls.os_adm.migrations_client
 
     @classmethod
     def resource_setup(cls):
@@ -55,9 +56,10 @@
         return self._get_server_details(server_id)[self._host_key]
 
     def _migrate_server_to(self, server_id, dest_host):
+        bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         body = self.admin_servers_client.live_migrate_server(
-            server_id, dest_host,
-            CONF.compute_feature_enabled.block_migration_for_live_migration)
+            server_id, host=dest_host, block_migration=bmflm,
+            disk_over_commit=False)
         return body
 
     def _get_host_other_than(self, host):
@@ -109,7 +111,16 @@
 
         self._migrate_server_to(server_id, target_host)
         waiters.wait_for_server_status(self.servers_client, server_id, state)
-        self.assertEqual(target_host, self._get_host_for_server(server_id))
+        migration_list = self.admin_migration_client.list_migrations()
+
+        msg = ("Live Migration failed. Migrations list for Instance "
+               "%s: [" % server_id)
+        for live_migration in migration_list:
+            if live_migration['instance_uuid'] == server_id:
+                msg += "\n%s" % live_migration
+        msg += "]"
+        self.assertEqual(target_host, self._get_host_for_server(server_id),
+                         msg)
 
     @test.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
     @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
@@ -153,7 +164,7 @@
         self.addCleanup(self._volume_clean_up, server_id, volume['id'])
 
         # Attach the volume to the server
-        self.servers_client.attach_volume(server_id, volume['id'],
+        self.servers_client.attach_volume(server_id, volumeId=volume['id'],
                                           device='/dev/xvdb')
         self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
 
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index ab82d91..d16c020 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -48,7 +48,7 @@
         body = cls.glance_client.create_image(name=name,
                                               container_format='bare',
                                               disk_format='raw',
-                                              is_public=False)
+                                              is_public=False)['image']
         cls.image_id = body['id']
         cls.images.append(cls.image_id)
         image_file = six.StringIO(('*' * 1024))
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 2c0ce59..247a57b 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -54,7 +54,7 @@
             body = cls.glance_client.create_image(name=name,
                                                   container_format='bare',
                                                   disk_format='raw',
-                                                  is_public=False)
+                                                  is_public=False)['image']
             image_id = body['id']
             cls.images.append(image_id)
             # Wait 1 second between creation and upload to ensure a delta
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index b2acd34..673a401 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -122,7 +122,7 @@
         waiters.wait_for_volume_status(volumes_client,
                                        volume['id'], 'available')
         self.client.attach_volume(server['id'],
-                                  volume['id'],
+                                  volumeId=volume['id'],
                                   device=device)
         waiters.wait_for_volume_status(volumes_client,
                                        volume['id'], 'in-use')
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f0f6b8c..a20f7f5 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -323,7 +323,7 @@
             properties=properties,
             status='active',
             sort_key='created_at',
-            sort_dir='asc')
+            sort_dir='asc')['images']
         self.assertEqual(2, len(image_list))
         self.assertEqual((backup1, backup2),
                          (image_list[0]['name'], image_list[1]['name']))
@@ -347,7 +347,7 @@
             properties=properties,
             status='active',
             sort_key='created_at',
-            sort_dir='asc')
+            sort_dir='asc')['images']
         self.assertEqual(2, len(image_list),
                          'Unexpected number of images for '
                          'v2:test_create_backup; was the oldest backup not '
@@ -474,6 +474,7 @@
     def test_lock_unlock_server(self):
         # Lock the server,try server stop(exceptions throw),unlock it and retry
         self.client.lock_server(self.server_id)
+        self.addCleanup(self.client.unlock_server, self.server_id)
         server = self.client.show_server(self.server_id)
         self.assertEqual(server['status'], 'ACTIVE')
         # Locked server is not allowed to be stopped by non-admin user
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 2fe63ed..7a25526 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -137,7 +137,7 @@
         self.assertRaises(lib_exc.Conflict,
                           self.servers_client.attach_volume,
                           self.server_id,
-                          volume['id'],
+                          volumeId=volume['id'],
                           device='/dev/%s' % self.device)
 
     @test.idempotent_id('f56e465b-fe10-48bf-b75d-646cda3a8bc9')
@@ -148,7 +148,7 @@
 
         # Attach the volume to the server
         self.servers_client.attach_volume(self.server_id,
-                                          volume['id'],
+                                          volumeId=volume['id'],
                                           device='/dev/%s' % self.device)
         waiters.wait_for_volume_status(self.volumes_extensions_client,
                                        volume['id'], 'in-use')
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index e7111b0..b542d7f 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -70,10 +70,11 @@
         body = cls.glance_client.create_image(name=name,
                                               container_format='bare',
                                               disk_format='raw',
-                                              is_public=False)
+                                              is_public=False)['image']
         image_id = body['id']
         image_file = six.StringIO(('*' * 1024))
-        body = cls.glance_client.update_image(image_id, data=image_file)
+        body = cls.glance_client.update_image(image_id,
+                                              data=image_file)['image']
         cls.glance_client.wait_for_image_status(image_id, 'active')
         cls.image = cls.images_client.show_image(image_id)
 
diff --git a/tempest/api/compute/test_live_block_migration_negative.py b/tempest/api/compute/test_live_block_migration_negative.py
index fabe55d..2cd85f2 100644
--- a/tempest/api/compute/test_live_block_migration_negative.py
+++ b/tempest/api/compute/test_live_block_migration_negative.py
@@ -40,10 +40,10 @@
         cls.admin_servers_client = cls.os_adm.servers_client
 
     def _migrate_server_to(self, server_id, dest_host):
+        bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
         body = self.admin_servers_client.live_migrate_server(
-            server_id, dest_host,
-            CONF.compute_feature_enabled.
-            block_migration_for_live_migration)
+            server_id, host=dest_host, block_migration=bmflm,
+            disk_over_commit=False)
         return body
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 8e4278a..6496854 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -83,7 +83,7 @@
         # Attach the volume to the server
         self.attachment = self.servers_client.attach_volume(
             self.server['id'],
-            self.volume['id'],
+            volumeId=self.volume['id'],
             device='/dev/%s' % self.device)
         self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
 
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 904cbb6..5d78539 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -297,6 +297,7 @@
                                                           flavor_id,
                                                           node_configs,
                                                           **kwargs)
+        resp_body = resp_body['node_group_template']
         # store id of created node group template
         cls._node_group_templates.append(resp_body['id'])
 
@@ -316,6 +317,7 @@
                                                        node_groups,
                                                        cluster_configs,
                                                        **kwargs)
+        resp_body = resp_body['cluster_template']
         # store id of created cluster template
         cls._cluster_templates.append(resp_body['id'])
 
@@ -330,6 +332,7 @@
         removed in tearDownClass method.
         """
         resp_body = cls.client.create_data_source(name, type, url, **kwargs)
+        resp_body = resp_body['data_source']
         # store id of created data source
         cls._data_sources.append(resp_body['id'])
 
@@ -343,6 +346,7 @@
         be automatically removed in tearDownClass method.
         """
         resp_body = cls.client.create_job_binary_internal(name, data)
+        resp_body = resp_body['job_binary_internal']
         # store id of created job binary internal
         cls._job_binary_internals.append(resp_body['id'])
 
@@ -357,6 +361,7 @@
         removed in tearDownClass method.
         """
         resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
+        resp_body = resp_body['job_binary']
         # store id of created job binary
         cls._job_binaries.append(resp_body['id'])
 
@@ -372,6 +377,7 @@
         """
         resp_body = cls.client.create_job(name,
                                           job_type, mains, libs, **kwargs)
+        resp_body = resp_body['job']
         # store id of created job
         cls._jobs.append(resp_body['id'])
 
@@ -400,7 +406,7 @@
         """
         if not cls.default_plugin:
             return None
-        plugin = cls.client.get_plugin(cls.default_plugin)
+        plugin = cls.client.get_plugin(cls.default_plugin)['plugin']
 
         for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
             if version in plugin['versions']:
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index e357a85..42cbd14 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -98,7 +98,7 @@
         template_info = self._create_cluster_template()
 
         # check for cluster template in list
-        templates = self.client.list_cluster_templates()
+        templates = self.client.list_cluster_templates()['cluster_templates']
         templates_info = [(template['id'], template['name'])
                           for template in templates]
         self.assertIn(template_info, templates_info)
@@ -110,6 +110,7 @@
 
         # check cluster template fetch by id
         template = self.client.get_cluster_template(template_id)
+        template = template['cluster_template']
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.cluster_template, template)
 
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index dd16b2f..67d09a0 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -68,13 +68,13 @@
 
     def _list_data_sources(self, source_info):
         # check for data source in list
-        sources = self.client.list_data_sources()
+        sources = self.client.list_data_sources()['data_sources']
         sources_info = [(source['id'], source['name']) for source in sources]
         self.assertIn(source_info, sources_info)
 
     def _get_data_source(self, source_id, source_name, source_body):
         # check data source fetch by id
-        source = self.client.get_data_source(source_id)
+        source = self.client.get_data_source(source_id)['data_source']
         self.assertEqual(source_name, source['name'])
         self.assertDictContainsSubset(source_body, source)
 
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index fb21270..98b7e24 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -80,7 +80,7 @@
         binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
 
         # check for job binary in list
-        binaries = self.client.list_job_binaries()
+        binaries = self.client.list_job_binaries()['binaries']
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -91,7 +91,7 @@
             self._create_job_binary(self.swift_job_binary_with_extra))
 
         # check job binary fetch by id
-        binary = self.client.get_job_binary(binary_id)
+        binary = self.client.get_job_binary(binary_id)['job_binary']
         self.assertEqual(binary_name, binary['name'])
         self.assertDictContainsSubset(self.swift_job_binary, binary)
 
@@ -115,7 +115,7 @@
         binary_info = self._create_job_binary(self.internal_db_job_binary)
 
         # check for job binary in list
-        binaries = self.client.list_job_binaries()
+        binaries = self.client.list_job_binaries()['binaries']
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -126,7 +126,7 @@
             self._create_job_binary(self.internal_db_job_binary))
 
         # check job binary fetch by id
-        binary = self.client.get_job_binary(binary_id)
+        binary = self.client.get_job_binary(binary_id)['job_binary']
         self.assertEqual(binary_name, binary['name'])
         self.assertDictContainsSubset(self.internal_db_job_binary, binary)
 
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 3d76ebe..6919fa5 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -57,7 +57,7 @@
         binary_info = self._create_job_binary_internal()
 
         # check for job binary internal in list
-        binaries = self.client.list_job_binary_internals()
+        binaries = self.client.list_job_binary_internals()['binaries']
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -68,7 +68,7 @@
 
         # check job binary internal fetch by id
         binary = self.client.get_job_binary_internal(binary_id)
-        self.assertEqual(binary_name, binary['name'])
+        self.assertEqual(binary_name, binary['job_binary_internal']['name'])
 
     @test.attr(type='smoke')
     @test.idempotent_id('b3568c33-4eed-40d5-aae4-6ff3b2ac58f5')
diff --git a/tempest/api/data_processing/test_jobs.py b/tempest/api/data_processing/test_jobs.py
index 83eb54d..7798056 100644
--- a/tempest/api/data_processing/test_jobs.py
+++ b/tempest/api/data_processing/test_jobs.py
@@ -71,7 +71,7 @@
         job_info = self._create_job()
 
         # check for job in list
-        jobs = self.client.list_jobs()
+        jobs = self.client.list_jobs()['jobs']
         jobs_info = [(job['id'], job['name']) for job in jobs]
         self.assertIn(job_info, jobs_info)
 
@@ -81,7 +81,7 @@
         job_id, job_name = self._create_job()
 
         # check job fetch by id
-        job = self.client.get_job(job_id)
+        job = self.client.get_job(job_id)['job']
         self.assertEqual(job_name, job['name'])
 
     @test.attr(type='smoke')
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index 102799d..388bb58 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -65,6 +65,7 @@
 
         # check for node group template in list
         templates = self.client.list_node_group_templates()
+        templates = templates['node_group_templates']
         templates_info = [(template['id'], template['name'])
                           for template in templates]
         self.assertIn(template_info, templates_info)
@@ -76,6 +77,7 @@
 
         # check node group template fetch by id
         template = self.client.get_node_group_template(template_id)
+        template = template['node_group_template']
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.node_group_template, template)
 
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index 92a5bd0..14594e4 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -25,7 +25,7 @@
 
         It ensures main plugins availability.
         """
-        plugins = self.client.list_plugins()
+        plugins = self.client.list_plugins()['plugins']
         plugins_names = [plugin['name'] for plugin in plugins]
         for enabled_plugin in CONF.data_processing_feature_enabled.plugins:
             self.assertIn(enabled_plugin, plugins_names)
@@ -41,12 +41,13 @@
     @test.idempotent_id('53cf6487-2cfb-4a6f-8671-97c542c6e901')
     def test_plugin_get(self):
         for plugin_name in self._list_all_plugin_names():
-            plugin = self.client.get_plugin(plugin_name)
+            plugin = self.client.get_plugin(plugin_name)['plugin']
             self.assertEqual(plugin_name, plugin['name'])
 
             for plugin_version in plugin['versions']:
                 detailed_plugin = self.client.get_plugin(plugin_name,
                                                          plugin_version)
+                detailed_plugin = detailed_plugin['plugin']
                 self.assertEqual(plugin_name, detailed_plugin['name'])
 
                 # check that required image tags contains name and version
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 87013db..4572310 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -90,6 +90,26 @@
         super(BaseV1ImageTest, cls).setup_clients()
         cls.client = cls.os.image_client
 
+    # TODO(jswarren) Remove this method once the v2 client also returns the
+    # full response object, not just the ['image'] value. At that
+    # point BaseImageTest.create_image will need to retrieve the
+    # ['image'] value.
+    @classmethod
+    def create_image(cls, **kwargs):
+        """Wrapper that returns a test image."""
+        name = data_utils.rand_name(cls.__name__ + "-instance")
+
+        if 'name' in kwargs:
+            name = kwargs.pop('name')
+
+        container_format = kwargs.pop('container_format')
+        disk_format = kwargs.pop('disk_format')
+
+        image = cls.client.create_image(name, container_format,
+                                        disk_format, **kwargs)['image']
+        cls.created_images.append(image['id'])
+        return image
+
 
 class BaseV1ImageMembersTest(BaseV1ImageTest):
 
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8beed32..7739d16 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -45,7 +45,7 @@
 
         # Now try uploading an image file
         image_file = moves.cStringIO(data_utils.random_bytes())
-        body = self.client.update_image(image_id, data=image_file)
+        body = self.client.update_image(image_id, data=image_file)['image']
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
@@ -168,14 +168,14 @@
     @test.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
     def test_index_no_params(self):
         # Simple test to see all fixture images returned
-        images_list = self.client.list_images()
+        images_list = self.client.list_images()['images']
         image_list = map(lambda x: x['id'], images_list)
         for image_id in self.created_images:
             self.assertIn(image_id, image_list)
 
     @test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
     def test_index_disk_format(self):
-        images_list = self.client.list_images(disk_format='ami')
+        images_list = self.client.list_images(disk_format='ami')['images']
         for image in images_list:
             self.assertEqual(image['disk_format'], 'ami')
         result_set = set(map(lambda x: x['id'], images_list))
@@ -184,7 +184,8 @@
 
     @test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
     def test_index_container_format(self):
-        images_list = self.client.list_images(container_format='bare')
+        images_list = (self.client.list_images(container_format='bare')
+                       ['images'])
         for image in images_list:
             self.assertEqual(image['container_format'], 'bare')
         result_set = set(map(lambda x: x['id'], images_list))
@@ -193,7 +194,7 @@
 
     @test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
     def test_index_max_size(self):
-        images_list = self.client.list_images(size_max=42)
+        images_list = self.client.list_images(size_max=42)['images']
         for image in images_list:
             self.assertTrue(image['size'] <= 42)
         result_set = set(map(lambda x: x['id'], images_list))
@@ -202,7 +203,7 @@
 
     @test.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
     def test_index_min_size(self):
-        images_list = self.client.list_images(size_min=142)
+        images_list = self.client.list_images(size_min=142)['images']
         for image in images_list:
             self.assertTrue(image['size'] >= 142)
         result_set = set(map(lambda x: x['id'], images_list))
@@ -214,7 +215,7 @@
         images_list = self.client.list_images(detail=True,
                                               status='active',
                                               sort_key='size',
-                                              sort_dir='desc')
+                                              sort_dir='desc')['images']
         top_size = images_list[0]['size']  # We have non-zero sized images
         for image in images_list:
             size = image['size']
@@ -226,7 +227,7 @@
     def test_index_name(self):
         images_list = self.client.list_images(
             detail=True,
-            name='New Remote Image dup')
+            name='New Remote Image dup')['images']
         result_set = set(map(lambda x: x['id'], images_list))
         for image in images_list:
             self.assertEqual(image['name'], 'New Remote Image dup')
@@ -272,7 +273,7 @@
         self.assertEqual(metadata['properties'], {'key1': 'value1'})
         metadata['properties'].update(req_metadata)
         metadata = self.client.update_image(
-            self.image_id, properties=metadata['properties'])
+            self.image_id, properties=metadata['properties'])['image']
 
         resp_metadata = self.client.get_image_meta(self.image_id)
         expected = {'key1': 'alt1', 'key2': 'value2'}
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 78b51c8..1308414 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -277,11 +277,11 @@
         test_routes = []
         routes_num = 5
         # Create a router
-        self.router = self._create_router(
+        router = self._create_router(
             data_utils.rand_name('router-'), True)
         self.addCleanup(
             self._delete_extra_routes,
-            self.router['id'])
+            router['id'])
         # Update router extra route, second ip of the range is
         # used as next hop
         for i in range(routes_num):
@@ -290,7 +290,7 @@
             next_cidr = next_cidr.next()
 
             # Add router interface with subnet id
-            self.create_router_interface(self.router['id'], subnet['id'])
+            self.create_router_interface(router['id'], subnet['id'])
 
             cidr = netaddr.IPNetwork(subnet['cidr'])
             next_hop = str(cidr[2])
@@ -300,9 +300,9 @@
             )
 
         test_routes.sort(key=lambda x: x['destination'])
-        extra_route = self.client.update_extra_routes(self.router['id'],
+        extra_route = self.client.update_extra_routes(router['id'],
                                                       test_routes)
-        show_body = self.client.show_router(self.router['id'])
+        show_body = self.client.show_router(router['id'])
         # Assert the number of routes
         self.assertEqual(routes_num, len(extra_route['router']['routes']))
         self.assertEqual(routes_num, len(show_body['router']['routes']))
@@ -327,13 +327,13 @@
 
     @test.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
     def test_update_router_admin_state(self):
-        self.router = self._create_router(data_utils.rand_name('router-'))
-        self.assertFalse(self.router['admin_state_up'])
+        router = self._create_router(data_utils.rand_name('router-'))
+        self.assertFalse(router['admin_state_up'])
         # Update router admin state
-        update_body = self.client.update_router(self.router['id'],
+        update_body = self.client.update_router(router['id'],
                                                 admin_state_up=True)
         self.assertTrue(update_body['router']['admin_state_up'])
-        show_body = self.client.show_router(self.router['id'])
+        show_body = self.client.show_router(router['id'])
         self.assertTrue(show_body['router']['admin_state_up'])
 
     @test.attr(type='smoke')
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 0f9b7dd..5d1784f 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -85,6 +85,11 @@
         body = client.create_image(
             data_utils.rand_name('image'), container_format='bare',
             disk_format='raw', visibility='private')
+        # TODO(jswarren) Move ['image'] up to initial body value assignment
+        # once both v1 and v2 glance clients include the full response
+        # object.
+        if 'image' in body:
+            body = body['image']
         cls.image_ids.append(body['id'])
         return body
 
diff --git a/tempest/api/volume/test_extensions.py b/tempest/api/volume/test_extensions.py
index 17db45f..cce9ace 100644
--- a/tempest/api/volume/test_extensions.py
+++ b/tempest/api/volume/test_extensions.py
@@ -30,7 +30,8 @@
     @test.idempotent_id('94607eb0-43a5-47ca-82aa-736b41bd2e2c')
     def test_list_extensions(self):
         # List of all extensions
-        extensions = self.volumes_extension_client.list_extensions()
+        extensions = (self.volumes_extension_client.list_extensions()
+                      ['extensions'])
         if len(CONF.volume_feature_enabled.api_extensions) == 0:
             raise self.skipException('There are not any extensions configured')
         extension_list = [extension.get('alias') for extension in extensions]
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 1df1896..5860501 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -75,7 +75,8 @@
                                        'ACTIVE')
         mountpoint = '/dev/%s' % CONF.compute.volume_device_name
         self.servers_client.attach_volume(
-            server['id'], self.volume_origin['id'], mountpoint)
+            server['id'], volumeId=self.volume_origin['id'],
+            device=mountpoint)
         self.volumes_client.wait_for_volume_status(self.volume_origin['id'],
                                                    'in-use')
         self.addCleanup(self.volumes_client.wait_for_volume_status,
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 89b0842..e9530a2 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -342,7 +342,7 @@
             'is_public': 'False',
         }
         params['properties'] = properties
-        image = self.image_client.create_image(**params)
+        image = self.image_client.create_image(**params)['image']
         self.addCleanup(self.image_client.delete_image, image['id'])
         self.assertEqual("queued", image['status'])
         self.image_client.update_image(image['id'], data=image_file)
@@ -419,7 +419,7 @@
 
     def nova_volume_attach(self):
         volume = self.servers_client.attach_volume(
-            self.server['id'], self.volume['id'], '/dev/%s'
+            self.server['id'], volumeId=self.volume['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)
         self.assertEqual(self.volume['id'], volume['id'])
         self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index eb018eb..8e91a6d 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -26,6 +26,7 @@
 class HorizonHTMLParser(HTMLParser.HTMLParser):
     csrf_token = None
     region = None
+    login = None
 
     def _find_name(self, attrs, name):
         for attrpair in attrs:
@@ -39,12 +40,20 @@
                 return attrpair[1]
         return None
 
+    def _find_attr_value(self, attrs, attr_name):
+        for attrpair in attrs:
+            if attrpair[0] == attr_name:
+                return attrpair[1]
+        return None
+
     def handle_starttag(self, tag, attrs):
         if tag == 'input':
             if self._find_name(attrs, 'csrfmiddlewaretoken'):
                 self.csrf_token = self._find_value(attrs)
             if self._find_name(attrs, 'region'):
                 self.region = self._find_value(attrs)
+        if tag == 'form':
+            self.login = self._find_attr_value(attrs, 'action')
 
 
 class TestDashboardBasicOps(manager.ScenarioTest):
@@ -79,8 +88,12 @@
         parser = HorizonHTMLParser()
         parser.feed(response)
 
+        # Construct the login URL for the dashboard; discovery accommodates a
+        # non-/ web root.
+        login_url = CONF.dashboard.dashboard_url + parser.login[1:]
+
         # Prepare login form request
-        req = request.Request(CONF.dashboard.login_url)
+        req = request.Request(login_url)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         req.add_header('Referer', CONF.dashboard.dashboard_url)
         params = {'username': username,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index e676063..12af667 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -338,8 +338,8 @@
 
         for remote_ip in address_list:
             if should_connect:
-                msg = "Timed out waiting for "
-                "%s to become reachable" % remote_ip
+                msg = ("Timed out waiting for %s to become "
+                       "reachable") % remote_ip
             else:
                 msg = "ip address %s is reachable" % remote_ip
             try:
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index f61b151..3019cc4 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -16,6 +16,7 @@
 from oslo_log import log as logging
 
 from tempest import config
+from tempest import exceptions
 from tempest.scenario import manager
 from tempest.scenario import utils as test_utils
 from tempest import test
@@ -98,9 +99,24 @@
     def verify_metadata(self):
         if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
             # Verify metadata service
-            result = self.ssh_client.exec_command(
-                "curl http://169.254.169.254/latest/meta-data/public-ipv4")
-            self.assertEqual(self.floating_ip['ip'], result)
+            md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+
+            def exec_cmd_and_verify_output():
+                cmd = 'curl ' + md_url
+                floating_ip = self.floating_ip['ip']
+                result = self.ssh_client.exec_command(cmd)
+                if result:
+                    msg = ('Failed while verifying metadata on server. Result '
+                           'of command "%s" is NOT "%s".' % (cmd, floating_ip))
+                    self.assertEqual(floating_ip, result, msg)
+                    return 'Verification is successful!'
+
+            if not test.call_until_true(exec_cmd_and_verify_output,
+                                        CONF.compute.build_timeout,
+                                        CONF.compute.build_interval):
+                raise exceptions.TimeoutException('Timed out while waiting to '
+                                                  'verify metadata on server. '
+                                                  '%s is empty.' % md_url)
 
     @test.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
     @test.attr(type='smoke')
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c1d9a1b..8b5a595 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -104,7 +104,7 @@
 
     def _attach_volume(self, server, volume):
         attached_volume = self.servers_client.attach_volume(
-            server['id'], volume['id'], device='/dev/%s'
+            server['id'], volumeId=volume['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)
         self.assertEqual(volume['id'], attached_volume['id'])
         self._wait_for_volume_status(attached_volume, 'in-use')
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 1159a58..f523f11 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -206,15 +206,7 @@
                                post_body)
         if response_key is not None:
             body = json.loads(body)
-            # Check for Schema as 'None' because if we do not have any server
-            # action schema implemented yet then they can pass 'None' to skip
-            # the validation.Once all server action has their schema
-            # implemented then, this check can be removed if every actions are
-            # supposed to validate their response.
-            # TODO(GMann): Remove the below 'if' check once all server actions
-            # schema are implemented.
-            if schema is not None:
-                self.validate_response(schema, resp, body)
+            self.validate_response(schema, resp, body)
             body = body[response_key]
         else:
             self.validate_response(schema, resp, body)
@@ -341,14 +333,9 @@
     def start(self, server_id, **kwargs):
         return self.action(server_id, 'os-start', None, **kwargs)
 
-    def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
+    def attach_volume(self, server_id, **kwargs):
         """Attaches a volume to a server instance."""
-        post_body = json.dumps({
-            'volumeAttachment': {
-                'volumeId': volume_id,
-                'device': device,
-            }
-        })
+        post_body = json.dumps({'volumeAttachment': kwargs})
         resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
                                post_body)
         body = json.loads(body)
@@ -386,16 +373,10 @@
         """Removes a security group from the server."""
         return self.action(server_id, 'removeSecurityGroup', None, name=name)
 
-    def live_migrate_server(self, server_id, dest_host, use_block_migration):
+    def live_migrate_server(self, server_id, **kwargs):
         """This should be called with administrator privileges ."""
 
-        migrate_params = {
-            "disk_over_commit": False,
-            "block_migration": use_block_migration,
-            "host": dest_host
-        }
-
-        req_body = json.dumps({'os-migrateLive': migrate_params})
+        req_body = json.dumps({'os-migrateLive': kwargs})
 
         resp, body = self.post("servers/%s/action" % server_id, req_body)
         self.validate_response(schema.server_actions_common_schema,
diff --git a/tempest/services/data_processing/v1_1/data_processing_client.py b/tempest/services/data_processing/v1_1/data_processing_client.py
index bbc0f2a..cba4c42 100644
--- a/tempest/services/data_processing/v1_1/data_processing_client.py
+++ b/tempest/services/data_processing/v1_1/data_processing_client.py
@@ -39,8 +39,8 @@
         self.expected_success(resp_status, resp.status)
         return resp, body
 
-    def _request_check_and_parse_resp(self, request_func, uri, resp_status,
-                                      resource_name, *args, **kwargs):
+    def _request_check_and_parse_resp(self, request_func, uri,
+                                      resp_status, *args, **kwargs):
         """Make a request using specified request_func, check response status
         code and parse response body.
 
@@ -50,36 +50,19 @@
         resp, body = request_func(uri, headers=headers, *args, **kwargs)
         self.expected_success(resp_status, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body[resource_name])
-
-    def _request_check_and_parse_resp_list(self, request_func, uri,
-                                           resp_status, resource_name,
-                                           *args, **kwargs):
-        """Make a request using specified request_func, check response status
-        code and parse response body.
-
-        It returns a ResponseBodyList.
-        """
-        headers = {'Content-Type': 'application/json'}
-        resp, body = request_func(uri, headers=headers, *args, **kwargs)
-        self.expected_success(resp_status, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body[resource_name])
+        return service_client.ResponseBody(resp, body)
 
     def list_node_group_templates(self):
         """List all node group templates for a user."""
 
         uri = 'node-group-templates'
-        return self._request_check_and_parse_resp_list(self.get, uri,
-                                                       200,
-                                                       'node_group_templates')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_node_group_template(self, tmpl_id):
         """Returns the details of a single node group template."""
 
         uri = 'node-group-templates/%s' % tmpl_id
-        return self._request_check_and_parse_resp(self.get, uri,
-                                                  200, 'node_group_template')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_node_group_template(self, name, plugin_name, hadoop_version,
                                    node_processes, flavor_id,
@@ -100,7 +83,6 @@
             'node_configs': node_configs or dict(),
         })
         return self._request_check_and_parse_resp(self.post, uri, 202,
-                                                  'node_group_template',
                                                   body=json.dumps(body))
 
     def delete_node_group_template(self, tmpl_id):
@@ -113,8 +95,7 @@
         """List all enabled plugins."""
 
         uri = 'plugins'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'plugins')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_plugin(self, plugin_name, plugin_version=None):
         """Returns the details of a single plugin."""
@@ -122,22 +103,19 @@
         uri = 'plugins/%s' % plugin_name
         if plugin_version:
             uri += '/%s' % plugin_version
-        return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def list_cluster_templates(self):
         """List all cluster templates for a user."""
 
         uri = 'cluster-templates'
-        return self._request_check_and_parse_resp_list(self.get, uri,
-                                                       200,
-                                                       'cluster_templates')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_cluster_template(self, tmpl_id):
         """Returns the details of a single cluster template."""
 
         uri = 'cluster-templates/%s' % tmpl_id
-        return self._request_check_and_parse_resp(self.get,
-                                                  uri, 200, 'cluster_template')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_cluster_template(self, name, plugin_name, hadoop_version,
                                 node_groups, cluster_configs=None,
@@ -157,7 +135,6 @@
             'cluster_configs': cluster_configs or dict(),
         })
         return self._request_check_and_parse_resp(self.post, uri, 202,
-                                                  'cluster_template',
                                                   body=json.dumps(body))
 
     def delete_cluster_template(self, tmpl_id):
@@ -170,16 +147,13 @@
         """List all data sources for a user."""
 
         uri = 'data-sources'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200,
-                                                       'data_sources')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_data_source(self, source_id):
         """Returns the details of a single data source."""
 
         uri = 'data-sources/%s' % source_id
-        return self._request_check_and_parse_resp(self.get,
-                                                  uri, 200, 'data_source')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_data_source(self, name, data_source_type, url, **kwargs):
         """Creates data source with specified params.
@@ -195,8 +169,7 @@
             'url': url
         })
         return self._request_check_and_parse_resp(self.post, uri,
-                                                  202, 'data_source',
-                                                  body=json.dumps(body))
+                                                  202, body=json.dumps(body))
 
     def delete_data_source(self, source_id):
         """Deletes the specified data source by id."""
@@ -208,22 +181,19 @@
         """List all job binary internals for a user."""
 
         uri = 'job-binary-internals'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'binaries')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_job_binary_internal(self, job_binary_id):
         """Returns the details of a single job binary internal."""
 
         uri = 'job-binary-internals/%s' % job_binary_id
-        return self._request_check_and_parse_resp(self.get, uri,
-                                                  200, 'job_binary_internal')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_job_binary_internal(self, name, data):
         """Creates job binary internal with specified params."""
 
         uri = 'job-binary-internals/%s' % name
-        return self._request_check_and_parse_resp(self.put, uri, 202,
-                                                  'job_binary_internal', data)
+        return self._request_check_and_parse_resp(self.put, uri, 202, data)
 
     def delete_job_binary_internal(self, job_binary_id):
         """Deletes the specified job binary internal by id."""
@@ -241,15 +211,13 @@
         """List all job binaries for a user."""
 
         uri = 'job-binaries'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'binaries')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_job_binary(self, job_binary_id):
         """Returns the details of a single job binary."""
 
         uri = 'job-binaries/%s' % job_binary_id
-        return self._request_check_and_parse_resp(self.get,
-                                                  uri, 200, 'job_binary')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_job_binary(self, name, url, extra=None, **kwargs):
         """Creates job binary with specified params.
@@ -265,8 +233,7 @@
             'extra': extra or dict(),
         })
         return self._request_check_and_parse_resp(self.post, uri,
-                                                  202, 'job_binary',
-                                                  body=json.dumps(body))
+                                                  202, body=json.dumps(body))
 
     def delete_job_binary(self, job_binary_id):
         """Deletes the specified job binary by id."""
@@ -284,14 +251,13 @@
         """List all jobs for a user."""
 
         uri = 'jobs'
-        return self._request_check_and_parse_resp_list(self.get,
-                                                       uri, 200, 'jobs')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def get_job(self, job_id):
         """Returns the details of a single job."""
 
         uri = 'jobs/%s' % job_id
-        return self._request_check_and_parse_resp(self.get, uri, 200, 'job')
+        return self._request_check_and_parse_resp(self.get, uri, 200)
 
     def create_job(self, name, job_type, mains, libs=None, **kwargs):
         """Creates job with specified params.
@@ -307,8 +273,8 @@
             'mains': mains,
             'libs': libs or list(),
         })
-        return self._request_check_and_parse_resp(self.post, uri, 202,
-                                                  'job', body=json.dumps(body))
+        return self._request_check_and_parse_resp(self.post, uri,
+                                                  202, body=json.dumps(body))
 
     def delete_job(self, job_id):
         """Deletes the specified job by id."""
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index a07612a..d97da36 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -130,7 +130,7 @@
         self._error_checker('POST', '/v1/images', headers, data, resp,
                             body_iter)
         body = json.loads(''.join([c for c in body_iter]))
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def _update_with_data(self, image_id, headers, data):
         url = '/v1/images/%s' % image_id
@@ -139,7 +139,7 @@
         self._error_checker('PUT', url, headers, data,
                             resp, body_iter)
         body = json.loads(''.join([c for c in body_iter]))
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     @property
     def http(self):
@@ -169,7 +169,7 @@
         resp, body = self.post('v1/images', None, headers)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def update_image(self, image_id, name=None, container_format=None,
                      data=None, properties=None):
@@ -193,7 +193,7 @@
         resp, body = self.put(url, data, headers)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBody(resp, body['image'])
+        return service_client.ResponseBody(resp, body)
 
     def delete_image(self, image_id):
         url = 'v1/images/%s' % image_id
@@ -223,7 +223,7 @@
         resp, body = self.get(url)
         self.expected_success(200, resp.status)
         body = json.loads(body)
-        return service_client.ResponseBodyList(resp, body['images'])
+        return service_client.ResponseBody(resp, body)
 
     def get_image_meta(self, image_id):
         url = 'v1/images/%s' % image_id
diff --git a/tempest/services/volume/json/extensions_client.py b/tempest/services/volume/json/extensions_client.py
index 1098e1e..5744d4a 100644
--- a/tempest/services/volume/json/extensions_client.py
+++ b/tempest/services/volume/json/extensions_client.py
@@ -25,7 +25,7 @@
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
-        return service_client.ResponseBodyList(resp, body['extensions'])
+        return service_client.ResponseBody(resp, body)
 
 
 class ExtensionsClient(BaseExtensionsClient):
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
index d6965c7..99e9eb6 100644
--- a/tempest/stress/actions/volume_attach_delete.py
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -49,8 +49,8 @@
         self.logger.info("attach volume (%s) to vm %s" %
                          (volume['id'], server_id))
         self.manager.servers_client.attach_volume(server_id,
-                                                  volume['id'],
-                                                  '/dev/vdc')
+                                                  volumeId=volume['id'],
+                                                  device='/dev/vdc')
         self.manager.volumes_client.wait_for_volume_status(volume['id'],
                                                            'in-use')
         self.logger.info("volume (%s) attached to vm %s" %
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index c89985c..e30ca0c 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -191,8 +191,8 @@
         self.logger.info("attach volume (%s) to vm %s" %
                          (self.volume['id'], self.server_id))
         servers_client.attach_volume(self.server_id,
-                                     self.volume['id'],
-                                     self.part_name)
+                                     volumeId=self.volume['id'],
+                                     device=self.part_name)
         self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                            'in-use')
         if self.enable_ssh_verify:
diff --git a/tempest/tests/services/compute/test_quota_classes_client.py b/tempest/tests/services/compute/test_quota_classes_client.py
new file mode 100644
index 0000000..ff9b310
--- /dev/null
+++ b/tempest/tests/services/compute/test_quota_classes_client.py
@@ -0,0 +1,81 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import httplib2
+
+from oslo_serialization import jsonutils as json
+from oslotest import mockpatch
+
+from tempest.services.compute.json import quota_classes_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestQuotaClassesClient(base.TestCase):
+
+    FAKE_QUOTA_CLASS_SET = {
+        "injected_file_content_bytes": 10240,
+        "metadata_items": 128,
+        "server_group_members": 10,
+        "server_groups": 10,
+        "ram": 51200,
+        "floating_ips": 10,
+        "key_pairs": 100,
+        "id": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "instances": 10,
+        "security_group_rules": 20,
+        "security_groups": 10,
+        "injected_files": 5,
+        "cores": 20,
+        "fixed_ips": -1,
+        "injected_file_path_bytes": 255,
+        }
+
+    def setUp(self):
+        super(TestQuotaClassesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = quota_classes_client.QuotaClassesClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_show_quota_class_set(self, bytes_body=False):
+        serialized_body = json.dumps({
+            "quota_class_set": self.FAKE_QUOTA_CLASS_SET})
+        if bytes_body:
+            serialized_body = serialized_body.encode('utf-8')
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=mocked_resp))
+        resp = self.client.show_quota_class_set("test")
+        self.assertEqual(self.FAKE_QUOTA_CLASS_SET, resp)
+
+    def test_show_quota_class_set_with_str_body(self):
+        self._test_show_quota_class_set()
+
+    def test_show_quota_class_set_with_bytes_body(self):
+        self._test_show_quota_class_set(bytes_body=True)
+
+    def test_update_quota_class_set(self):
+        fake_quota_class_set = copy.deepcopy(self.FAKE_QUOTA_CLASS_SET)
+        fake_quota_class_set.pop("id")
+        serialized_body = json.dumps({"quota_class_set": fake_quota_class_set})
+
+        mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.put',
+            return_value=mocked_resp))
+        resp = self.client.update_quota_class_set("test")
+        self.assertEqual(fake_quota_class_set, resp)