Merge "Refactor rest-client and identity v2-client"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index fb11d96..fe4959b 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -398,8 +398,7 @@
 #uri_v3=<None>
 
 # Identity API version to be used for authentication for API
-# tests. Planned to extend to tenant isolation, scenario tests
-# and CLI tests. (string value)
+# tests. (string value)
 #auth_version=v2
 
 # The identity region name to use. Also used as the other
@@ -753,6 +752,11 @@
 # value)
 #default_thread_number_per_action=4
 
+# Prevent the cleanup (tearDownClass()) between stress
+# test runs if an exception occurs during a run. (boolean
+# value)
+#leave_dirty_stack=false
+
 
 [telemetry]
 
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index da4ccbe..fd069e7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -161,6 +161,18 @@
                 return
             time.sleep(self.build_interval)
 
+    @staticmethod
+    def _delete_volume(volumes_client, volume_id):
+        """Deletes the given volume and waits for it to be gone."""
+        try:
+            resp, _ = volumes_client.delete_volume(volume_id)
+            # TODO(mriedem): We should move the wait_for_resource_deletion
+            # into the delete_volume method as a convenience to the caller.
+            volumes_client.wait_for_resource_deletion(volume_id)
+        except exceptions.NotFound:
+            LOG.warn("Unable to delete volume '%s' since it was not found. "
+                     "Maybe it was already deleted?" % volume_id)
+
 
 class BaseV2ComputeTest(BaseComputeTest):
 
@@ -231,14 +243,7 @@
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
-        try:
-            resp, _ = cls.volumes_extensions_client.delete_volume(volume_id)
-            # TODO(mriedem): We should move the wait_for_resource_deletion
-            # into the delete_volume method as a convenience to the caller.
-            cls.volumes_extensions_client.wait_for_resource_deletion(volume_id)
-        except exceptions.NotFound:
-            LOG.warn("Unable to delete volume '%s' since it was not found. "
-                     "Maybe it was already deleted?" % volume_id)
+        cls._delete_volume(cls.volumes_extensions_client, volume_id)
 
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -336,6 +341,11 @@
         cls.password = server['admin_password']
         return server['id']
 
+    @classmethod
+    def delete_volume(cls, volume_id):
+        """Deletes the given volume and waits for it to be gone."""
+        cls._delete_volume(cls.volumes_client, volume_id)
+
 
 class BaseV3ComputeAdminTest(BaseV3ComputeTest):
     """Base test case class for all Compute Admin API V3 tests."""
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 20c5d7f..0bf604c 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -24,6 +24,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.set_network_resources(network=True, subnet=True, router=True)
         super(ServerRescueTestJSON, cls).setUpClass()
         cls.device = 'vdf'
 
@@ -41,20 +42,10 @@
         cls.sg_id = cls.sg['id']
 
         # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_attach = \
-            cls.volumes_extensions_client.create_volume(1,
-                                                        display_name=
-                                                        'test_attach')
+        resp, cls.volume = cls.volumes_extensions_client.create_volume(
+            1, display_name=data_utils.rand_name(cls.__name__ + '_volume'))
         cls.volumes_extensions_client.wait_for_volume_status(
-            cls.volume_to_attach['id'], 'available')
-
-        # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_detach = \
-            cls.volumes_extensions_client.create_volume(1,
-                                                        display_name=
-                                                        'test_detach')
-        cls.volumes_extensions_client.wait_for_volume_status(
-            cls.volume_to_detach['id'], 'available')
+            cls.volume['id'], 'available')
 
         # Server for positive tests
         resp, server = cls.create_test_server(wait_until='BUILD')
@@ -78,9 +69,7 @@
     def tearDownClass(cls):
         # Deleting the floating IP which is created in this method
         cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
-        client = cls.volumes_extensions_client
-        client.delete_volume(str(cls.volume_to_attach['id']).strip())
-        client.delete_volume(str(cls.volume_to_detach['id']).strip())
+        cls.delete_volume(cls.volume['id'])
         resp, cls.sg = cls.security_groups_client.delete_security_group(
             cls.sg_id)
         super(ServerRescueTestJSON, cls).tearDownClass()
@@ -93,9 +82,6 @@
         self.volumes_extensions_client.wait_for_volume_status(volume_id,
                                                               'available')
 
-    def _delete(self, volume_id):
-        self.volumes_extensions_client.delete_volume(volume_id)
-
     def _unrescue(self, server_id):
         resp, body = self.servers_client.unrescue_server(server_id)
         self.assertEqual(202, resp.status)
@@ -159,32 +145,31 @@
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.attach_volume,
                           self.server_id,
-                          self.volume_to_attach['id'],
+                          self.volume['id'],
                           device='/dev/%s' % self.device)
 
     @attr(type=['negative', 'gate'])
     def test_rescued_vm_detach_volume(self):
         # Attach the volume to the server
         self.servers_client.attach_volume(self.server_id,
-                                          self.volume_to_detach['id'],
+                                          self.volume['id'],
                                           device='/dev/%s' % self.device)
         self.volumes_extensions_client.wait_for_volume_status(
-            self.volume_to_detach['id'], 'in-use')
+            self.volume['id'], 'in-use')
 
         # Rescue the server
         self.servers_client.rescue_server(self.server_id,
                                           adminPass=self.password)
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
         # addCleanup is a LIFO queue
-        self.addCleanup(self._detach, self.server_id,
-                        self.volume_to_detach['id'])
+        self.addCleanup(self._detach, self.server_id, self.volume['id'])
         self.addCleanup(self._unrescue, self.server_id)
 
         # Detach the volume from the server expecting failure
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.detach_volume,
                           self.server_id,
-                          self.volume_to_detach['id'])
+                          self.volume['id'])
 
     @attr(type='gate')
     def test_rescued_vm_associate_dissociate_floating_ip(self):
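
Both rescue test classes lean on the "# addCleanup is a LIFO queue" comment: cleanups run in the reverse of their registration order, so the server is unrescued before the volume detach is attempted. A tiny self-contained demonstration of that ordering:

    import unittest


    class CleanupOrderDemo(unittest.TestCase):
        def test_lifo_order(self):
            calls = []
            # Registered first, runs last.
            self.addCleanup(calls.append, 'detach volume')
            # Registered last, runs first.
            self.addCleanup(calls.append, 'unrescue server')
            self.doCleanups()
            self.assertEqual(['unrescue server', 'detach volume'], calls)
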
diff --git a/tempest/api/compute/v3/servers/test_server_metadata_negative.py b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
index 2c413db..ce6c340 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
@@ -34,6 +34,7 @@
 
         cls.server_id = server['id']
 
+    @test.skip_because(bug="1273948")
     @test.attr(type=['gate', 'negative'])
     def test_server_create_metadata_key_too_long(self):
         # Attempt to start a server with a meta-data key that is > 255
@@ -43,7 +44,7 @@
         for sz in [256, 257, 511, 1023]:
             key = "k" * sz
             meta = {key: 'data1'}
-            self.assertRaises(exceptions.OverLimit,
+            self.assertRaises(exceptions.BadRequest,
                               self.create_test_server,
                               meta=meta)
 
diff --git a/tempest/api/compute/v3/servers/test_server_rescue.py b/tempest/api/compute/v3/servers/test_server_rescue.py
index f8be1c1..fa7def0 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest.common.utils import data_utils
 from tempest import exceptions
 from tempest.test import attr
 
@@ -27,20 +28,10 @@
         cls.device = 'vdf'
 
         # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_attach = \
-            cls.volumes_client.create_volume(1,
-                                             display_name=
-                                             'test_attach')
+        resp, cls.volume = cls.volumes_client.create_volume(
+            1, display_name=data_utils.rand_name(cls.__name__ + '_volume'))
         cls.volumes_client.wait_for_volume_status(
-            cls.volume_to_attach['id'], 'available')
-
-        # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_detach = \
-            cls.volumes_client.create_volume(1,
-                                             display_name=
-                                             'test_detach')
-        cls.volumes_client.wait_for_volume_status(
-            cls.volume_to_detach['id'], 'available')
+            cls.volume['id'], 'available')
 
         # Server for positive tests
         resp, server = cls.create_test_server(wait_until='BUILD')
@@ -62,9 +53,7 @@
 
     @classmethod
     def tearDownClass(cls):
-        client = cls.volumes_client
-        client.delete_volume(str(cls.volume_to_attach['id']).strip())
-        client.delete_volume(str(cls.volume_to_detach['id']).strip())
+        cls.delete_volume(cls.volume['id'])
         super(ServerRescueV3Test, cls).tearDownClass()
 
     def tearDown(self):
@@ -75,9 +64,6 @@
         self.volumes_client.wait_for_volume_status(volume_id,
                                                    'available')
 
-    def _delete(self, volume_id):
-        self.volumes_client.delete_volume(volume_id)
-
     def _unrescue(self, server_id):
         resp, body = self.servers_client.unrescue_server(server_id)
         self.assertEqual(202, resp.status)
@@ -141,29 +127,27 @@
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.attach_volume,
                           self.server_id,
-                          self.volume_to_attach['id'],
+                          self.volume['id'],
                           device='/dev/%s' % self.device)
 
     @attr(type=['negative', 'gate'])
     def test_rescued_vm_detach_volume(self):
         # Attach the volume to the server
         self.servers_client.attach_volume(self.server_id,
-                                          self.volume_to_detach['id'],
+                                          self.volume['id'],
                                           device='/dev/%s' % self.device)
-        self.volumes_client.wait_for_volume_status(
-            self.volume_to_detach['id'], 'in-use')
+        self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
 
         # Rescue the server
         self.servers_client.rescue_server(self.server_id,
                                           admin_password=self.password)
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
         # addCleanup is a LIFO queue
-        self.addCleanup(self._detach, self.server_id,
-                        self.volume_to_detach['id'])
+        self.addCleanup(self._detach, self.server_id, self.volume['id'])
         self.addCleanup(self._unrescue, self.server_id)
 
         # Detach the volume from the server expecting failure
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.detach_volume,
                           self.server_id,
-                          self.volume_to_detach['id'])
+                          self.volume['id'])
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index c153699..12e0ad8 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -191,12 +191,13 @@
                           self.create_test_server,
                           key_name=key_name)
 
+    @test.skip_because(bug="1273948")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_metadata_exceeds_length_limit(self):
         # Pass really long metadata while creating a server
 
         metadata = {'a': 'b' * 260}
-        self.assertRaises(exceptions.OverLimit,
+        self.assertRaises(exceptions.BadRequest,
                           self.create_test_server,
                           meta=metadata)
 
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
new file mode 100644
index 0000000..3b941d8
--- /dev/null
+++ b/tempest/api/data_processing/test_plugins.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.test import attr
+
+
+class PluginsTest(dp_base.BaseDataProcessingTest):
+    def _list_all_plugin_names(self):
+        """Returns all enabled plugin names.
+
+        It checks the response status and that the main plugins are present.
+        """
+        resp, plugins = self.client.list_plugins()
+
+        self.assertEqual(200, resp.status)
+
+        plugins_names = [plugin['name'] for plugin in plugins]
+        self.assertIn('vanilla', plugins_names)
+        self.assertIn('hdp', plugins_names)
+
+        return plugins_names
+
+    @attr(type='smoke')
+    def test_plugin_list(self):
+        self._list_all_plugin_names()
+
+    @attr(type='smoke')
+    def test_plugin_get(self):
+        for plugin_name in self._list_all_plugin_names():
+            resp, plugin = self.client.get_plugin(plugin_name)
+
+            self.assertEqual(200, resp.status)
+            self.assertEqual(plugin_name, plugin['name'])
+
+            for plugin_version in plugin['versions']:
+                resp, detailed_plugin = self.client.get_plugin(plugin_name,
+                                                               plugin_version)
+
+                self.assertEqual(200, resp.status)
+                self.assertEqual(plugin_name, detailed_plugin['name'])
+
+                # check that required image tags contains name and version
+                image_tags = detailed_plugin['required_image_tags']
+                self.assertIn(plugin_name, image_tags)
+                self.assertIn(plugin_version, image_tags)
diff --git a/tempest/api/identity/admin/test_users_negative.py b/tempest/api/identity/admin/test_users_negative.py
index e9e7818..060f24a 100644
--- a/tempest/api/identity/admin/test_users_negative.py
+++ b/tempest/api/identity/admin/test_users_negative.py
@@ -207,7 +207,7 @@
     @attr(type=['negative', 'gate'])
     def test_get_users_request_without_token(self):
         # Request to get list of users without a valid token should fail
-        token = self.client.auth_provider.auth_data[0]
+        token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         self.assertRaises(exceptions.Unauthorized, self.client.get_users)
         self.client.auth_provider.clear_auth()
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 37b848c..e439238 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -106,7 +106,7 @@
             cls.os_alt = clients.AltManager()
             identity_client = cls._get_identity_admin_client()
             cls.alt_tenant_id = identity_client.get_tenant_by_name(
-                cls.os_alt.tenant_name)['id']
+                cls.os_alt.credentials['tenant_name'])['id']
 
         cls.alt_img_cli = cls.os_alt.image_client
 
@@ -147,7 +147,7 @@
             cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
         else:
             cls.os_alt = clients.AltManager()
-            alt_tenant_name = cls.os_alt.tenant_name
+            alt_tenant_name = cls.os_alt.credentials['tenant_name']
             identity_client = cls._get_identity_admin_client()
             cls.alt_tenant_id = identity_client.get_tenant_by_name(
                 alt_tenant_name)['id']
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8c62c05..d8b79ca 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -71,7 +71,7 @@
         resp, body = self.create_image(name='New Http Image',
                                        container_format='bare',
                                        disk_format='raw', is_public=True,
-                                       copy_from=CONF.images.http_image)
+                                       copy_from=CONF.image.http_image)
         self.assertIn('id', body)
         image_id = body.get('id')
         self.assertEqual('New Http Image', body.get('name'))
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index bfb7b48..7c02787 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -17,7 +17,7 @@
 from tempest import test
 
 
-class L3AgentSchedulerJSON(base.BaseAdminNetworkTest):
+class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
     _interface = 'json'
 
     """
@@ -33,7 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
-        super(L3AgentSchedulerJSON, cls).setUpClass()
+        super(L3AgentSchedulerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
             msg = "L3 Agent Scheduler Extension not enabled."
             raise cls.skipException(msg)
@@ -61,5 +61,5 @@
         self.assertEqual(204, resp.status)
 
 
-class L3AgentSchedulerXML(L3AgentSchedulerJSON):
+class L3AgentSchedulerTestXML(L3AgentSchedulerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 1c2c4b0..b129786 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -49,6 +49,8 @@
         neutron as True
     """
 
+    force_tenant_isolation = False
+
     @classmethod
     def setUpClass(cls):
         # Create no network resources for these test.
@@ -57,6 +59,10 @@
         os = clients.Manager(interface=cls._interface)
         if not CONF.service_available.neutron:
             raise cls.skipException("Neutron support is required")
+
+        os = cls.get_client_manager()
+
+        cls.network_cfg = CONF.network
         cls.client = os.network_client
         cls.networks = []
         cls.subnets = []
@@ -110,6 +116,7 @@
         # Clean up networks
         for network in cls.networks:
             cls.client.delete_network(network['id'])
+        cls.clear_isolated_creds()
         super(BaseNetworkTest, cls).tearDownClass()
 
     @classmethod
@@ -269,5 +276,14 @@
             msg = ("Missing Administrative Network API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
-        cls.admin_manager = clients.AdminManager(interface=cls._interface)
-        cls.admin_client = cls.admin_manager.network_client
+        if (CONF.compute.allow_tenant_isolation or
+            cls.force_tenant_isolation is True):
+            creds = cls.isolated_creds.get_admin_creds()
+            admin_username, admin_tenant_name, admin_password = creds
+            cls.os_adm = clients.Manager(username=admin_username,
+                                         password=admin_password,
+                                         tenant_name=admin_tenant_name,
+                                         interface=cls._interface)
+        else:
+            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+        cls.admin_client = cls.os_adm.network_client
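
The admin-client hunk above picks between dynamically created credentials and the preconfigured admin account. A minimal sketch of that selection logic, using placeholder names rather than Tempest's real managers:

    def pick_admin_creds(allow_tenant_isolation, force_isolation,
                         isolated_creds, config_admin_creds):
        """Return (username, tenant_name, password) for the admin client."""
        if allow_tenant_isolation or force_isolation:
            # Throwaway admin credentials created just for this test class.
            return isolated_creds.get_admin_creds()
        # Shared, pre-provisioned admin credentials from tempest.conf.
        return config_admin_creds
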
diff --git a/tempest/api/network/common.py b/tempest/api/network/common.py
index 0ce1769..d68ff1a 100644
--- a/tempest/api/network/common.py
+++ b/tempest/api/network/common.py
@@ -126,3 +126,21 @@
 
     def delete(self):
         self.client.delete_security_group_rule(self.id)
+
+
+class DeletablePool(DeletableResource):
+
+    def delete(self):
+        self.client.delete_pool(self.id)
+
+
+class DeletableMember(DeletableResource):
+
+    def delete(self):
+        self.client.delete_member(self.id)
+
+
+class DeletableVip(DeletableResource):
+
+    def delete(self):
+        self.client.delete_vip(self.id)
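
The three new classes follow the file's DeletableResource convention: the wrapper carries the resource attributes plus the client needed to delete it later. A rough self-contained approximation of that wrapper (an illustrative reimplementation, not the real base class in common.py):

    class DeletableResourceSketch(dict):
        """Dict-like resource that remembers the client that created it."""

        def __init__(self, client=None, **kwargs):
            super(DeletableResourceSketch, self).__init__(**kwargs)
            self.client = client

        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)


    class DeletablePoolSketch(DeletableResourceSketch):
        def delete(self):
            # e.g. DeletablePoolSketch(client=net_client, **pool).delete()
            self.client.delete_pool(self.id)
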
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 65eebf2..d5f2b5b 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -18,7 +18,7 @@
 from tempest import test
 
 
-class LoadBalancerJSON(base.BaseNetworkTest):
+class LoadBalancerTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
     """
@@ -39,7 +39,7 @@
 
     @classmethod
     def setUpClass(cls):
-        super(LoadBalancerJSON, cls).setUpClass()
+        super(LoadBalancerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas', 'network'):
             msg = "lbaas extension not enabled."
             raise cls.skipException(msg)
@@ -210,5 +210,5 @@
         self.assertEqual('204', resp['status'])
 
 
-class LoadBalancerXML(LoadBalancerJSON):
+class LoadBalancerTestXML(LoadBalancerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 3aa765c..aee2a44 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -239,7 +239,7 @@
     _interface = 'xml'
 
 
-class BulkNetworkOpsJSON(base.BaseNetworkTest):
+class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
     """
@@ -263,7 +263,7 @@
 
     @classmethod
     def setUpClass(cls):
-        super(BulkNetworkOpsJSON, cls).setUpClass()
+        super(BulkNetworkOpsTestJSON, cls).setUpClass()
         cls.network1 = cls.create_network()
         cls.network2 = cls.create_network()
 
@@ -390,5 +390,5 @@
             self.assertIn(n['id'], ports_list)
 
 
-class BulkNetworkOpsXML(BulkNetworkOpsJSON):
+class BulkNetworkOpsTestXML(BulkNetworkOpsTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 64b8a41..78bc80a 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -38,10 +38,10 @@
 
     @classmethod
     def setUpClass(cls):
-        super(VPNaaSJSON, cls).setUpClass()
         if not test.is_extension_enabled('vpnaas', 'network'):
             msg = "vpnaas extension not enabled."
             raise cls.skipException(msg)
+        super(VPNaaSJSON, cls).setUpClass()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
         cls.router = cls.create_router(
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index cb1a6cb..b0c878b 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -14,8 +14,6 @@
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest.openstack.common import log as logging
-from tempest.services.volume.json.admin import volume_types_client
-from tempest.services.volume.json import volumes_client
 from tempest.test import attr
 
 CONF = config.CONF
@@ -36,20 +34,7 @@
         cls.backend1_name = CONF.volume.backend1_name
         cls.backend2_name = CONF.volume.backend2_name
 
-        adm_user = CONF.identity.admin_username
-        adm_pass = CONF.identity.admin_password
-        adm_tenant = CONF.identity.admin_tenant_name
-        auth_url = CONF.identity.uri
-
-        cls.volume_client = volumes_client.VolumesClientJSON(adm_user,
-                                                             adm_pass,
-                                                             auth_url,
-                                                             adm_tenant)
-        cls.type_client = volume_types_client.VolumeTypesClientJSON(adm_user,
-                                                                    adm_pass,
-                                                                    auth_url,
-                                                                    adm_tenant)
-
+        cls.volume_client = cls.os_adm.volumes_client
         cls.volume_type_id_list = []
         cls.volume_id_list = []
         try:
@@ -57,7 +42,7 @@
             type1_name = data_utils.rand_name('Type-')
             vol1_name = data_utils.rand_name('Volume-')
             extra_specs1 = {"volume_backend_name": cls.backend1_name}
-            resp, cls.type1 = cls.type_client.create_volume_type(
+            resp, cls.type1 = cls.client.create_volume_type(
                 type1_name, extra_specs=extra_specs1)
             cls.volume_type_id_list.append(cls.type1['id'])
 
@@ -72,7 +57,7 @@
                 type2_name = data_utils.rand_name('Type-')
                 vol2_name = data_utils.rand_name('Volume-')
                 extra_specs2 = {"volume_backend_name": cls.backend2_name}
-                resp, cls.type2 = cls.type_client.create_volume_type(
+                resp, cls.type2 = cls.client.create_volume_type(
                     type2_name, extra_specs=extra_specs2)
                 cls.volume_type_id_list.append(cls.type2['id'])
 
@@ -97,7 +82,7 @@
         # volume types deletion
         volume_type_id_list = getattr(cls, 'volume_type_id_list', [])
         for volume_type_id in volume_type_id_list:
-            cls.type_client.delete_volume_type(volume_type_id)
+            cls.client.delete_volume_type(volume_type_id)
 
         super(VolumeMultiBackendTest, cls).tearDownClass()
 
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index fc4f07d..cf4e052 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from testtools import matchers
+
 from tempest.api.volume import base
 from tempest import clients
 from tempest import config
@@ -87,7 +89,7 @@
         # or equal to 1
         resp, body = self.client.list_volume_transfers()
         self.assertEqual(200, resp.status)
-        self.assertGreaterEqual(len(body), 1)
+        self.assertThat(len(body), matchers.GreaterThan(0))
 
         # Accept a volume transfer by alt_tenant
         resp, body = self.alt_client.accept_volume_transfer(transfer_id,
@@ -107,10 +109,14 @@
         self.client.wait_for_volume_status(volume['id'],
                                            'awaiting-transfer')
 
-        # List all volume transfers, there's only one in this test
+        # List all volume transfers (looking for the one we created)
         resp, body = self.client.list_volume_transfers()
         self.assertEqual(200, resp.status)
-        self.assertEqual(volume['id'], body[0]['volume_id'])
+        for transfer in body:
+            if volume['id'] == transfer['volume_id']:
+                break
+        else:
+            self.fail('Transfer not found for volume %s' % volume['id'])
 
         # Delete a volume transfer
         resp, body = self.client.delete_volume_transfer(transfer_id)
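
The listing check above now uses Python's for/else idiom: the else branch runs only when the loop finishes without hitting break, which is exactly the "transfer not found" case. A tiny standalone illustration:

    transfers = [{'volume_id': 'vol-a'}, {'volume_id': 'vol-b'}]
    wanted = 'vol-b'
    for transfer in transfers:
        if transfer['volume_id'] == wanted:
            print('found transfer for %s' % wanted)
            break
    else:
        # Only reached when no break occurred, i.e. nothing matched.
        raise AssertionError('Transfer not found for volume %s' % wanted)
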
diff --git a/tempest/api/volume/v2/__init__.py b/tempest/api/volume/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/api/volume/v2/__init__.py
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
new file mode 100644
index 0000000..049544d
--- /dev/null
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -0,0 +1,228 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import operator
+
+from tempest.api.volume import base
+from tempest.common.utils import data_utils
+from tempest.openstack.common import log as logging
+from tempest.test import attr
+from testtools.matchers import ContainsAll
+
+LOG = logging.getLogger(__name__)
+
+VOLUME_FIELDS = ('id', 'display_name')
+
+
+class VolumesListTest(base.BaseVolumeV1Test):
+
+    """
+    This test creates a number of 1G volumes. To run successfully,
+    ensure that the backing file for the volume group that Nova uses
+    has space for at least 3 1G volumes!
+    If you are running a Devstack environment, ensure that the
+    VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
+    """
+
+    _interface = 'json'
+
+    def assertVolumesIn(self, fetched_list, expected_list, fields=None):
+        if fields:
+            expected_list = map(operator.itemgetter(*fields), expected_list)
+            fetched_list = map(operator.itemgetter(*fields), fetched_list)
+
+        missing_vols = [v for v in expected_list if v not in fetched_list]
+        if len(missing_vols) == 0:
+            return
+
+        def str_vol(vol):
+            return "%s:%s" % (vol['id'], vol['display_name'])
+
+        raw_msg = "Could not find volumes %s in expected list %s; fetched %s"
+        self.fail(raw_msg % ([str_vol(v) for v in missing_vols],
+                             [str_vol(v) for v in expected_list],
+                             [str_vol(v) for v in fetched_list]))
+
+    @classmethod
+    def setUpClass(cls):
+        super(VolumesListTest, cls).setUpClass()
+        cls.client = cls.volumes_client
+
+        # Create 3 test volumes
+        cls.volume_list = []
+        cls.volume_id_list = []
+        cls.metadata = {'Type': 'work'}
+        for i in range(3):
+            try:
+                volume = cls.create_volume(metadata=cls.metadata)
+
+                resp, volume = cls.client.get_volume(volume['id'])
+                cls.volume_list.append(volume)
+                cls.volume_id_list.append(volume['id'])
+            except Exception:
+                LOG.exception('Failed to create volume. %d volumes were '
+                              'created' % len(cls.volume_id_list))
+                if cls.volume_list:
+                    # We could not create all the volumes, though we were able
+                    # to create *some* of the volumes. This is typically
+                    # because the backing file size of the volume group is
+                    # too small.
+                    for volid in cls.volume_id_list:
+                        cls.client.delete_volume(volid)
+                        cls.client.wait_for_resource_deletion(volid)
+                raise
+
+    @classmethod
+    def tearDownClass(cls):
+        # Delete the created volumes
+        for volid in cls.volume_id_list:
+            resp, _ = cls.client.delete_volume(volid)
+            cls.client.wait_for_resource_deletion(volid)
+        super(VolumesListTest, cls).tearDownClass()
+
+    def _list_by_param_value_and_assert(self, params, with_detail=False):
+        """
+        Perform list or list_details action with given params
+        and validates result.
+        """
+        if with_detail:
+            resp, fetched_vol_list = \
+                self.client.list_volumes_with_detail(params=params)
+        else:
+            resp, fetched_vol_list = self.client.list_volumes(params=params)
+
+        self.assertEqual(200, resp.status)
+        # Validating params of fetched volumes
+        for volume in fetched_vol_list:
+            for key in params:
+                msg = "Failed to list volumes %s by %s" % \
+                      ('details' if with_detail else '', key)
+                if key == 'metadata':
+                    self.assertThat(volume[key].items(),
+                                    ContainsAll(params[key].items()),
+                                    msg)
+                else:
+                    self.assertEqual(params[key], volume[key], msg)
+
+    @attr(type='smoke')
+    def test_volume_list(self):
+        # Get a list of Volumes
+        # Fetch all volumes
+        resp, fetched_list = self.client.list_volumes()
+        self.assertEqual(200, resp.status)
+        self.assertVolumesIn(fetched_list, self.volume_list,
+                             fields=VOLUME_FIELDS)
+
+    @attr(type='gate')
+    def test_volume_list_with_details(self):
+        # Get a list of Volumes with details
+        # Fetch all Volumes
+        resp, fetched_list = self.client.list_volumes_with_detail()
+        self.assertEqual(200, resp.status)
+        self.assertVolumesIn(fetched_list, self.volume_list)
+
+    @attr(type='gate')
+    def test_volume_list_by_name(self):
+        volume = self.volume_list[data_utils.rand_int_id(0, 2)]
+        params = {'display_name': volume['display_name']}
+        resp, fetched_vol = self.client.list_volumes(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(1, len(fetched_vol), str(fetched_vol))
+        self.assertEqual(fetched_vol[0]['display_name'],
+                         volume['display_name'])
+
+    @attr(type='gate')
+    def test_volume_list_details_by_name(self):
+        volume = self.volume_list[data_utils.rand_int_id(0, 2)]
+        params = {'display_name': volume['display_name']}
+        resp, fetched_vol = self.client.list_volumes_with_detail(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(1, len(fetched_vol), str(fetched_vol))
+        self.assertEqual(fetched_vol[0]['display_name'],
+                         volume['display_name'])
+
+    @attr(type='gate')
+    def test_volumes_list_by_status(self):
+        params = {'status': 'available'}
+        resp, fetched_list = self.client.list_volumes(params)
+        self.assertEqual(200, resp.status)
+        for volume in fetched_list:
+            self.assertEqual('available', volume['status'])
+        self.assertVolumesIn(fetched_list, self.volume_list,
+                             fields=VOLUME_FIELDS)
+
+    @attr(type='gate')
+    def test_volumes_list_details_by_status(self):
+        params = {'status': 'available'}
+        resp, fetched_list = self.client.list_volumes_with_detail(params)
+        self.assertEqual(200, resp.status)
+        for volume in fetched_list:
+            self.assertEqual('available', volume['status'])
+        self.assertVolumesIn(fetched_list, self.volume_list)
+
+    @attr(type='gate')
+    def test_volumes_list_by_availability_zone(self):
+        volume = self.volume_list[data_utils.rand_int_id(0, 2)]
+        zone = volume['availability_zone']
+        params = {'availability_zone': zone}
+        resp, fetched_list = self.client.list_volumes(params)
+        self.assertEqual(200, resp.status)
+        for volume in fetched_list:
+            self.assertEqual(zone, volume['availability_zone'])
+        self.assertVolumesIn(fetched_list, self.volume_list,
+                             fields=VOLUME_FIELDS)
+
+    @attr(type='gate')
+    def test_volumes_list_details_by_availability_zone(self):
+        volume = self.volume_list[data_utils.rand_int_id(0, 2)]
+        zone = volume['availability_zone']
+        params = {'availability_zone': zone}
+        resp, fetched_list = self.client.list_volumes_with_detail(params)
+        self.assertEqual(200, resp.status)
+        for volume in fetched_list:
+            self.assertEqual(zone, volume['availability_zone'])
+        self.assertVolumesIn(fetched_list, self.volume_list)
+
+    @attr(type='gate')
+    def test_volume_list_with_param_metadata(self):
+        # Test to list volumes when metadata param is given
+        params = {'metadata': self.metadata}
+        self._list_by_param_value_and_assert(params)
+
+    @attr(type='gate')
+    def test_volume_list_with_detail_param_metadata(self):
+        # Test to list volumes details when metadata param is given
+        params = {'metadata': self.metadata}
+        self._list_by_param_value_and_assert(params, with_detail=True)
+
+    @attr(type='gate')
+    def test_volume_list_param_display_name_and_status(self):
+        # Test to list volume when display name and status param is given
+        volume = self.volume_list[data_utils.rand_int_id(0, 2)]
+        params = {'display_name': volume['display_name'],
+                  'status': 'available'}
+        self._list_by_param_value_and_assert(params)
+
+    @attr(type='gate')
+    def test_volume_list_with_detail_param_display_name_and_status(self):
+        # Test to list volume when name and status param is given
+        volume = self.volume_list[data_utils.rand_int_id(0, 2)]
+        params = {'display_name': volume['display_name'],
+                  'status': 'available'}
+        self._list_by_param_value_and_assert(params, with_detail=True)
+
+
+class VolumeListTestXML(VolumesListTest):
+    _interface = 'xml'
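
assertVolumesIn in the new test projects both lists through operator.itemgetter(*fields) so volumes compare equal on just id and display_name even when the detailed payloads carry extra keys. A small standalone example of that projection:

    import operator

    fields = ('id', 'display_name')
    project = operator.itemgetter(*fields)  # returns ('<id>', '<name>') tuples

    expected = [{'id': '1', 'display_name': 'vol-a', 'status': 'available'}]
    fetched = [{'id': '1', 'display_name': 'vol-a', 'size': 1}]

    assert [project(v) for v in expected] == [project(v) for v in fetched]
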
diff --git a/tempest/auth.py b/tempest/auth.py
index 8d826cf..90017c1 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -68,10 +68,10 @@
         """
         Decorate request with authentication data
         """
-        raise NotImplemented
+        raise NotImplementedError
 
     def _get_auth(self):
-        raise NotImplemented
+        raise NotImplementedError
 
     @classmethod
     def check_credentials(cls, credentials):
@@ -98,7 +98,7 @@
         self.cache = None
 
     def is_expired(self, auth_data):
-        raise NotImplemented
+        raise NotImplementedError
 
     def auth_request(self, method, url, headers=None, body=None, filters=None):
         """
@@ -176,7 +176,7 @@
         """
         Extracts the base_url based on provided filters
         """
-        raise NotImplemented
+        raise NotImplementedError
 
 
 class KeystoneAuthProvider(AuthProvider):
@@ -208,10 +208,10 @@
         return _url, _headers, body
 
     def _auth_client(self):
-        raise NotImplemented
+        raise NotImplementedError
 
     def _auth_params(self):
-        raise NotImplemented
+        raise NotImplementedError
 
     def _get_auth(self):
         # Bypasses the cache
@@ -223,7 +223,7 @@
             token, auth_data = auth_func(**auth_params)
             return token, auth_data
         else:
-            raise NotImplemented
+            raise NotImplementedError
 
     def get_token(self):
         return self.auth_data[0]
@@ -250,7 +250,7 @@
             else:
                 return xml_id.TokenClientXML()
         else:
-            raise NotImplemented
+            raise NotImplementedError
 
     def _auth_params(self):
         if self.client_type == 'tempest':
@@ -260,7 +260,7 @@
                 tenant=self.credentials.get('tenant_name', None),
                 auth_data=True)
         else:
-            raise NotImplemented
+            raise NotImplementedError
 
     def base_url(self, filters, auth_data=None):
         """
@@ -299,7 +299,7 @@
             path = "/" + filters['api_version']
             noversion_path = "/".join(parts.path.split("/")[2:])
             if noversion_path != "":
-                path += noversion_path
+                path += "/" + noversion_path
             _base_url = _base_url.replace(parts.path, path)
         if filters.get('skip_path', None) is not None:
             _base_url = _base_url.replace(parts.path, "/")
@@ -334,7 +334,7 @@
             else:
                 return xml_v3id.V3TokenClientXML()
         else:
-            raise NotImplemented
+            raise NotImplementedError
 
     def _auth_params(self):
         if self.client_type == 'tempest':
@@ -345,7 +345,7 @@
                 domain=self.credentials['domain_name'],
                 auth_data=True)
         else:
-            raise NotImplemented
+            raise NotImplementedError
 
     def base_url(self, filters, auth_data=None):
         """
diff --git a/tempest/clients.py b/tempest/clients.py
index fd46656..9c1a0f1 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -445,6 +445,6 @@
         base = super(OrchestrationManager, self)
         base.__init__(CONF.identity.admin_username,
                       CONF.identity.admin_password,
-                      CONF.identity.tenant_name,
+                      CONF.identity.admin_tenant_name,
                       interface=interface,
                       service=service)
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 146fac9..ac8b14f 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -36,7 +36,6 @@
         self.isolated_net_resources = {}
         self.ports = []
         self.name = name
-        self.config = CONF
         self.tempest_client = tempest_client
         self.interface = interface
         self.password = password
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index af12642..212d41d 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -138,6 +138,10 @@
         return self.auth_provider.base_url(filters=self.filters)
 
     @property
+    def token(self):
+        return self.auth_provider.get_token()
+
+    @property
     def filters(self):
         _filters = dict(
             service=self.service,
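
The new `token` property delegates to the auth provider, which is why the identity negative test earlier in this change calls `auth_provider.get_token()` rather than indexing `auth_data[0]` directly. A standalone sketch of that delegation, with illustrative stand-in classes:

    class AuthProviderSketch(object):
        def __init__(self, auth_data):
            self.auth_data = auth_data  # e.g. (token, service_catalog)

        def get_token(self):
            return self.auth_data[0]


    class RestClientSketch(object):
        def __init__(self, auth_provider):
            self.auth_provider = auth_provider

        @property
        def token(self):
            # Callers no longer need to know how auth_data is laid out.
            return self.auth_provider.get_token()


    client = RestClientSketch(AuthProviderSketch(('secret-token', {})))
    print(client.token)  # secret-token
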
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index 0ed9b82..c772ce9 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -49,7 +49,7 @@
         self.channel_timeout = float(channel_timeout)
         self.buf_size = 1024
 
-    def _get_ssh_connection(self, sleep=1.5, backoff=1.01):
+    def _get_ssh_connection(self, sleep=1.5, backoff=1):
         """Returns an ssh connection to the specified host."""
         bsleep = sleep
         ssh = paramiko.SSHClient()
@@ -76,19 +76,21 @@
                          self.username, self.host)
                 return ssh
             except (socket.error,
-                    paramiko.SSHException):
-                attempts += 1
-                time.sleep(bsleep)
-                bsleep *= backoff
-                if not self._is_timed_out(_start_time):
-                    continue
-                else:
+                    paramiko.SSHException) as e:
+                if self._is_timed_out(_start_time):
                     LOG.exception("Failed to establish authenticated ssh"
                                   " connection to %s@%s after %d attempts",
                                   self.username, self.host, attempts)
                     raise exceptions.SSHTimeout(host=self.host,
                                                 user=self.username,
                                                 password=self.password)
+                bsleep += backoff
+                attempts += 1
+                LOG.warning("Failed to establish authenticated ssh"
+                            " connection to %s@%s (%s). Attempts so far: %s."
+                            " Retry after %d seconds.",
+                            self.username, self.host, e, attempts, bsleep)
+                time.sleep(bsleep)
 
     def _is_timed_out(self, start_time):
         return (time.time() - self.timeout) > start_time
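
With this hunk the SSH helper retries with a linear backoff (`bsleep += backoff`), logs a warning for every failed attempt, and checks the timeout before sleeping so the final failure raises without one extra sleep. A self-contained sketch of that retry shape, using generic names rather than the real Client class:

    import logging
    import time

    LOG = logging.getLogger(__name__)


    def connect_with_retry(connect, timeout=10, sleep=1.5, backoff=1):
        """Call connect() until it succeeds or the timeout expires."""
        bsleep = sleep
        attempts = 0
        start = time.time()
        while True:
            try:
                return connect()
            except IOError as e:
                if time.time() - start > timeout:
                    raise RuntimeError("gave up after %d attempts" % attempts)
                bsleep += backoff
                attempts += 1
                LOG.warning("attempt %d failed (%s); retrying in %s seconds",
                            attempts, e, bsleep)
                time.sleep(bsleep)
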
diff --git a/tempest/config.py b/tempest/config.py
index cb186ca..d24ab34 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -46,8 +46,7 @@
     cfg.StrOpt('auth_version',
                default='v2',
                help="Identity API version to be used for authentication "
-                    "for API tests. Planned to extend to tenant isolation, "
-                    "scenario tests and CLI tests."),
+                    "for API tests."),
     cfg.StrOpt('region',
                default='RegionOne',
                help="The identity region name to use. Also used as the other "
@@ -587,7 +586,12 @@
                help='time (in seconds) between log file error checks.'),
     cfg.IntOpt('default_thread_number_per_action',
                default=4,
-               help='The number of threads created while stress test.')
+               help='The number of threads created while stress test.'),
+    cfg.BoolOpt('leave_dirty_stack',
+                default=False,
+                help='Prevent the cleanup (tearDownClass()) between'
+                     ' stress test runs if an exception occurs'
+                     ' during a run.')
 ]
 
 
@@ -781,7 +785,7 @@
         self.compute_feature_enabled = cfg.CONF['compute-feature-enabled']
         self.identity = cfg.CONF.identity
         self.identity_feature_enabled = cfg.CONF['identity-feature-enabled']
-        self.images = cfg.CONF.image
+        self.image = cfg.CONF.image
         self.image_feature_enabled = cfg.CONF['image-feature-enabled']
         self.network = cfg.CONF.network
         self.network_feature_enabled = cfg.CONF['network-feature-enabled']
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 59a3aeb..0fc304a 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -22,7 +22,7 @@
 import cinderclient.client
 import glanceclient
 import heatclient.client
-import keystoneclient.apiclient.exceptions
+import keystoneclient.exceptions
 import keystoneclient.v2_0.client
 import netaddr
 from neutronclient.common import exceptions as exc
@@ -112,7 +112,7 @@
         region = CONF.identity.region
         endpoint = self.identity_client.service_catalog.url_for(
             attr='region', filter_value=region,
-            service_type='image', endpoint_type='publicURL')
+            service_type=CONF.image.catalog_type, endpoint_type='publicURL')
         dscv = CONF.identity.disable_ssl_certificate_validation
         return glanceclient.Client('1', endpoint=endpoint, token=token,
                                    insecure=dscv)
@@ -146,7 +146,7 @@
             keystone_admin.roles.add_user_role(self.identity_client.user_id,
                                                member_role.id,
                                                self.identity_client.tenant_id)
-        except keystoneclient.apiclient.exceptions.Conflict:
+        except keystoneclient.exceptions.Conflict:
             pass
 
         return swiftclient.Connection(auth_url, username, password,
@@ -167,11 +167,12 @@
         keystone = self._get_identity_client(username, password, tenant_name)
         region = CONF.identity.region
         token = keystone.auth_token
+        service_type = CONF.orchestration.catalog_type
         try:
             endpoint = keystone.service_catalog.url_for(
                 attr='region',
                 filter_value=region,
-                service_type='orchestration',
+                service_type=service_type,
                 endpoint_type='publicURL')
         except keystoneclient.exceptions.EndpointNotFound:
             return None
@@ -670,13 +671,17 @@
                          "Unable to determine which port to target.")
         return ports[0]['id']
 
-    def _create_floating_ip(self, server, external_network_id):
-        port_id = self._get_server_port_id(server)
+    def _create_floating_ip(self, thing, external_network_id,
+                            port_filters=None):
+        if port_filters is None:
+            port_id = self._get_server_port_id(thing)
+        else:
+            port_id = port_filters
         body = dict(
             floatingip=dict(
                 floating_network_id=external_network_id,
                 port_id=port_id,
-                tenant_id=server.tenant_id,
+                tenant_id=thing.tenant_id,
             )
         )
         result = self.network_client.create_floatingip(body=body)
@@ -713,6 +718,58 @@
         return tempest.test.call_until_true(
             ping, CONF.compute.ping_timeout, 1)
 
+    def _create_pool(self, lb_method, protocol, subnet_id):
+        """Wrapper utility that returns a test pool."""
+        name = data_utils.rand_name('pool-')
+        body = {
+            "pool": {
+                "protocol": protocol,
+                "name": name,
+                "subnet_id": subnet_id,
+                "lb_method": lb_method
+            }
+        }
+        resp = self.network_client.create_pool(body=body)
+        pool = net_common.DeletablePool(client=self.network_client,
+                                        **resp['pool'])
+        self.assertEqual(pool['name'], name)
+        self.set_resource(name, pool)
+        return pool
+
+    def _create_member(self, address, protocol_port, pool_id):
+        """Wrapper utility that returns a test member."""
+        body = {
+            "member": {
+                "protocol_port": protocol_port,
+                "pool_id": pool_id,
+                "address": address
+            }
+        }
+        resp = self.network_client.create_member(body)
+        member = net_common.DeletableMember(client=self.network_client,
+                                            **resp['member'])
+        self.set_resource(data_utils.rand_name('member-'), member)
+        return member
+
+    def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
+        """Wrapper utility that returns a test vip."""
+        name = data_utils.rand_name('vip-')
+        body = {
+            "vip": {
+                "protocol": protocol,
+                "name": name,
+                "subnet_id": subnet_id,
+                "pool_id": pool_id,
+                "protocol_port": protocol_port
+            }
+        }
+        resp = self.network_client.create_vip(body)
+        vip = net_common.DeletableVip(client=self.network_client,
+                                      **resp['vip'])
+        self.assertEqual(vip['name'], name)
+        self.set_resource(name, vip)
+        return vip
+
     def _check_vm_connectivity(self, ip_address,
                                username=None,
                                private_key=None,
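
The pool/member/vip helpers added above are consumed by the load balancer scenario that follows: create a pool on a subnet, add members for the backend addresses, then front them with a VIP. A rough sketch of that chaining, assuming a neutron client exposing the same create_pool/create_member/create_vip call shapes used in the diff:

    def build_load_balancer(network_client, subnet_id, backends):
        """backends is an iterable of (address, port) tuples."""
        pool = network_client.create_pool(body={'pool': {
            'name': 'pool-demo', 'protocol': 'HTTP',
            'subnet_id': subnet_id, 'lb_method': 'ROUND_ROBIN'}})['pool']
        for address, port in backends:
            network_client.create_member({'member': {
                'address': address, 'protocol_port': port,
                'pool_id': pool['id']}})
        vip = network_client.create_vip({'vip': {
            'name': 'vip-demo', 'protocol': 'HTTP', 'protocol_port': 80,
            'subnet_id': subnet_id, 'pool_id': pool['id']}})['vip']
        return pool, vip
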
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
new file mode 100644
index 0000000..68f6e62
--- /dev/null
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -0,0 +1,240 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+import urllib
+
+from tempest.api.network import common as net_common
+from tempest.common import ssh
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest.scenario import manager
+from tempest import test
+
+config = config.CONF
+
+
+class TestLoadBalancerBasic(manager.NetworkScenarioTest):
+
+    """
+    This test checks basic load balancing.
+
+    The following is the scenario outline:
+    1. Create an instance
+    2. SSH to the instance and start two servers
+    3. Create a load balancer with two members using the ROUND_ROBIN
+       algorithm and associate the VIP with a floating ip
+    4. Send 10 requests to the floating ip and check that they are shared
+       between the two servers and that both of them get equal portions
+       of the requests
+    """
+
+    @classmethod
+    def check_preconditions(cls):
+        super(TestLoadBalancerBasic, cls).check_preconditions()
+        cfg = config.network
+        if not test.is_extension_enabled('lbaas', 'network'):
+            msg = 'LBaaS Extension is not enabled'
+            cls.enabled = False
+            raise cls.skipException(msg)
+        if not (cfg.tenant_networks_reachable or cfg.public_network_id):
+            msg = ('Either tenant_networks_reachable must be "true", or '
+                   'public_network_id must be defined.')
+            cls.enabled = False
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestLoadBalancerBasic, cls).setUpClass()
+        cls.check_preconditions()
+        cls.security_groups = {}
+        cls.networks = []
+        cls.subnets = []
+        cls.servers_keypairs = {}
+        cls.pools = []
+        cls.members = []
+        cls.vips = []
+        cls.floating_ips = {}
+        cls.port1 = 80
+        cls.port2 = 88
+
+    def _create_security_groups(self):
+        self.security_groups[self.tenant_id] =\
+            self._create_security_group_neutron(tenant_id=self.tenant_id)
+
+    def _create_server(self):
+        tenant_id = self.tenant_id
+        name = data_utils.rand_name("smoke_server-")
+        keypair = self.create_keypair(name='keypair-%s' % name)
+        security_groups = [self.security_groups[tenant_id].name]
+        nets = self.network_client.list_networks()
+        for net in nets['networks']:
+            if net['tenant_id'] == self.tenant_id:
+                self.networks.append(net)
+                create_kwargs = {
+                    'nics': [
+                        {'net-id': net['id']},
+                    ],
+                    'key_name': keypair.name,
+                    'security_groups': security_groups,
+                }
+                server = self.create_server(name=name,
+                                            create_kwargs=create_kwargs)
+                self.servers_keypairs[server] = keypair
+                break
+        self.assertTrue(self.servers_keypairs)
+
+    def _start_servers(self):
+        """
+        1. SSH to the instance
+        2. Start two servers listening on ports 80 and 88 respectively
+        """
+        for server in self.servers_keypairs.keys():
+            ssh_login = config.compute.image_ssh_user
+            private_key = self.servers_keypairs[server].private_key
+            network_name = self.networks[0]['name']
+
+            ip_address = server.networks[network_name][0]
+            ssh_client = ssh.Client(ip_address, ssh_login,
+                                    pkey=private_key,
+                                    timeout=100)
+            start_server = "while true; do echo -e 'HTTP/1.0 200 OK\r\n\r\n" \
+                           "%(server)s' | sudo nc -l -p %(port)s ; done &"
+            cmd = start_server % {'server': 'server1',
+                                  'port': self.port1}
+            ssh_client.exec_command(cmd)
+            cmd = start_server % {'server': 'server2',
+                                  'port': self.port2}
+            ssh_client.exec_command(cmd)
+
+    def _check_connection(self, check_ip):
+        def try_connect(ip):
+            try:
+                urllib.urlopen("http://{0}/".format(ip))
+                return True
+            except IOError:
+                return False
+        timeout = config.compute.ping_timeout
+        timer = 0
+        while not try_connect(check_ip):
+            time.sleep(1)
+            timer += 1
+            if timer >= timeout:
+                message = "Timed out trying to connect to %s" % check_ip
+                raise exceptions.TimeoutException(message)
+
+    def _create_pool(self):
+        """Create a pool with ROUND_ROBIN algorithm."""
+        subnets = self.network_client.list_subnets()
+        for subnet in subnets['subnets']:
+            if subnet['tenant_id'] == self.tenant_id:
+                self.subnets.append(subnet)
+                pool = super(TestLoadBalancerBasic, self)._create_pool(
+                    'ROUND_ROBIN',
+                    'HTTP',
+                    subnet['id'])
+                self.pools.append(pool)
+                break
+        self.assertTrue(self.pools)
+
+    def _create_members(self, network_name, server_ids):
+        """
+        Create two members.
+
+        In case there is only one server, create both members with the same IP
+        address but listening on different ports.
+        """
+        servers = self.compute_client.servers.list()
+        for server in servers:
+            if server.id in server_ids:
+                ip = server.networks[network_name][0]
+                pool_id = self.pools[0]['id']
+                if len(set(server_ids)) == 1 or len(servers) == 1:
+                    member1 = self._create_member(ip, self.port1, pool_id)
+                    member2 = self._create_member(ip, self.port2, pool_id)
+                    self.members.extend([member1, member2])
+                else:
+                    member = self._create_member(ip, self.port1, pool_id)
+                    self.members.append(member)
+        self.assertTrue(self.members)
+
+    def _assign_floating_ip_to_vip(self, vip):
+        public_network_id = config.network.public_network_id
+        port_id = vip['port_id']
+        floating_ip = self._create_floating_ip(vip,
+                                               public_network_id,
+                                               port_filters=port_id)
+        self.floating_ips.setdefault(vip['id'], [])
+        self.floating_ips[vip['id']].append(floating_ip)
+
+    def _create_load_balancer(self):
+        self._create_pool()
+        self._create_members(self.networks[0]['name'],
+                             [self.servers_keypairs.keys()[0].id])
+        subnet_id = self.subnets[0]['id']
+        pool_id = self.pools[0]['id']
+        vip = super(TestLoadBalancerBasic, self)._create_vip('HTTP', 80,
+                                                             subnet_id,
+                                                             pool_id)
+        self.vips.append(vip)
+        self._status_timeout(NeutronRetriever(self.network_client,
+                                              self.network_client.vip_path,
+                                              net_common.DeletableVip),
+                             self.vips[0]['id'],
+                             expected_status='ACTIVE')
+        self._assign_floating_ip_to_vip(self.vips[0])
+
+    def _check_load_balancing(self):
+        """
+        1. Send 10 requests on the floating ip associated with the VIP
+        2. Check that the requests are shared between
+           the two servers and that both of them get equal portions
+           of the requests
+        """
+
+        vip = self.vips[0]
+        floating_ip_vip = self.floating_ips[
+            vip['id']][0]['floating_ip_address']
+        self._check_connection(floating_ip_vip)
+        resp = []
+        for count in range(10):
+            resp.append(
+                urllib.urlopen(
+                    "http://{0}/".format(floating_ip_vip)).read())
+        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
+        self.assertEqual(5, resp.count("server1\n"))
+        self.assertEqual(5, resp.count("server2\n"))
+
+    @test.skip_because(bug="1277381")
+    @test.attr(type='smoke')
+    @test.services('compute', 'network')
+    def test_load_balancer_basic(self):
+        self._create_security_groups()
+        self._create_server()
+        self._start_servers()
+        self._create_load_balancer()
+        self._check_load_balancing()
+
+
+class NeutronRetriever(object):
+    def __init__(self, network_client, path, resource):
+        self.network_client = network_client
+        self.path = path
+        self.resource = resource
+
+    def get(self, thing_id):
+        obj = self.network_client.get(self.path % thing_id)
+        return self.resource(client=self.network_client, **obj.values()[0])
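For orientation, a minimal sketch of how this retriever is consumed by the scenario above (the VIP id is hypothetical):

    retriever = NeutronRetriever(self.network_client,
                                 self.network_client.vip_path,
                                 net_common.DeletableVip)
    vip = retriever.get('vip-id')  # GET vip_path % 'vip-id', wrapped as DeletableVip
    # _status_timeout() polls retriever.get() until the VIP reports ACTIVE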
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index db21201..e96b44b 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -77,3 +77,17 @@
 
         uri = "node-group-templates/%s" % tmpl_id
         return self.delete(uri)
+
+    def list_plugins(self):
+        """List all enabled plugins."""
+
+        uri = 'plugins'
+        return self._request_and_parse(self.get, uri, 'plugins')
+
+    def get_plugin(self, plugin_name, plugin_version=None):
+        """Returns the details of a single plugin."""
+
+        uri = "plugins/%s" % plugin_name
+        if plugin_version:
+            uri += '/%s' % plugin_version
+        return self._request_and_parse(self.get, uri, 'plugin')
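A hedged usage sketch for the new plugin calls; the plugin name and version below are examples, not part of this change:

    resp, plugins = client.list_plugins()                 # GET plugins
    resp, plugin = client.get_plugin('vanilla')           # GET plugins/vanilla
    resp, plugin = client.get_plugin('vanilla', '1.2.1')  # GET plugins/vanilla/1.2.1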
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index 17271cc..bc9db38 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -35,7 +35,7 @@
 
     def __init__(self, auth_provider):
         super(ImageClientJSON, self).__init__(auth_provider)
-        self.service = CONF.images.catalog_type
+        self.service = CONF.image.catalog_type
         self._http = None
 
     def _image_meta_from_headers(self, headers):
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index 38aef2d..b825519 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -30,7 +30,7 @@
 
     def __init__(self, auth_provider):
         super(ImageClientV2JSON, self).__init__(auth_provider)
-        self.service = CONF.images.catalog_type
+        self.service = CONF.image.catalog_type
         self._http = None
 
     def _get_http(self):
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 316ca95..efac5f5 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -30,10 +30,6 @@
         self.service = CONF.object_storage.catalog_type
         self.format = 'json'
 
-    @property
-    def token(self):
-        return self.auth_provider.auth_data[0]
-
     def create_account(self, data=None,
                        params=None,
                        metadata={},
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index 5656c9a..f224407 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -32,10 +32,6 @@
         self.service = CONF.object_storage.catalog_type
         self.format = 'json'
 
-    @property
-    def token(self):
-        return self.auth_provider.auth_data[0]
-
     def create_container(
             self, container_name,
             metadata=None,
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 0a16b9f..b70b2e8 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -177,7 +177,7 @@
             stack_name = body['stack_name']
             stack_status = body['stack_status']
             if stack_status == status:
-                return
+                return body
             if fail_regexp.search(stack_status):
                 raise exceptions.StackBuildErrorException(
                     stack_identifier=stack_identifier,
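With this change the wait helper hands back the last-polled stack body instead of None, so callers can inspect it without another GET. A hedged sketch, assuming the enclosing method is the client's wait_for_stack_status and the identifier is hypothetical:

    body = client.wait_for_stack_status('teststack/some-uuid', 'CREATE_COMPLETE')
    # body carries the fields polled above, e.g. body['stack_name'], body['stack_status']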
diff --git a/tempest/services/volume/v2/__init__.py b/tempest/services/volume/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/volume/v2/__init__.py
diff --git a/tempest/services/volume/v2/json/__init__.py b/tempest/services/volume/v2/json/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/volume/v2/json/__init__.py
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
new file mode 100644
index 0000000..0524212
--- /dev/null
+++ b/tempest/services/volume/v2/json/volumes_client.py
@@ -0,0 +1,302 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import time
+import urllib
+
+from tempest.common.rest_client import RestClient
+from tempest import config
+from tempest import exceptions
+
+CONF = config.CONF
+
+
+class VolumesClientJSON(RestClient):
+    """
+    Client class to send CRUD Volume API requests to a Cinder endpoint
+    """
+
+    def __init__(self, auth_provider):
+        super(VolumesClientJSON, self).__init__(auth_provider)
+
+        self.service = CONF.volume.catalog_type
+        self.build_interval = CONF.volume.build_interval
+        self.build_timeout = CONF.volume.build_timeout
+
+    def get_attachment_from_volume(self, volume):
+        """Return the 'attachment' element from the given volume."""
+        return volume['attachments'][0]
+
+    def list_volumes(self, params=None):
+        """List all the volumes created."""
+        url = 'volumes'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['volumes']
+
+    def list_volumes_with_detail(self, params=None):
+        """List the details of all volumes."""
+        url = 'volumes/detail'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['volumes']
+
+    def get_volume(self, volume_id):
+        """Returns the details of a single volume."""
+        url = "volumes/%s" % str(volume_id)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['volume']
+
+    def create_volume(self, size, **kwargs):
+        """
+        Creates a new Volume.
+        size (Required): Size of volume in GB.
+        The following optional keyword arguments are accepted:
+        display_name: Optional Volume Name.
+        metadata: A dictionary of values to be used as metadata.
+        volume_type: Optional Name of volume_type for the volume
+        snapshot_id: When specified the volume is created from this snapshot
+        imageRef: When specified the volume is created from this image
+        """
+        post_body = {'size': size}
+        post_body.update(kwargs)
+        post_body = json.dumps({'volume': post_body})
+        resp, body = self.post('volumes', post_body, self.headers)
+        body = json.loads(body)
+        return resp, body['volume']
+
+    def update_volume(self, volume_id, **kwargs):
+        """Updates the Specified Volume."""
+        put_body = json.dumps({'volume': kwargs})
+        resp, body = self.put('volumes/%s' % volume_id, put_body,
+                              self.headers)
+        body = json.loads(body)
+        return resp, body['volume']
+
+    def delete_volume(self, volume_id):
+        """Deletes the Specified Volume."""
+        return self.delete("volumes/%s" % str(volume_id))
+
+    def upload_volume(self, volume_id, image_name, disk_format):
+        """Uploads a volume in Glance."""
+        post_body = {
+            'image_name': image_name,
+            'disk_format': disk_format
+        }
+        post_body = json.dumps({'os-volume_upload_image': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        body = json.loads(body)
+        return resp, body['os-volume_upload_image']
+
+    def attach_volume(self, volume_id, instance_uuid, mountpoint):
+        """Attaches a volume to a given instance on a given mountpoint."""
+        post_body = {
+            'instance_uuid': instance_uuid,
+            'mountpoint': mountpoint,
+        }
+        post_body = json.dumps({'os-attach': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        return resp, body
+
+    def detach_volume(self, volume_id):
+        """Detaches a volume from an instance."""
+        post_body = {}
+        post_body = json.dumps({'os-detach': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        return resp, body
+
+    def reserve_volume(self, volume_id):
+        """Reserves a volume."""
+        post_body = {}
+        post_body = json.dumps({'os-reserve': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        return resp, body
+
+    def unreserve_volume(self, volume_id):
+        """Restore a reserved volume."""
+        post_body = {}
+        post_body = json.dumps({'os-unreserve': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        return resp, body
+
+    def wait_for_volume_status(self, volume_id, status):
+        """Waits for a Volume to reach a given status."""
+        resp, body = self.get_volume(volume_id)
+        volume_name = body['display_name']
+        volume_status = body['status']
+        start = int(time.time())
+
+        while volume_status != status:
+            time.sleep(self.build_interval)
+            resp, body = self.get_volume(volume_id)
+            volume_status = body['status']
+            if volume_status == 'error':
+                raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
+
+            if int(time.time()) - start >= self.build_timeout:
+                message = ('Volume %s failed to reach %s status within '
+                           'the required time (%s s).' %
+                           (volume_name, status, self.build_timeout))
+                raise exceptions.TimeoutException(message)
+
+    def is_resource_deleted(self, id):
+        try:
+            self.get_volume(id)
+        except exceptions.NotFound:
+            return True
+        return False
+
+    def extend_volume(self, volume_id, extend_size):
+        """Extend a volume."""
+        post_body = {
+            'new_size': extend_size
+        }
+        post_body = json.dumps({'os-extend': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        return resp, body
+
+    def reset_volume_status(self, volume_id, status):
+        """Reset the Specified Volume's Status."""
+        post_body = json.dumps({'os-reset_status': {"status": status}})
+        resp, body = self.post('volumes/%s/action' % volume_id, post_body,
+                               self.headers)
+        return resp, body
+
+    def volume_begin_detaching(self, volume_id):
+        """Volume Begin Detaching."""
+        post_body = json.dumps({'os-begin_detaching': {}})
+        resp, body = self.post('volumes/%s/action' % volume_id, post_body,
+                               self.headers)
+        return resp, body
+
+    def volume_roll_detaching(self, volume_id):
+        """Volume Roll Detaching."""
+        post_body = json.dumps({'os-roll_detaching': {}})
+        resp, body = self.post('volumes/%s/action' % volume_id, post_body,
+                               self.headers)
+        return resp, body
+
+    def create_volume_transfer(self, vol_id, display_name=None):
+        """Create a volume transfer."""
+        post_body = {
+            'volume_id': vol_id
+        }
+        if display_name:
+            post_body['name'] = display_name
+        post_body = json.dumps({'transfer': post_body})
+        resp, body = self.post('os-volume-transfer',
+                               post_body,
+                               self.headers)
+        body = json.loads(body)
+        return resp, body['transfer']
+
+    def get_volume_transfer(self, transfer_id):
+        """Returns the details of a volume transfer."""
+        url = "os-volume-transfer/%s" % str(transfer_id)
+        resp, body = self.get(url, self.headers)
+        body = json.loads(body)
+        return resp, body['transfer']
+
+    def list_volume_transfers(self, params=None):
+        """List all the volume transfers created."""
+        url = 'os-volume-transfer'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['transfers']
+
+    def delete_volume_transfer(self, transfer_id):
+        """Delete a volume transfer."""
+        return self.delete("os-volume-transfer/%s" % str(transfer_id))
+
+    def accept_volume_transfer(self, transfer_id, transfer_auth_key):
+        """Accept a volume transfer."""
+        post_body = {
+            'auth_key': transfer_auth_key,
+        }
+        url = 'os-volume-transfer/%s/accept' % transfer_id
+        post_body = json.dumps({'accept': post_body})
+        resp, body = self.post(url, post_body, self.headers)
+        body = json.loads(body)
+        return resp, body['transfer']
+
+    def update_volume_readonly(self, volume_id, readonly):
+        """Update the Specified Volume's readonly flag."""
+        post_body = {
+            'readonly': readonly
+        }
+        post_body = json.dumps({'os-update_readonly_flag': post_body})
+        url = 'volumes/%s/action' % (volume_id)
+        resp, body = self.post(url, post_body, self.headers)
+        return resp, body
+
+    def force_delete_volume(self, volume_id):
+        """Force Delete Volume."""
+        post_body = json.dumps({'os-force_delete': {}})
+        resp, body = self.post('volumes/%s/action' % volume_id, post_body,
+                               self.headers)
+        return resp, body
+
+    def create_volume_metadata(self, volume_id, metadata):
+        """Create metadata for the volume."""
+        put_body = json.dumps({'metadata': metadata})
+        url = "volumes/%s/metadata" % str(volume_id)
+        resp, body = self.post(url, put_body, self.headers)
+        body = json.loads(body)
+        return resp, body['metadata']
+
+    def get_volume_metadata(self, volume_id):
+        """Get metadata of the volume."""
+        url = "volumes/%s/metadata" % str(volume_id)
+        resp, body = self.get(url, self.headers)
+        body = json.loads(body)
+        return resp, body['metadata']
+
+    def update_volume_metadata(self, volume_id, metadata):
+        """Update metadata for the volume."""
+        put_body = json.dumps({'metadata': metadata})
+        url = "volumes/%s/metadata" % str(volume_id)
+        resp, body = self.put(url, put_body, self.headers)
+        body = json.loads(body)
+        return resp, body['metadata']
+
+    def update_volume_metadata_item(self, volume_id, id, meta_item):
+        """Update metadata item for the volume."""
+        put_body = json.dumps({'meta': meta_item})
+        url = "volumes/%s/metadata/%s" % (str(volume_id), str(id))
+        resp, body = self.put(url, put_body, self.headers)
+        body = json.loads(body)
+        return resp, body['meta']
+
+    def delete_volume_metadata_item(self, volume_id, id):
+        """Delete metadata item for the volume."""
+        url = "volumes/%s/metadata/%s" % (str(volume_id), str(id))
+        resp, body = self.delete(url, self.headers)
+        return resp, body
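A hedged end-to-end sketch of the new v2 JSON client; the auth provider comes from the surrounding test framework and the names are illustrative:

    client = VolumesClientJSON(auth_provider)
    resp, volume = client.create_volume(1, display_name='test-vol')
    client.wait_for_volume_status(volume['id'], 'available')
    resp, _ = client.delete_volume(volume['id'])
    client.wait_for_resource_deletion(volume['id'])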
diff --git a/tempest/services/volume/v2/xml/__init__.py b/tempest/services/volume/v2/xml/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/volume/v2/xml/__init__.py
diff --git a/tempest/services/volume/v2/xml/volumes_client.py b/tempest/services/volume/v2/xml/volumes_client.py
new file mode 100644
index 0000000..deb56fd
--- /dev/null
+++ b/tempest/services/volume/v2/xml/volumes_client.py
@@ -0,0 +1,412 @@
+# Copyright 2012 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+import urllib
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest import config
+from tempest import exceptions
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
+from tempest.services.compute.xml.common import xml_to_json
+from tempest.services.compute.xml.common import XMLNS_11
+
+CONF = config.CONF
+
+
+class VolumesClientXML(RestClientXML):
+    """
+    Client class to send CRUD Volume API requests to a Cinder endpoint
+    """
+
+    def __init__(self, auth_provider):
+        super(VolumesClientXML, self).__init__(auth_provider)
+        self.service = CONF.volume.catalog_type
+        self.build_interval = CONF.compute.build_interval
+        self.build_timeout = CONF.compute.build_timeout
+
+    def _parse_volume(self, body):
+        vol = dict((attr, body.get(attr)) for attr in body.keys())
+
+        for child in body.getchildren():
+            tag = child.tag
+            if tag.startswith("{"):
+                ns, tag = tag.split("}", 1)
+            if tag == 'metadata':
+                vol['metadata'] = dict((meta.get('key'),
+                                       meta.text) for meta in
+                                       child.getchildren())
+            else:
+                vol[tag] = xml_to_json(child)
+        return vol
+
+    def get_attachment_from_volume(self, volume):
+        """Return the 'attachment' element from the given volume."""
+        return volume['attachments']['attachment']
+
+    def _check_if_bootable(self, volume):
+        """
+        Check if the volume is bootable and convert the value
+        of 'bootable' from a string to a boolean.
+        """
+
+        # NOTE(jdg): Version 1 of Cinder API uses lc strings
+        # We should consider being explicit in this check to
+        # avoid introducing bugs like: LP #1227837
+
+        if volume['bootable'].lower() == 'true':
+            volume['bootable'] = True
+        elif volume['bootable'].lower() == 'false':
+            volume['bootable'] = False
+        else:
+            raise ValueError(
+                'bootable flag is supposed to be either True or False, '
+                'it is %s' % volume['bootable'])
+        return volume
+
+    def list_volumes(self, params=None):
+        """List all the volumes created."""
+        url = 'volumes'
+
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url, self.headers)
+        body = etree.fromstring(body)
+        volumes = []
+        if body is not None:
+            volumes += [self._parse_volume(vol) for vol in list(body)]
+        for v in volumes:
+            v = self._check_if_bootable(v)
+        return resp, volumes
+
+    def list_volumes_with_detail(self, params=None):
+        """List all the details of volumes."""
+        url = 'volumes/detail'
+
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url, self.headers)
+        body = etree.fromstring(body)
+        volumes = []
+        if body is not None:
+            volumes += [self._parse_volume(vol) for vol in list(body)]
+        for v in volumes:
+            v = self._check_if_bootable(v)
+        return resp, volumes
+
+    def get_volume(self, volume_id):
+        """Returns the details of a single volume."""
+        url = "volumes/%s" % str(volume_id)
+        resp, body = self.get(url, self.headers)
+        body = self._parse_volume(etree.fromstring(body))
+        body = self._check_if_bootable(body)
+        return resp, body
+
+    def create_volume(self, size, **kwargs):
+        """Creates a new Volume.
+
+        :param size: Size of volume in GB. (Required)
+        :param display_name: Optional Volume Name.
+        :param metadata: An optional dictionary of values for metadata.
+        :param volume_type: Optional Name of volume_type for the volume
+        :param snapshot_id: When specified the volume is created from
+                            this snapshot
+        :param imageRef: When specified the volume is created from this
+                         image
+        """
+        # NOTE(afazekas): it should use a volume namespace
+        volume = Element("volume", xmlns=XMLNS_11, size=size)
+
+        if 'metadata' in kwargs:
+            _metadata = Element('metadata')
+            volume.append(_metadata)
+            for key, value in kwargs['metadata'].items():
+                meta = Element('meta')
+                meta.add_attr('key', key)
+                meta.append(Text(value))
+                _metadata.append(meta)
+            attr_to_add = kwargs.copy()
+            del attr_to_add['metadata']
+        else:
+            attr_to_add = kwargs
+
+        for key, value in attr_to_add.items():
+            volume.add_attr(key, value)
+
+        resp, body = self.post('volumes', str(Document(volume)),
+                               self.headers)
+        body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def update_volume(self, volume_id, **kwargs):
+        """Updates the Specified Volume."""
+        put_body = Element("volume", xmlns=XMLNS_11, **kwargs)
+
+        resp, body = self.put('volumes/%s' % volume_id,
+                              str(Document(put_body)),
+                              self.headers)
+        body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def delete_volume(self, volume_id):
+        """Deletes the Specified Volume."""
+        return self.delete("volumes/%s" % str(volume_id))
+
+    def wait_for_volume_status(self, volume_id, status):
+        """Waits for a Volume to reach a given status."""
+        resp, body = self.get_volume(volume_id)
+        volume_status = body['status']
+        start = int(time.time())
+
+        while volume_status != status:
+            time.sleep(self.build_interval)
+            resp, body = self.get_volume(volume_id)
+            volume_status = body['status']
+            if volume_status == 'error':
+                raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
+
+            if int(time.time()) - start >= self.build_timeout:
+                message = 'Volume %s failed to reach %s status within '\
+                          'the required time (%s s).' % (volume_id,
+                                                         status,
+                                                         self.build_timeout)
+                raise exceptions.TimeoutException(message)
+
+    def is_resource_deleted(self, id):
+        try:
+            self.get_volume(id)
+        except exceptions.NotFound:
+            return True
+        return False
+
+    def attach_volume(self, volume_id, instance_uuid, mountpoint):
+        """Attaches a volume to a given instance on a given mountpoint."""
+        post_body = Element("os-attach",
+                            instance_uuid=instance_uuid,
+                            mountpoint=mountpoint
+                            )
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def detach_volume(self, volume_id):
+        """Detaches a volume from an instance."""
+        post_body = Element("os-detach")
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def upload_volume(self, volume_id, image_name, disk_format):
+        """Uploads a volume in Glance."""
+        post_body = Element("os-volume_upload_image",
+                            image_name=image_name,
+                            disk_format=disk_format)
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        volume = xml_to_json(etree.fromstring(body))
+        return resp, volume
+
+    def extend_volume(self, volume_id, extend_size):
+        """Extend a volume."""
+        post_body = Element("os-extend",
+                            new_size=extend_size)
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def reset_volume_status(self, volume_id, status):
+        """Reset the Specified Volume's Status."""
+        post_body = Element("os-reset_status",
+                            status=status
+                            )
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def volume_begin_detaching(self, volume_id):
+        """Volume Begin Detaching."""
+        post_body = Element("os-begin_detaching")
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def volume_roll_detaching(self, volume_id):
+        """Volume Roll Detaching."""
+        post_body = Element("os-roll_detaching")
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def reserve_volume(self, volume_id):
+        """Reserves a volume."""
+        post_body = Element("os-reserve")
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def unreserve_volume(self, volume_id):
+        """Restore a reserved volume."""
+        post_body = Element("os-unreserve")
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def create_volume_transfer(self, vol_id, display_name=None):
+        """Create a volume transfer."""
+        post_body = Element("transfer",
+                            volume_id=vol_id)
+        if display_name:
+            post_body.add_attr('name', display_name)
+        resp, body = self.post('os-volume-transfer',
+                               str(Document(post_body)),
+                               self.headers)
+        volume = xml_to_json(etree.fromstring(body))
+        return resp, volume
+
+    def get_volume_transfer(self, transfer_id):
+        """Returns the details of a volume transfer."""
+        url = "os-volume-transfer/%s" % str(transfer_id)
+        resp, body = self.get(url, self.headers)
+        volume = xml_to_json(etree.fromstring(body))
+        return resp, volume
+
+    def list_volume_transfers(self, params=None):
+        """List all the volume transfers created."""
+        url = 'os-volume-transfer'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url, self.headers)
+        body = etree.fromstring(body)
+        volumes = []
+        if body is not None:
+            volumes += [self._parse_volume_transfer(vol) for vol in list(body)]
+        return resp, volumes
+
+    def _parse_volume_transfer(self, body):
+        vol = dict((attr, body.get(attr)) for attr in body.keys())
+        for child in body.getchildren():
+            tag = child.tag
+            if tag.startswith("{"):
+                ns, tag = tag.split("}", 1)
+            vol[tag] = xml_to_json(child)
+        return vol
+
+    def delete_volume_transfer(self, transfer_id):
+        """Delete a volume transfer."""
+        return self.delete("os-volume-transfer/%s" % str(transfer_id))
+
+    def accept_volume_transfer(self, transfer_id, transfer_auth_key):
+        """Accept a volume transfer."""
+        post_body = Element("accept", auth_key=transfer_auth_key)
+        url = 'os-volume-transfer/%s/accept' % transfer_id
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        volume = xml_to_json(etree.fromstring(body))
+        return resp, volume
+
+    def update_volume_readonly(self, volume_id, readonly):
+        """Update the Specified Volume's readonly flag."""
+        post_body = Element("os-update_readonly_flag",
+                            readonly=readonly)
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def force_delete_volume(self, volume_id):
+        """Force Delete Volume."""
+        post_body = Element("os-force_delete")
+        url = 'volumes/%s/action' % str(volume_id)
+        resp, body = self.post(url, str(Document(post_body)), self.headers)
+        if body:
+            body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def _metadata_body(self, meta):
+        post_body = Element('metadata')
+        for k, v in meta.items():
+            data = Element('meta', key=k)
+            data.append(Text(v))
+            post_body.append(data)
+        return post_body
+
+    def _parse_key_value(self, node):
+        """Parse <foo key='key'>value</foo> data into {'key': 'value'}."""
+        data = {}
+        for node in node.getchildren():
+            data[node.get('key')] = node.text
+        return data
+
+    def create_volume_metadata(self, volume_id, metadata):
+        """Create metadata for the volume."""
+        post_body = self._metadata_body(metadata)
+        resp, body = self.post('volumes/%s/metadata' % volume_id,
+                               str(Document(post_body)),
+                               self.headers)
+        body = self._parse_key_value(etree.fromstring(body))
+        return resp, body
+
+    def get_volume_metadata(self, volume_id):
+        """Get metadata of the volume."""
+        url = "volumes/%s/metadata" % str(volume_id)
+        resp, body = self.get(url, self.headers)
+        body = self._parse_key_value(etree.fromstring(body))
+        return resp, body
+
+    def update_volume_metadata(self, volume_id, metadata):
+        """Update metadata for the volume."""
+        put_body = self._metadata_body(metadata)
+        url = "volumes/%s/metadata" % str(volume_id)
+        resp, body = self.put(url, str(Document(put_body)), self.headers)
+        body = self._parse_key_value(etree.fromstring(body))
+        return resp, body
+
+    def update_volume_metadata_item(self, volume_id, id, meta_item):
+        """Update metadata item for the volume."""
+        for k, v in meta_item.items():
+            put_body = Element('meta', key=k)
+            put_body.append(Text(v))
+        url = "volumes/%s/metadata/%s" % (str(volume_id), str(id))
+        resp, body = self.put(url, str(Document(put_body)), self.headers)
+        body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def delete_volume_metadata_item(self, volume_id, id):
+        """Delete metadata item for the volume."""
+        url = "volumes/%s/metadata/%s" % (str(volume_id), str(id))
+        return self.delete(url)
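For reference, a hedged sketch of the request body this XML client serializes for the metadata calls (the volume id is hypothetical):

    resp, body = client.create_volume_metadata('vol-id', {'tag': 'value'})
    # POSTs volumes/vol-id/metadata with a document roughly like:
    #   <metadata><meta key="tag">value</meta></metadata>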
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
index 8bd2f22..2f1d28f 100644
--- a/tempest/stress/actions/unit_test.py
+++ b/tempest/stress/actions/unit_test.py
@@ -10,10 +10,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest import config
 from tempest.openstack.common import importutils
 from tempest.openstack.common import log as logging
 import tempest.stress.stressaction as stressaction
 
+CONF = config.CONF
+
 
 class SetUpClassRunTime(object):
 
@@ -73,10 +76,14 @@
                 self.klass.setUpClass()
                 self.setupclass_called = True
 
-            self.run_core()
-
-            if (self.class_setup_per == SetUpClassRunTime.action):
-                self.klass.tearDownClass()
+            try:
+                self.run_core()
+            except Exception:
+                raise
+            finally:
+                if (CONF.stress.leave_dirty_stack is False
+                    and self.class_setup_per == SetUpClassRunTime.action):
+                    self.klass.tearDownClass()
         else:
             self.run_core()
 
diff --git a/tempest/test.py b/tempest/test.py
index 5464c03..38b9102 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -70,16 +70,35 @@
     This decorator applies a testtools attr for each service that gets
     exercised by a test case.
     """
-    valid_service_list = ['compute', 'image', 'volume', 'orchestration',
-                          'network', 'identity', 'object_storage', 'dashboard']
+    service_list = {
+        'compute': CONF.service_available.nova,
+        'image': CONF.service_available.glance,
+        'volume': CONF.service_available.cinder,
+        'orchestration': CONF.service_available.heat,
+        # NOTE(mtreinish) nova-network will provide networking functionality
+        # if neutron isn't available, so always set to True.
+        'network': True,
+        'identity': True,
+        'object_storage': CONF.service_available.swift,
+        'dashboard': CONF.service_available.horizon,
+    }
 
     def decorator(f):
         for service in args:
-            if service not in valid_service_list:
+            if service not in service_list:
                 raise exceptions.InvalidServiceTag('%s is not a valid service'
                                                    % service)
         attr(type=list(args))(f)
-        return f
+
+        @functools.wraps(f)
+        def wrapper(self, *func_args, **func_kwargs):
+            for service in args:
+                if not service_list[service]:
+                    msg = 'Skipped because the %s service is not available' % (
+                        service)
+                    raise testtools.TestCase.skipException(msg)
+            return f(self, *func_args, **func_kwargs)
+        return wrapper
     return decorator
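A minimal sketch of the decorator in use (class and test names are hypothetical); with this change the services check is enforced at run time rather than only validating the tags:

    class VolumesAttachTest(base.BaseV2ComputeTest):

        @test.services('compute', 'volume')
        def test_attach_volume(self):
            ...  # skipped automatically when service_available.cinder is False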
 
 
diff --git a/tempest/tests/negative/__init__.py b/tempest/tests/negative/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/negative/__init__.py
diff --git a/tempest/tests/negative/test_generate_json.py b/tempest/tests/negative/test_generate_json.py
new file mode 100644
index 0000000..a0aa088
--- /dev/null
+++ b/tempest/tests/negative/test_generate_json.py
@@ -0,0 +1,53 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import generate_json as gen
+import tempest.test
+
+
+class TestGenerateJson(tempest.test.BaseTestCase):
+
+    fake_input_str = {"type": "string",
+                      "minLength": 2,
+                      "maxLength": 8,
+                      'results': {'gen_number': 404}}
+
+    fake_input_int = {"type": "integer",
+                      "maximum": 255,
+                      "minimum": 1}
+
+    fake_input_obj = {"type": "object",
+                      "properties": {"minRam": {"type": "integer"},
+                                     "diskName": {"type": "string"},
+                                     "maxRam": {"type": "integer", }
+                                     }
+                      }
+
+    def _validate_result(self, data):
+        self.assertTrue(isinstance(data, list))
+        for t in data:
+            self.assertTrue(isinstance(t, tuple))
+
+    def test_generate_invalid_string(self):
+        result = gen.generate_invalid(self.fake_input_str)
+        self._validate_result(result)
+
+    def test_generate_invalid_integer(self):
+        result = gen.generate_invalid(self.fake_input_int)
+        self._validate_result(result)
+
+    def test_generate_invalid_obj(self):
+        result = gen.generate_invalid(self.fake_input_obj)
+        self._validate_result(result)
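These tests only assert the shape of the output; a hedged sketch of a direct call, with the schema mirroring fake_input_int above:

    cases = gen.generate_invalid({"type": "integer", "maximum": 255, "minimum": 1})
    for case in cases:
        # each entry is a tuple describing one invalid variation of the schema
        print(case)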
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
new file mode 100644
index 0000000..4c59383
--- /dev/null
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -0,0 +1,64 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+import tempest.test as test
+
+
+class TestNegativeAutoTest(test.BaseTestCase):
+    # Fake entries
+    _interface = 'json'
+    _service = 'compute'
+
+    fake_input_desc = {"name": "list-flavors-with-detail",
+                       "http-method": "GET",
+                       "url": "flavors/detail",
+                       "json-schema": {"type": "object",
+                                      "properties":
+                                      {"minRam": {"type": "integer"},
+                                       "minDisk": {"type": "integer"}}
+                                       },
+                       "resources": ["flavor", "volume", "image"]
+                       }
+
+    def _check_prop_entries(self, result, entry):
+        entries = [a for a in result if entry in a[0]]
+        self.assertIsNotNone(entries)
+        self.assertEqual(len(entries), 2)
+        for entry in entries:
+            self.assertIsNotNone(entry[1]['schema'])
+
+    def _check_resource_entries(self, result, entry):
+        entries = [a for a in result if entry in a[0]]
+        self.assertIsNotNone(entries)
+        self.assertEqual(len(entries), 3)
+        for entry in entries:
+            self.assertIsNotNone(entry[1]['resource'])
+
+    @mock.patch('tempest.test.NegativeAutoTest.load_schema')
+    def test_generate_scenario(self, open_mock):
+        open_mock.return_value = self.fake_input_desc
+        scenarios = test.NegativeAutoTest.\
+            generate_scenario(None)
+
+        self.assertIsInstance(scenarios, list)
+        for scenario in scenarios:
+            self.assertIsInstance(scenario, tuple)
+            self.assertIsInstance(scenario[0], str)
+            self.assertIsInstance(scenario[1], dict)
+        self._check_prop_entries(scenarios, "prop_minRam")
+        self._check_prop_entries(scenarios, "prop_minDisk")
+        self._check_resource_entries(scenarios, "inv_res")
diff --git a/tempest/tests/test_ssh.py b/tempest/tests/test_ssh.py
index 429ed56..a6eedc4 100644
--- a/tempest/tests/test_ssh.py
+++ b/tempest/tests/test_ssh.py
@@ -88,15 +88,17 @@
         client_mock.connect.side_effect = [socket.error, socket.error, True]
         t_mock.side_effect = [
             1000,  # Start time
+            1000,  # LOG.warning() calls time.time() loop 1
             1001,  # Sleep loop 1
+            1001,  # LOG.warning() calls time.time() loop 2
             1002   # Sleep loop 2
         ]
 
         client._get_ssh_connection(sleep=1)
 
         expected_sleeps = [
-            mock.call(1),
-            mock.call(1.01)
+            mock.call(2),
+            mock.call(3)
         ]
         self.assertEqual(expected_sleeps, s_mock.mock_calls)
 
@@ -111,7 +113,9 @@
         ]
         t_mock.side_effect = [
             1000,  # Start time
+            1000,  # LOG.warning() calls time.time() loop 1
             1001,  # Sleep loop 1
+            1001,  # LOG.warning() calls time.time() loop 2
             1002,  # Sleep loop 2
             1003,  # Sleep loop 3
             1004  # LOG.error() calls time.time()
diff --git a/tools/check_logs.py b/tools/check_logs.py
index f3204e3..15988a6 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -28,7 +28,7 @@
 is_neutron = os.environ.get('DEVSTACK_GATE_NEUTRON', "0") == "1"
 is_grenade = (os.environ.get('DEVSTACK_GATE_GRENADE', "0") == "1" or
               os.environ.get('DEVSTACK_GATE_GRENADE_FORWARD', "0") == "1")
-dump_all_errors = is_neutron
+dump_all_errors = True
 
 
 def process_files(file_specs, url_specs, whitelists):
@@ -69,6 +69,7 @@
                     print_log_name = False
                 if not whitelisted:
                     had_errors = True
+                    print("*** Not Whitelisted ***"),
                 print(line)
     return had_errors
 
diff --git a/tox.ini b/tox.ini
index 88f2537..1580b14 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,9 +6,6 @@
 [testenv]
 sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
-         LANG=en_US.UTF-8
-         LANGUAGE=en_US:en
-         LC_ALL=C
          OS_TEST_PATH=./tempest/test_discover
 usedevelop = True
 install_command = pip install {opts} {packages}
@@ -40,6 +37,12 @@
 commands =
   bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
 
+[testenv:full-serial]
+# The regex below is used to select which tests to run and exclude the slow tag:
+# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
+commands =
+  bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+
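Once this lands, the serial variant is invoked like any other tox environment, for example:

    tox -e full-serial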
 [testenv:testr-full]
 commands =
   bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'