Merge "Multiversion authentication part2"
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index da4ccbe..fd069e7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -161,6 +161,18 @@
                 return
             time.sleep(self.build_interval)
 
+    @staticmethod
+    def _delete_volume(volumes_client, volume_id):
+        """Deletes the given volume and waits for it to be gone."""
+        try:
+            resp, _ = volumes_client.delete_volume(volume_id)
+            # TODO(mriedem): We should move the wait_for_resource_deletion
+            # into the delete_volume method as a convenience to the caller.
+            volumes_client.wait_for_resource_deletion(volume_id)
+        except exceptions.NotFound:
+            LOG.warn("Unable to delete volume '%s' since it was not found. "
+                     "Maybe it was already deleted?" % volume_id)
+
 
 class BaseV2ComputeTest(BaseComputeTest):
 
@@ -231,14 +243,7 @@
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
-        try:
-            resp, _ = cls.volumes_extensions_client.delete_volume(volume_id)
-            # TODO(mriedem): We should move the wait_for_resource_deletion
-            # into the delete_volume method as a convenience to the caller.
-            cls.volumes_extensions_client.wait_for_resource_deletion(volume_id)
-        except exceptions.NotFound:
-            LOG.warn("Unable to delete volume '%s' since it was not found. "
-                     "Maybe it was already deleted?" % volume_id)
+        cls._delete_volume(cls.volumes_extensions_client, volume_id)
 
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -336,6 +341,11 @@
         cls.password = server['admin_password']
         return server['id']
 
+    @classmethod
+    def delete_volume(cls, volume_id):
+        """Deletes the given volume and waits for it to be gone."""
+        cls._delete_volume(cls.volumes_client, volume_id)
+
 
 class BaseV3ComputeAdminTest(BaseV3ComputeTest):
     """Base test case class for all Compute Admin API V3 tests."""
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 20c5d7f..0bf604c 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -24,6 +24,7 @@
 
     @classmethod
     def setUpClass(cls):
+        cls.set_network_resources(network=True, subnet=True, router=True)
         super(ServerRescueTestJSON, cls).setUpClass()
         cls.device = 'vdf'
 
@@ -41,20 +42,10 @@
         cls.sg_id = cls.sg['id']
 
         # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_attach = \
-            cls.volumes_extensions_client.create_volume(1,
-                                                        display_name=
-                                                        'test_attach')
+        resp, cls.volume = cls.volumes_extensions_client.create_volume(
+            1, display_name=data_utils.rand_name(cls.__name__ + '_volume'))
         cls.volumes_extensions_client.wait_for_volume_status(
-            cls.volume_to_attach['id'], 'available')
-
-        # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_detach = \
-            cls.volumes_extensions_client.create_volume(1,
-                                                        display_name=
-                                                        'test_detach')
-        cls.volumes_extensions_client.wait_for_volume_status(
-            cls.volume_to_detach['id'], 'available')
+            cls.volume['id'], 'available')
 
         # Server for positive tests
         resp, server = cls.create_test_server(wait_until='BUILD')
@@ -78,9 +69,7 @@
     def tearDownClass(cls):
         # Deleting the floating IP which is created in this method
         cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
-        client = cls.volumes_extensions_client
-        client.delete_volume(str(cls.volume_to_attach['id']).strip())
-        client.delete_volume(str(cls.volume_to_detach['id']).strip())
+        cls.delete_volume(cls.volume['id'])
         resp, cls.sg = cls.security_groups_client.delete_security_group(
             cls.sg_id)
         super(ServerRescueTestJSON, cls).tearDownClass()
@@ -93,9 +82,6 @@
         self.volumes_extensions_client.wait_for_volume_status(volume_id,
                                                               'available')
 
-    def _delete(self, volume_id):
-        self.volumes_extensions_client.delete_volume(volume_id)
-
     def _unrescue(self, server_id):
         resp, body = self.servers_client.unrescue_server(server_id)
         self.assertEqual(202, resp.status)
@@ -159,32 +145,31 @@
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.attach_volume,
                           self.server_id,
-                          self.volume_to_attach['id'],
+                          self.volume['id'],
                           device='/dev/%s' % self.device)
 
     @attr(type=['negative', 'gate'])
     def test_rescued_vm_detach_volume(self):
         # Attach the volume to the server
         self.servers_client.attach_volume(self.server_id,
-                                          self.volume_to_detach['id'],
+                                          self.volume['id'],
                                           device='/dev/%s' % self.device)
         self.volumes_extensions_client.wait_for_volume_status(
-            self.volume_to_detach['id'], 'in-use')
+            self.volume['id'], 'in-use')
 
         # Rescue the server
         self.servers_client.rescue_server(self.server_id,
                                           adminPass=self.password)
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
         # addCleanup is a LIFO queue
-        self.addCleanup(self._detach, self.server_id,
-                        self.volume_to_detach['id'])
+        self.addCleanup(self._detach, self.server_id, self.volume['id'])
         self.addCleanup(self._unrescue, self.server_id)
 
         # Detach the volume from the server expecting failure
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.detach_volume,
                           self.server_id,
-                          self.volume_to_detach['id'])
+                          self.volume['id'])
 
     @attr(type='gate')
     def test_rescued_vm_associate_dissociate_floating_ip(self):
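
The two per-purpose volumes (volume_to_attach / volume_to_detach) are collapsed
into a single cls.volume with a randomized display name, and teardown now goes
through the base class's delete_volume(), which waits for deletion and tolerates
NotFound, instead of raw client calls. The randomized name is what keeps
parallel runs from colliding:

    # Illustrative only -- rand_name() appends a random suffix; the exact
    # suffix format is a tempest.common.utils.data_utils detail.
    data_utils.rand_name('ServerRescueTestJSON_volume')
    # e.g. 'ServerRescueTestJSON_volume-1832905437'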
diff --git a/tempest/api/compute/v3/servers/test_server_metadata_negative.py b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
index 2c413db..ce6c340 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
@@ -34,6 +34,7 @@
 
         cls.server_id = server['id']
 
+    @test.skip_because(bug="1273948")
     @test.attr(type=['gate', 'negative'])
     def test_server_create_metadata_key_too_long(self):
         # Attempt to start a server with a meta-data key that is > 255
@@ -43,7 +44,7 @@
         for sz in [256, 257, 511, 1023]:
             key = "k" * sz
             meta = {key: 'data1'}
-            self.assertRaises(exceptions.OverLimit,
+            self.assertRaises(exceptions.BadRequest,
                               self.create_test_server,
                               meta=meta)
 
diff --git a/tempest/api/compute/v3/servers/test_server_rescue.py b/tempest/api/compute/v3/servers/test_server_rescue.py
index f8be1c1..fa7def0 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest.common.utils import data_utils
 from tempest import exceptions
 from tempest.test import attr
 
@@ -27,20 +28,10 @@
         cls.device = 'vdf'
 
         # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_attach = \
-            cls.volumes_client.create_volume(1,
-                                             display_name=
-                                             'test_attach')
+        resp, cls.volume = cls.volumes_client.create_volume(
+            1, display_name=data_utils.rand_name(cls.__name__ + '_volume'))
         cls.volumes_client.wait_for_volume_status(
-            cls.volume_to_attach['id'], 'available')
-
-        # Create a volume and wait for it to become ready for attach
-        resp, cls.volume_to_detach = \
-            cls.volumes_client.create_volume(1,
-                                             display_name=
-                                             'test_detach')
-        cls.volumes_client.wait_for_volume_status(
-            cls.volume_to_detach['id'], 'available')
+            cls.volume['id'], 'available')
 
         # Server for positive tests
         resp, server = cls.create_test_server(wait_until='BUILD')
@@ -62,9 +53,7 @@
 
     @classmethod
     def tearDownClass(cls):
-        client = cls.volumes_client
-        client.delete_volume(str(cls.volume_to_attach['id']).strip())
-        client.delete_volume(str(cls.volume_to_detach['id']).strip())
+        cls.delete_volume(cls.volume['id'])
         super(ServerRescueV3Test, cls).tearDownClass()
 
     def tearDown(self):
@@ -75,9 +64,6 @@
         self.volumes_client.wait_for_volume_status(volume_id,
                                                    'available')
 
-    def _delete(self, volume_id):
-        self.volumes_client.delete_volume(volume_id)
-
     def _unrescue(self, server_id):
         resp, body = self.servers_client.unrescue_server(server_id)
         self.assertEqual(202, resp.status)
@@ -141,29 +127,27 @@
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.attach_volume,
                           self.server_id,
-                          self.volume_to_attach['id'],
+                          self.volume['id'],
                           device='/dev/%s' % self.device)
 
     @attr(type=['negative', 'gate'])
     def test_rescued_vm_detach_volume(self):
         # Attach the volume to the server
         self.servers_client.attach_volume(self.server_id,
-                                          self.volume_to_detach['id'],
+                                          self.volume['id'],
                                           device='/dev/%s' % self.device)
-        self.volumes_client.wait_for_volume_status(
-            self.volume_to_detach['id'], 'in-use')
+        self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
 
         # Rescue the server
         self.servers_client.rescue_server(self.server_id,
                                           admin_password=self.password)
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
         # addCleanup is a LIFO queue
-        self.addCleanup(self._detach, self.server_id,
-                        self.volume_to_detach['id'])
+        self.addCleanup(self._detach, self.server_id, self.volume['id'])
         self.addCleanup(self._unrescue, self.server_id)
 
         # Detach the volume from the server expecting failure
         self.assertRaises(exceptions.Conflict,
                           self.servers_client.detach_volume,
                           self.server_id,
-                          self.volume_to_detach['id'])
+                          self.volume['id'])
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index c153699..12e0ad8 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -191,12 +191,13 @@
                           self.create_test_server,
                           key_name=key_name)
 
+    @test.skip_because(bug="1273948")
     @test.attr(type=['negative', 'gate'])
     def test_create_server_metadata_exceeds_length_limit(self):
         # Pass really long metadata while creating a server
 
         metadata = {'a': 'b' * 260}
-        self.assertRaises(exceptions.OverLimit,
+        self.assertRaises(exceptions.BadRequest,
                           self.create_test_server,
                           meta=metadata)
 
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
new file mode 100644
index 0000000..3b941d8
--- /dev/null
+++ b/tempest/api/data_processing/test_plugins.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.test import attr
+
+
+class PluginsTest(dp_base.BaseDataProcessingTest):
+    def _list_all_plugin_names(self):
+        """Returns all enabled plugin names.
+
+        It checks the response status and main plugin availability.
+        """
+        resp, plugins = self.client.list_plugins()
+
+        self.assertEqual(200, resp.status)
+
+        plugins_names = [plugin['name'] for plugin in plugins]
+        self.assertIn('vanilla', plugins_names)
+        self.assertIn('hdp', plugins_names)
+
+        return plugins_names
+
+    @attr(type='smoke')
+    def test_plugin_list(self):
+        self._list_all_plugin_names()
+
+    @attr(type='smoke')
+    def test_plugin_get(self):
+        for plugin_name in self._list_all_plugin_names():
+            resp, plugin = self.client.get_plugin(plugin_name)
+
+            self.assertEqual(200, resp.status)
+            self.assertEqual(plugin_name, plugin['name'])
+
+            for plugin_version in plugin['versions']:
+                resp, detailed_plugin = self.client.get_plugin(plugin_name,
+                                                               plugin_version)
+
+                self.assertEqual(200, resp.status)
+                self.assertEqual(plugin_name, detailed_plugin['name'])
+
+                # check that required image tags contain name and version
+                image_tags = detailed_plugin['required_image_tags']
+                self.assertIn(plugin_name, image_tags)
+                self.assertIn(plugin_version, image_tags)
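
The smoke tests above walk the plugin API end to end: list all enabled plugins,
then fetch each one both with and without a version, finally checking that the
required image tags carry the plugin name and version. They rely on a response
shaped roughly like the following (illustrative; the version string is made up):

    # Assumed (illustrative) shape of a detailed plugin response.
    detailed_plugin = {
        'name': 'vanilla',
        'versions': ['1.2.1'],
        'required_image_tags': ['vanilla', '1.2.1'],
    }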
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 37b848c..e439238 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -106,7 +106,7 @@
             cls.os_alt = clients.AltManager()
             identity_client = cls._get_identity_admin_client()
             cls.alt_tenant_id = identity_client.get_tenant_by_name(
-                cls.os_alt.tenant_name)['id']
+                cls.os_alt.credentials['tenant_name'])['id']
 
         cls.alt_img_cli = cls.os_alt.image_client
 
@@ -147,7 +147,7 @@
             cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
         else:
             cls.os_alt = clients.AltManager()
-            alt_tenant_name = cls.os_alt.tenant_name
+            alt_tenant_name = cls.os_alt.credentials['tenant_name']
             identity_client = cls._get_identity_admin_client()
             cls.alt_tenant_id = identity_client.get_tenant_by_name(
                 alt_tenant_name)['id']
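
This is part of the multiversion-authentication rework: the alternate client
manager now exposes its credentials as a mapping rather than flat attributes,
so the tenant lookup reads credentials['tenant_name']:

    # Before this change: credentials hung off the manager as attributes.
    alt_tenant_name = cls.os_alt.tenant_name
    # After: they live in a credentials mapping. Only 'tenant_name' is
    # exercised here; other keys are assumed to follow the same shape.
    alt_tenant_name = cls.os_alt.credentials['tenant_name']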
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index bfb7b48..7c02787 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -17,7 +17,7 @@
 from tempest import test
 
 
-class L3AgentSchedulerJSON(base.BaseAdminNetworkTest):
+class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
     _interface = 'json'
 
     """
@@ -33,7 +33,7 @@
 
     @classmethod
     def setUpClass(cls):
-        super(L3AgentSchedulerJSON, cls).setUpClass()
+        super(L3AgentSchedulerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
             msg = "L3 Agent Scheduler Extension not enabled."
             raise cls.skipException(msg)
@@ -61,5 +61,5 @@
         self.assertEqual(204, resp.status)
 
 
-class L3AgentSchedulerXML(L3AgentSchedulerJSON):
+class L3AgentSchedulerTestXML(L3AgentSchedulerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 1c2c4b0..b129786 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -49,6 +49,8 @@
         neutron as True
     """
 
+    force_tenant_isolation = False
+
     @classmethod
     def setUpClass(cls):
         # Create no network resources for these test.
@@ -57,6 +59,10 @@
         os = clients.Manager(interface=cls._interface)
         if not CONF.service_available.neutron:
             raise cls.skipException("Neutron support is required")
+
+        os = cls.get_client_manager()
+
+        cls.network_cfg = CONF.network
         cls.client = os.network_client
         cls.networks = []
         cls.subnets = []
@@ -110,6 +116,7 @@
         # Clean up networks
         for network in cls.networks:
             cls.client.delete_network(network['id'])
+        cls.clear_isolated_creds()
         super(BaseNetworkTest, cls).tearDownClass()
 
     @classmethod
@@ -269,5 +276,14 @@
             msg = ("Missing Administrative Network API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
-        cls.admin_manager = clients.AdminManager(interface=cls._interface)
-        cls.admin_client = cls.admin_manager.network_client
+        if (CONF.compute.allow_tenant_isolation or
+            cls.force_tenant_isolation is True):
+            creds = cls.isolated_creds.get_admin_creds()
+            admin_username, admin_tenant_name, admin_password = creds
+            cls.os_adm = clients.Manager(username=admin_username,
+                                         password=admin_password,
+                                         tenant_name=admin_tenant_name,
+                                         interface=cls._interface)
+        else:
+            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+        cls.admin_client = cls.os_adm.network_client
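
Admin network tests now obtain their admin manager through isolated credentials
whenever tenant isolation is allowed globally or forced on the class, and fall
back to the statically configured ComputeAdminManager otherwise. The new
force_tenant_isolation flag lets a single test class opt in:

    # Illustrative subclass (not in this patch): opt into isolated credentials
    # regardless of CONF.compute.allow_tenant_isolation.
    class MyAdminNetworkTest(base.BaseAdminNetworkTest):
        force_tenant_isolation = True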
diff --git a/tempest/api/network/common.py b/tempest/api/network/common.py
index 0ce1769..d68ff1a 100644
--- a/tempest/api/network/common.py
+++ b/tempest/api/network/common.py
@@ -126,3 +126,21 @@
 
     def delete(self):
         self.client.delete_security_group_rule(self.id)
+
+
+class DeletablePool(DeletableResource):
+
+    def delete(self):
+        self.client.delete_pool(self.id)
+
+
+class DeletableMember(DeletableResource):
+
+    def delete(self):
+        self.client.delete_member(self.id)
+
+
+class DeletableVip(DeletableResource):
+
+    def delete(self):
+        self.client.delete_vip(self.id)
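
The three new wrappers follow the existing pattern in this module: each simply
forwards delete() to the matching network-client call so scenario cleanup can
treat every neutron resource uniformly. A rough sketch of the base-class
contract they rely on (the real DeletableResource lives earlier in common.py,
is built on an attribute-dict helper, and may differ in detail):

    # Assumed shape of the base class: a dict-like wrapper that keeps a client
    # reference and leaves delete() to the concrete resource.
    class DeletableResource(dict):
        def __init__(self, *args, **kwargs):
            self.client = kwargs.pop('client', None)
            super(DeletableResource, self).__init__(*args, **kwargs)

        def delete(self):
            raise NotImplementedError()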
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 65eebf2..d5f2b5b 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -18,7 +18,7 @@
 from tempest import test
 
 
-class LoadBalancerJSON(base.BaseNetworkTest):
+class LoadBalancerTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
     """
@@ -39,7 +39,7 @@
 
     @classmethod
     def setUpClass(cls):
-        super(LoadBalancerJSON, cls).setUpClass()
+        super(LoadBalancerTestJSON, cls).setUpClass()
         if not test.is_extension_enabled('lbaas', 'network'):
             msg = "lbaas extension not enabled."
             raise cls.skipException(msg)
@@ -210,5 +210,5 @@
         self.assertEqual('204', resp['status'])
 
 
-class LoadBalancerXML(LoadBalancerJSON):
+class LoadBalancerTestXML(LoadBalancerTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 3aa765c..aee2a44 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -239,7 +239,7 @@
     _interface = 'xml'
 
 
-class BulkNetworkOpsJSON(base.BaseNetworkTest):
+class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
     _interface = 'json'
 
     """
@@ -263,7 +263,7 @@
 
     @classmethod
     def setUpClass(cls):
-        super(BulkNetworkOpsJSON, cls).setUpClass()
+        super(BulkNetworkOpsTestJSON, cls).setUpClass()
         cls.network1 = cls.create_network()
         cls.network2 = cls.create_network()
 
@@ -390,5 +390,5 @@
             self.assertIn(n['id'], ports_list)
 
 
-class BulkNetworkOpsXML(BulkNetworkOpsJSON):
+class BulkNetworkOpsTestXML(BulkNetworkOpsTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 64b8a41..78bc80a 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -38,10 +38,10 @@
 
     @classmethod
     def setUpClass(cls):
-        super(VPNaaSJSON, cls).setUpClass()
         if not test.is_extension_enabled('vpnaas', 'network'):
             msg = "vpnaas extension not enabled."
             raise cls.skipException(msg)
+        super(VPNaaSJSON, cls).setUpClass()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
         cls.router = cls.create_router(
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 146fac9..ac8b14f 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -36,7 +36,6 @@
         self.isolated_net_resources = {}
         self.ports = []
         self.name = name
-        self.config = CONF
         self.tempest_client = tempest_client
         self.interface = interface
         self.password = password
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index 0ed9b82..c772ce9 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -49,7 +49,7 @@
         self.channel_timeout = float(channel_timeout)
         self.buf_size = 1024
 
-    def _get_ssh_connection(self, sleep=1.5, backoff=1.01):
+    def _get_ssh_connection(self, sleep=1.5, backoff=1):
         """Returns an ssh connection to the specified host."""
         bsleep = sleep
         ssh = paramiko.SSHClient()
@@ -76,19 +76,21 @@
                          self.username, self.host)
                 return ssh
             except (socket.error,
-                    paramiko.SSHException):
-                attempts += 1
-                time.sleep(bsleep)
-                bsleep *= backoff
-                if not self._is_timed_out(_start_time):
-                    continue
-                else:
+                    paramiko.SSHException) as e:
+                if self._is_timed_out(_start_time):
                     LOG.exception("Failed to establish authenticated ssh"
                                   " connection to %s@%s after %d attempts",
                                   self.username, self.host, attempts)
                     raise exceptions.SSHTimeout(host=self.host,
                                                 user=self.username,
                                                 password=self.password)
+                bsleep += backoff
+                attempts += 1
+                LOG.warning("Failed to establish authenticated ssh"
+                            " connection to %s@%s (%s). Number of"
+                            " attempts: %s. Retrying after %d seconds.",
+                            self.username, self.host, e, attempts, bsleep)
+                time.sleep(bsleep)
 
     def _is_timed_out(self, start_time):
         return (time.time() - self.timeout) > start_time
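
The reconnect loop switches from multiplicative to additive backoff (the delay
grows by `backoff` seconds per attempt instead of being multiplied by 1.01),
checks the overall timeout before sleeping, and logs every failed attempt with
the exception that caused it. The test changes near the end of this patch
(extra time.time() side effects and expected sleeps of 2 and 3 seconds) follow
from exactly this behaviour. A condensed, self-contained sketch of the new
loop shape (illustrative helper, not part of the patch):

    import time

    def retry_with_linear_backoff(connect, is_timed_out, sleep=1.5, backoff=1):
        # Mirrors _get_ssh_connection(): retry until connect() succeeds or the
        # overall deadline passes, sleeping a little longer after each failure.
        bsleep = sleep
        attempts = 0
        while True:
            try:
                return connect()
            except Exception as e:   # the real loop catches socket/SSH errors
                if is_timed_out():
                    raise            # the real code raises SSHTimeout here
                bsleep += backoff    # additive; previously bsleep *= 1.01
                attempts += 1
                print("attempt %d failed (%s); retrying in %d seconds"
                      % (attempts, e, bsleep))  # real code uses LOG.warning
                time.sleep(bsleep)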
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 59a3aeb..840518a 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -22,7 +22,7 @@
 import cinderclient.client
 import glanceclient
 import heatclient.client
-import keystoneclient.apiclient.exceptions
+import keystoneclient.exceptions
 import keystoneclient.v2_0.client
 import netaddr
 from neutronclient.common import exceptions as exc
@@ -112,7 +112,7 @@
         region = CONF.identity.region
         endpoint = self.identity_client.service_catalog.url_for(
             attr='region', filter_value=region,
-            service_type='image', endpoint_type='publicURL')
+            service_type=CONF.images.catalog_type, endpoint_type='publicURL')
         dscv = CONF.identity.disable_ssl_certificate_validation
         return glanceclient.Client('1', endpoint=endpoint, token=token,
                                    insecure=dscv)
@@ -146,7 +146,7 @@
             keystone_admin.roles.add_user_role(self.identity_client.user_id,
                                                member_role.id,
                                                self.identity_client.tenant_id)
-        except keystoneclient.apiclient.exceptions.Conflict:
+        except keystoneclient.exceptions.Conflict:
             pass
 
         return swiftclient.Connection(auth_url, username, password,
@@ -167,11 +167,12 @@
         keystone = self._get_identity_client(username, password, tenant_name)
         region = CONF.identity.region
         token = keystone.auth_token
+        service_type = CONF.orchestration.catalog_type
         try:
             endpoint = keystone.service_catalog.url_for(
                 attr='region',
                 filter_value=region,
-                service_type='orchestration',
+                service_type=service_type,
                 endpoint_type='publicURL')
         except keystoneclient.exceptions.EndpointNotFound:
             return None
@@ -670,13 +671,17 @@
                          "Unable to determine which port to target.")
         return ports[0]['id']
 
-    def _create_floating_ip(self, server, external_network_id):
-        port_id = self._get_server_port_id(server)
+    def _create_floating_ip(self, thing, external_network_id,
+                            port_filters=None):
+        if port_filters is None:
+            port_id = self._get_server_port_id(thing)
+        else:
+            port_id = port_filters
         body = dict(
             floatingip=dict(
                 floating_network_id=external_network_id,
                 port_id=port_id,
-                tenant_id=server.tenant_id,
+                tenant_id=thing.tenant_id,
             )
         )
         result = self.network_client.create_floatingip(body=body)
@@ -713,6 +718,58 @@
         return tempest.test.call_until_true(
             ping, CONF.compute.ping_timeout, 1)
 
+    def _create_pool(self, lb_method, protocol, subnet_id):
+        """Wrapper utility that returns a test pool."""
+        name = data_utils.rand_name('pool-')
+        body = {
+            "pool": {
+                "protocol": protocol,
+                "name": name,
+                "subnet_id": subnet_id,
+                "lb_method": lb_method
+            }
+        }
+        resp = self.network_client.create_pool(body=body)
+        pool = net_common.DeletablePool(client=self.network_client,
+                                        **resp['pool'])
+        self.assertEqual(pool['name'], name)
+        self.set_resource(name, pool)
+        return pool
+
+    def _create_member(self, address, protocol_port, pool_id):
+        """Wrapper utility that returns a test member."""
+        body = {
+            "member": {
+                "protocol_port": protocol_port,
+                "pool_id": pool_id,
+                "address": address
+            }
+        }
+        resp = self.network_client.create_member(body)
+        member = net_common.DeletableMember(client=self.network_client,
+                                            **resp['member'])
+        self.set_resource(data_utils.rand_name('member-'), member)
+        return member
+
+    def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
+        """Wrapper utility that returns a test vip."""
+        name = data_utils.rand_name('vip-')
+        body = {
+            "vip": {
+                "protocol": protocol,
+                "name": name,
+                "subnet_id": subnet_id,
+                "pool_id": pool_id,
+                "protocol_port": protocol_port
+            }
+        }
+        resp = self.network_client.create_vip(body)
+        vip = net_common.DeletableVip(client=self.network_client,
+                                      **resp['vip'])
+        self.assertEqual(vip['name'], name)
+        self.set_resource(name, vip)
+        return vip
+
     def _check_vm_connectivity(self, ip_address,
                                username=None,
                                private_key=None,
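
_create_floating_ip() is generalized so the "thing" being wired up can be a
server (port looked up from the server) or any other resource whose port id is
passed in via port_filters, such as a load-balancer VIP. The three new LBaaS
wrappers create a pool, member and VIP through the raw network client, wrap
the result in the Deletable* classes from tempest.api.network.common and
register it for cleanup via set_resource(). Typical chaining, as done by the
new scenario below (subnet_id, server_ip and public_network_id are
placeholders):

    pool = self._create_pool('ROUND_ROBIN', 'HTTP', subnet_id)
    member = self._create_member(address=server_ip, protocol_port=80,
                                 pool_id=pool['id'])
    vip = self._create_vip('HTTP', 80, subnet_id, pool['id'])
    self._create_floating_ip(vip, public_network_id,
                             port_filters=vip['port_id'])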
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
new file mode 100644
index 0000000..68f6e62
--- /dev/null
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -0,0 +1,240 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+import urllib
+
+from tempest.api.network import common as net_common
+from tempest.common import ssh
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest.scenario import manager
+from tempest import test
+
+config = config.CONF
+
+
+class TestLoadBalancerBasic(manager.NetworkScenarioTest):
+
+    """
+    This test checks basic load balancing.
+
+    The following is the scenario outline:
+    1. Create an instance
+    2. SSH to the instance and start two servers
+    3. Create a load balancer with two members and the ROUND_ROBIN algorithm,
+       then associate the VIP with a floating ip
+    4. Send 10 requests to the floating ip and check that they are shared
+       between the two servers and that both of them get equal portions
+       of the requests
+    """
+
+    @classmethod
+    def check_preconditions(cls):
+        super(TestLoadBalancerBasic, cls).check_preconditions()
+        cfg = config.network
+        if not test.is_extension_enabled('lbaas', 'network'):
+            msg = 'LBaaS Extension is not enabled'
+            cls.enabled = False
+            raise cls.skipException(msg)
+        if not (cfg.tenant_networks_reachable or cfg.public_network_id):
+            msg = ('Either tenant_networks_reachable must be "true", or '
+                   'public_network_id must be defined.')
+            cls.enabled = False
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestLoadBalancerBasic, cls).setUpClass()
+        cls.check_preconditions()
+        cls.security_groups = {}
+        cls.networks = []
+        cls.subnets = []
+        cls.servers_keypairs = {}
+        cls.pools = []
+        cls.members = []
+        cls.vips = []
+        cls.floating_ips = {}
+        cls.port1 = 80
+        cls.port2 = 88
+
+    def _create_security_groups(self):
+        self.security_groups[self.tenant_id] =\
+            self._create_security_group_neutron(tenant_id=self.tenant_id)
+
+    def _create_server(self):
+        tenant_id = self.tenant_id
+        name = data_utils.rand_name("smoke_server-")
+        keypair = self.create_keypair(name='keypair-%s' % name)
+        security_groups = [self.security_groups[tenant_id].name]
+        nets = self.network_client.list_networks()
+        for net in nets['networks']:
+            if net['tenant_id'] == self.tenant_id:
+                self.networks.append(net)
+                create_kwargs = {
+                    'nics': [
+                        {'net-id': net['id']},
+                    ],
+                    'key_name': keypair.name,
+                    'security_groups': security_groups,
+                }
+                server = self.create_server(name=name,
+                                            create_kwargs=create_kwargs)
+                self.servers_keypairs[server] = keypair
+                break
+        self.assertTrue(self.servers_keypairs)
+
+    def _start_servers(self):
+        """
+        1. SSH to the instance
+        2. Start two servers listening on ports 80 and 88 respectively
+        """
+        for server in self.servers_keypairs.keys():
+            ssh_login = config.compute.image_ssh_user
+            private_key = self.servers_keypairs[server].private_key
+            network_name = self.networks[0]['name']
+
+            ip_address = server.networks[network_name][0]
+            ssh_client = ssh.Client(ip_address, ssh_login,
+                                    pkey=private_key,
+                                    timeout=100)
+            start_server = "while true; do echo -e 'HTTP/1.0 200 OK\r\n\r\n" \
+                           "%(server)s' | sudo nc -l -p %(port)s ; done &"
+            cmd = start_server % {'server': 'server1',
+                                  'port': self.port1}
+            ssh_client.exec_command(cmd)
+            cmd = start_server % {'server': 'server2',
+                                  'port': self.port2}
+            ssh_client.exec_command(cmd)
+
+    def _check_connection(self, check_ip):
+        def try_connect(ip):
+            try:
+                urllib.urlopen("http://{0}/".format(ip))
+                return True
+            except IOError:
+                return False
+        timeout = config.compute.ping_timeout
+        timer = 0
+        while not try_connect(check_ip):
+            time.sleep(1)
+            timer += 1
+            if timer >= timeout:
+                message = "Timed out trying to connect to %s" % check_ip
+                raise exceptions.TimeoutException(message)
+
+    def _create_pool(self):
+        """Create a pool with ROUND_ROBIN algorithm."""
+        subnets = self.network_client.list_subnets()
+        for subnet in subnets['subnets']:
+            if subnet['tenant_id'] == self.tenant_id:
+                self.subnets.append(subnet)
+                pool = super(TestLoadBalancerBasic, self)._create_pool(
+                    'ROUND_ROBIN',
+                    'HTTP',
+                    subnet['id'])
+                self.pools.append(pool)
+                break
+        self.assertTrue(self.pools)
+
+    def _create_members(self, network_name, server_ids):
+        """
+        Create two members.
+
+        In case there is only one server, create both members with the same ip
+        but with different ports to listen on.
+        """
+        servers = self.compute_client.servers.list()
+        for server in servers:
+            if server.id in server_ids:
+                ip = server.networks[network_name][0]
+                pool_id = self.pools[0]['id']
+                if len(set(server_ids)) == 1 or len(servers) == 1:
+                    member1 = self._create_member(ip, self.port1, pool_id)
+                    member2 = self._create_member(ip, self.port2, pool_id)
+                    self.members.extend([member1, member2])
+                else:
+                    member = self._create_member(ip, self.port1, pool_id)
+                    self.members.append(member)
+        self.assertTrue(self.members)
+
+    def _assign_floating_ip_to_vip(self, vip):
+        public_network_id = config.network.public_network_id
+        port_id = vip['port_id']
+        floating_ip = self._create_floating_ip(vip,
+                                               public_network_id,
+                                               port_filters=port_id)
+        self.floating_ips.setdefault(vip['id'], [])
+        self.floating_ips[vip['id']].append(floating_ip)
+
+    def _create_load_balancer(self):
+        self._create_pool()
+        self._create_members(self.networks[0]['name'],
+                             [self.servers_keypairs.keys()[0].id])
+        subnet_id = self.subnets[0]['id']
+        pool_id = self.pools[0]['id']
+        vip = super(TestLoadBalancerBasic, self)._create_vip('HTTP', 80,
+                                                             subnet_id,
+                                                             pool_id)
+        self.vips.append(vip)
+        self._status_timeout(NeutronRetriever(self.network_client,
+                                              self.network_client.vip_path,
+                                              net_common.DeletableVip),
+                             self.vips[0]['id'],
+                             expected_status='ACTIVE')
+        self._assign_floating_ip_to_vip(self.vips[0])
+
+    def _check_load_balancing(self):
+        """
+        1. Send 10 requests on the floating ip associated with the VIP
+        2. Check that the requests are shared between
+           the two servers and that both of them get equal portions
+           of the requests
+        """
+
+        vip = self.vips[0]
+        floating_ip_vip = self.floating_ips[
+            vip['id']][0]['floating_ip_address']
+        self._check_connection(floating_ip_vip)
+        resp = []
+        for count in range(10):
+            resp.append(
+                urllib.urlopen(
+                    "http://{0}/".format(floating_ip_vip)).read())
+        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
+        self.assertEqual(5, resp.count("server1\n"))
+        self.assertEqual(5, resp.count("server2\n"))
+
+    @test.skip_because(bug="1277381")
+    @test.attr(type='smoke')
+    @test.services('compute', 'network')
+    def test_load_balancer_basic(self):
+        self._create_security_groups()
+        self._create_server()
+        self._start_servers()
+        self._create_load_balancer()
+        self._check_load_balancing()
+
+
+class NeutronRetriever(object):
+    def __init__(self, network_client, path, resource):
+        self.network_client = network_client
+        self.path = path
+        self.resource = resource
+
+    def get(self, thing_id):
+        obj = self.network_client.get(self.path % thing_id)
+        return self.resource(client=self.network_client, **obj.values()[0])
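
NeutronRetriever adapts the raw network client to the retriever interface that
the scenario manager's _status_timeout() expects: an object whose get(id)
returns something exposing the resource's status. That is what lets the test
block until the VIP goes ACTIVE before assigning it a floating IP:

    # As wired up in _create_load_balancer() above.
    retriever = NeutronRetriever(self.network_client,
                                 self.network_client.vip_path,
                                 net_common.DeletableVip)
    self._status_timeout(retriever, vip['id'], expected_status='ACTIVE')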
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index db21201..e96b44b 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -77,3 +77,17 @@
 
         uri = "node-group-templates/%s" % tmpl_id
         return self.delete(uri)
+
+    def list_plugins(self):
+        """List all enabled plugins."""
+
+        uri = 'plugins'
+        return self._request_and_parse(self.get, uri, 'plugins')
+
+    def get_plugin(self, plugin_name, plugin_version=None):
+        """Returns the details of a single plugin."""
+
+        uri = "plugins/%s" % plugin_name
+        if plugin_version:
+            uri += '/%s' % plugin_version
+        return self._request_and_parse(self.get, uri, 'plugin')
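
The two new calls lean on the client's _request_and_parse() helper (defined
elsewhere in this client, not shown in the hunk). Roughly, that helper is
assumed to issue the request, JSON-decode the body and hand back the named
sub-document, along the lines of (sketch, assuming json is already imported):

    def _request_and_parse(self, req_fun, uri, res_name, *args, **kwargs):
        # e.g. req_fun=self.get, uri='plugins', res_name='plugins'
        resp, body = req_fun(uri, *args, **kwargs)
        body = json.loads(body)
        return resp, body[res_name]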
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 0a16b9f..b70b2e8 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -177,7 +177,7 @@
             stack_name = body['stack_name']
             stack_status = body['stack_status']
             if stack_status == status:
-                return
+                return body
             if fail_regexp.search(stack_status):
                 raise exceptions.StackBuildErrorException(
                     stack_identifier=stack_identifier,
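
wait_for_stack_status() now returns the polled stack body on success instead of
None, so callers can inspect the final stack without issuing a second GET:

    # Illustrative caller (not part of this patch).
    body = self.client.wait_for_stack_status(stack_identifier,
                                             'CREATE_COMPLETE')
    self.assertEqual('CREATE_COMPLETE', body['stack_status'])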
diff --git a/tempest/tests/negative/__init__.py b/tempest/tests/negative/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/negative/__init__.py
diff --git a/tempest/tests/negative/test_generate_json.py b/tempest/tests/negative/test_generate_json.py
new file mode 100644
index 0000000..a0aa088
--- /dev/null
+++ b/tempest/tests/negative/test_generate_json.py
@@ -0,0 +1,53 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import generate_json as gen
+import tempest.test
+
+
+class TestGenerateJson(tempest.test.BaseTestCase):
+
+    fake_input_str = {"type": "string",
+                      "minLength": 2,
+                      "maxLength": 8,
+                      'results': {'gen_number': 404}}
+
+    fake_input_int = {"type": "integer",
+                      "maximum": 255,
+                      "minimum": 1}
+
+    fake_input_obj = {"type": "object",
+                      "properties": {"minRam": {"type": "integer"},
+                                     "diskName": {"type": "string"},
+                                     "maxRam": {"type": "integer", }
+                                     }
+                      }
+
+    def _validate_result(self, data):
+        self.assertTrue(isinstance(data, list))
+        for t in data:
+            self.assertTrue(isinstance(t, tuple))
+
+    def test_generate_invalid_string(self):
+        result = gen.generate_invalid(self.fake_input_str)
+        self._validate_result(result)
+
+    def test_generate_invalid_integer(self):
+        result = gen.generate_invalid(self.fake_input_int)
+        self._validate_result(result)
+
+    def test_generate_invalid_obj(self):
+        result = gen.generate_invalid(self.fake_input_obj)
+        self._validate_result(result)
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
new file mode 100644
index 0000000..4c59383
--- /dev/null
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -0,0 +1,64 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+import tempest.test as test
+
+
+class TestNegativeAutoTest(test.BaseTestCase):
+    # Fake entries
+    _interface = 'json'
+    _service = 'compute'
+
+    fake_input_desc = {"name": "list-flavors-with-detail",
+                       "http-method": "GET",
+                       "url": "flavors/detail",
+                       "json-schema": {"type": "object",
+                                      "properties":
+                                      {"minRam": {"type": "integer"},
+                                       "minDisk": {"type": "integer"}}
+                                       },
+                       "resources": ["flavor", "volume", "image"]
+                       }
+
+    def _check_prop_entries(self, result, entry):
+        entries = [a for a in result if entry in a[0]]
+        self.assertIsNotNone(entries)
+        self.assertIs(len(entries), 2)
+        for entry in entries:
+            self.assertIsNotNone(entry[1]['schema'])
+
+    def _check_resource_entries(self, result, entry):
+        entries = [a for a in result if entry in a[0]]
+        self.assertIsNotNone(entries)
+        self.assertIs(len(entries), 3)
+        for entry in entries:
+            self.assertIsNotNone(entry[1]['resource'])
+
+    @mock.patch('tempest.test.NegativeAutoTest.load_schema')
+    def test_generate_scenario(self, open_mock):
+        open_mock.return_value = self.fake_input_desc
+        scenarios = test.NegativeAutoTest.\
+            generate_scenario(None)
+
+        self.assertIsInstance(scenarios, list)
+        for scenario in scenarios:
+            self.assertIsInstance(scenario, tuple)
+            self.assertIsInstance(scenario[0], str)
+            self.assertIsInstance(scenario[1], dict)
+        self._check_prop_entries(scenarios, "prop_minRam")
+        self._check_prop_entries(scenarios, "prop_minDisk")
+        self._check_resource_entries(scenarios, "inv_res")
diff --git a/tempest/tests/test_ssh.py b/tempest/tests/test_ssh.py
index 429ed56..a6eedc4 100644
--- a/tempest/tests/test_ssh.py
+++ b/tempest/tests/test_ssh.py
@@ -88,15 +88,17 @@
         client_mock.connect.side_effect = [socket.error, socket.error, True]
         t_mock.side_effect = [
             1000,  # Start time
+            1000,  # LOG.warning() calls time.time() loop 1
             1001,  # Sleep loop 1
+            1001,  # LOG.warning() calls time.time() loop 2
             1002   # Sleep loop 2
         ]
 
         client._get_ssh_connection(sleep=1)
 
         expected_sleeps = [
-            mock.call(1),
-            mock.call(1.01)
+            mock.call(2),
+            mock.call(3)
         ]
         self.assertEqual(expected_sleeps, s_mock.mock_calls)
 
@@ -111,7 +113,9 @@
         ]
         t_mock.side_effect = [
             1000,  # Start time
+            1000,  # LOG.warning() calls time.time() loop 1
             1001,  # Sleep loop 1
+            1001,  # LOG.warning() calls time.time() loop 2
             1002,  # Sleep loop 2
             1003,  # Sleep loop 3
             1004  # LOG.error() calls time.time()