Merge "Add CLI test of "nova server-group-list""
diff --git a/setup.cfg b/setup.cfg
index 339da12..5c62710 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,6 +21,7 @@
 console_scripts =
     verify-tempest-config = tempest.cmd.verify_tempest_config:main
     javelin2 = tempest.cmd.javelin:main
+    run-tempest-stress = tempest.cmd.run_stress:main
 
 [build_sphinx]
 all_files = 1
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index 8b76811..aeb77e3 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -165,8 +165,8 @@
         resp, body = self.client.list_ports_detail()
         self.assertEqual(200, resp.status)
 
-        ports_dict = {port['uuid']: port for port in body['ports']
-                      if port['uuid'] in uuids}
+        ports_dict = dict((port['uuid'], port) for port in body['ports']
+                          if port['uuid'] in uuids)
 
         for uuid in uuids:
             self.assertIn(uuid, ports_dict)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index b04ab8a..35f6fc2 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -75,7 +75,6 @@
                                                    to_port,
                                                    cidr=cidr,
                                                    group_id=group_id)
-        self.addCleanup(self.client.delete_security_group_rule, rule['id'])
         self.assertEqual(200, resp.status)
 
     @test.attr(type='smoke')
@@ -95,8 +94,6 @@
                                                    ip_protocol1,
                                                    from_port1, to_port1)
         rule1_id = rule['id']
-        # Delete the Security Group rule1 at the end of this method
-        self.addCleanup(self.client.delete_security_group_rule, rule1_id)
 
         # Add a second rule to the created Security Group
         ip_protocol2 = 'icmp'
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 768cc11..28d64fb 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import datetime
-
 from six import moves
 
 from tempest.api.compute import base
@@ -37,9 +35,8 @@
         # tearDownClass method of the super-class.
         cls.existing_fixtures = []
         cls.deleted_fixtures = []
-        cls.start_time = datetime.datetime.utcnow()
         for x in moves.xrange(2):
-            resp, srv = cls.create_test_server()
+            resp, srv = cls.create_test_server(wait_until='ACTIVE')
             cls.existing_fixtures.append(srv)
 
         resp, srv = cls.create_test_server()
@@ -127,19 +124,6 @@
         self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                           {'limit': -1})
 
-    @test.attr(type='gate')
-    def test_list_servers_by_changes_since(self):
-        # Servers are listed by specifying changes-since date
-        changes_since = {'changes-since': self.start_time.isoformat()}
-        resp, body = self.client.list_servers(changes_since)
-        self.assertEqual('200', resp['status'])
-        # changes-since returns all instances, including deleted.
-        num_expected = (len(self.existing_fixtures) +
-                        len(self.deleted_fixtures))
-        self.assertEqual(num_expected, len(body['servers']),
-                         "Number of servers %d is wrong in %s" %
-                         (num_expected, body['servers']))
-
     @test.attr(type=['negative', 'gate'])
     def test_list_servers_by_changes_since_invalid_date(self):
         # Return an error when invalid date format is passed
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 80e6008..d0fd876 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -193,26 +193,46 @@
             if current_flavor == self.flavor_ref else self.flavor_ref
         return current_flavor, new_flavor_ref
 
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
-    @test.attr(type='smoke')
-    def test_resize_server_confirm(self):
+    def _test_resize_server_confirm(self, stop=False):
         # The server's RAM and disk space should be modified to that of
         # the provided flavor
 
         previous_flavor_ref, new_flavor_ref = \
             self._detect_server_image_flavor(self.server_id)
 
+        if stop:
+            resp = self.servers_client.stop(self.server_id)[0]
+            self.assertEqual(202, resp.status)
+            self.servers_client.wait_for_server_status(self.server_id,
+                                                       'SHUTOFF')
+
         resp, server = self.client.resize(self.server_id, new_flavor_ref)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
 
         self.client.confirm_resize(self.server_id)
-        self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+        expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+        self.client.wait_for_server_status(self.server_id, expected_status)
 
         resp, server = self.client.get_server(self.server_id)
         self.assertEqual(new_flavor_ref, server['flavor']['id'])
 
+        if stop:
+            # NOTE(mriedem): tearDown requires the server to be started.
+            self.client.start(self.server_id)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm(self):
+        self._test_resize_server_confirm(stop=False)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm_from_stopped(self):
+        self._test_resize_server_confirm(stop=True)
+
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     @test.attr(type='gate')
diff --git a/tempest/api/compute/v3/servers/test_list_servers_negative.py b/tempest/api/compute/v3/servers/test_list_servers_negative.py
index 9cbc4e0..18e5c67 100644
--- a/tempest/api/compute/v3/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_list_servers_negative.py
@@ -39,7 +39,7 @@
         cls.deleted_fixtures = []
         cls.start_time = datetime.datetime.utcnow()
         for x in moves.xrange(2):
-            resp, srv = cls.create_test_server()
+            resp, srv = cls.create_test_server(wait_until='ACTIVE')
             cls.existing_fixtures.append(srv)
 
         resp, srv = cls.create_test_server()
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index 721fe42..e098311 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -186,26 +186,46 @@
             if current_flavor == self.flavor_ref else self.flavor_ref
         return current_flavor, new_flavor_ref
 
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
-    @test.attr(type='smoke')
-    def test_resize_server_confirm(self):
+    def _test_resize_server_confirm(self, stop=False):
         # The server's RAM and disk space should be modified to that of
         # the provided flavor
 
         previous_flavor_ref, new_flavor_ref = \
             self._detect_server_image_flavor(self.server_id)
 
+        if stop:
+            resp = self.servers_client.stop(self.server_id)[0]
+            self.assertEqual(202, resp.status)
+            self.servers_client.wait_for_server_status(self.server_id,
+                                                       'SHUTOFF')
+
         resp, server = self.client.resize(self.server_id, new_flavor_ref)
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
 
         self.client.confirm_resize(self.server_id)
-        self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+        expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+        self.client.wait_for_server_status(self.server_id, expected_status)
 
         resp, server = self.client.get_server(self.server_id)
         self.assertEqual(new_flavor_ref, server['flavor']['id'])
 
+        if stop:
+            # NOTE(mriedem): tearDown requires the server to be started.
+            self.client.start(self.server_id)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm(self):
+        self._test_resize_server_confirm(stop=False)
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @test.attr(type='smoke')
+    def test_resize_server_confirm_from_stopped(self):
+        self._test_resize_server_confirm(stop=True)
+
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     @test.attr(type='gate')
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
new file mode 100644
index 0000000..03974e4
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -0,0 +1,102 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest import test
+
+
+class RegionsTestJSON(base.BaseIdentityV3AdminTest):
+    _interface = 'json'
+
+    @classmethod
+    @test.safe_setup
+    def setUpClass(cls):
+        super(RegionsTestJSON, cls).setUpClass()
+        cls.setup_regions = list()
+        cls.client = cls.region_client
+        for i in range(2):
+            r_description = data_utils.rand_name('description-')
+            _, region = cls.client.create_region(r_description)
+            cls.setup_regions.append(region)
+
+    @classmethod
+    def tearDownClass(cls):
+        for r in cls.setup_regions:
+            cls.client.delete_region(r['id'])
+        super(RegionsTestJSON, cls).tearDownClass()
+
+    def _delete_region(self, region_id):
+        resp, _ = self.client.delete_region(region_id)
+        self.assertEqual(204, resp.status)
+        self.assertRaises(exceptions.NotFound,
+                          self.client.get_region, region_id)
+
+    @test.attr(type='gate')
+    def test_create_update_get_delete_region(self):
+        r_description = data_utils.rand_name('description-')
+        resp, region = self.client.create_region(
+            r_description, parent_region_id=self.setup_regions[0]['id'])
+        self.assertEqual(201, resp.status)
+        self.addCleanup(self._delete_region, region['id'])
+        self.assertEqual(r_description, region['description'])
+        self.assertEqual(self.setup_regions[0]['id'],
+                         region['parent_region_id'])
+        # Update region with new description and parent ID
+        r_alt_description = data_utils.rand_name('description-')
+        resp, region = self.client.update_region(
+            region['id'],
+            description=r_alt_description,
+            parent_region_id=self.setup_regions[1]['id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(r_alt_description, region['description'])
+        self.assertEqual(self.setup_regions[1]['id'],
+                         region['parent_region_id'])
+        # Get the details of region
+        resp, region = self.client.get_region(region['id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(r_alt_description, region['description'])
+        self.assertEqual(self.setup_regions[1]['id'],
+                         region['parent_region_id'])
+
+    @test.attr(type='smoke')
+    def test_create_region_with_specific_id(self):
+        # Create a region with a specific id
+        r_region_id = data_utils.rand_uuid()
+        r_description = data_utils.rand_name('description-')
+        resp, region = self.client.create_region(
+            r_description, unique_region_id=r_region_id)
+        self.addCleanup(self._delete_region, region['id'])
+        # Assert the create-region-with-specific-id response body
+        self.assertEqual(201, resp.status)
+        self.assertEqual(r_region_id, region['id'])
+        self.assertEqual(r_description, region['description'])
+
+    @test.attr(type='gate')
+    def test_list_regions(self):
+        # Get a list of regions
+        resp, fetched_regions = self.client.list_regions()
+        self.assertEqual(200, resp.status)
+        missing_regions = \
+            [e for e in self.setup_regions if e not in fetched_regions]
+        # Asserting List Regions response
+        self.assertEqual(0, len(missing_regions),
+                         "Failed to find region %s in fetched list" %
+                         ', '.join(str(e) for e in missing_regions))
+
+
+class RegionsTestXML(RegionsTestJSON):
+    _interface = 'xml'
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index e4e74c1..697057f 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -96,6 +96,7 @@
         cls.client = cls.os_adm.identity_v3_client
         cls.token = cls.os_adm.token_v3_client
         cls.endpoints_client = cls.os_adm.endpoints_client
+        cls.region_client = cls.os_adm.region_client
         cls.data = DataGenerator(cls.client)
         cls.non_admin_client = cls.os.identity_v3_client
         cls.service_client = cls.os_adm.service_client
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index c8726a2..2df3f7f 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -247,11 +247,17 @@
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
-        super(ListSnapshotImagesTest, cls).setUpClass()
+        # This test class uses only the nova v3 api to create snapshots,
+        # because an equivalent test using the nova v2 api already exists
+        # in the nova v2 compute images api tests.
+        # Since nova v3 doesn't have an images api proxy, this test
+        # class was added to the image api tests.
         if not CONF.compute_feature_enabled.api_v3:
-            cls.servers_client = cls.os.servers_client
-        else:
-            cls.servers_client = cls.os.servers_v3_client
+            skip_msg = ("%s skipped as nova v3 api is not available" %
+                        cls.__name__)
+            raise cls.skipException(skip_msg)
+        super(ListSnapshotImagesTest, cls).setUpClass()
+        cls.servers_client = cls.os.servers_v3_client
         cls.servers = []
         # We add a few images here to test the listing functionality of
         # the images API
@@ -281,8 +287,7 @@
         cls.servers.append(server)
         cls.servers_client.wait_for_server_status(
             server['id'], 'ACTIVE')
-        resp, image = cls.servers_client.create_image(
-            server['id'], name)
+        resp, _ = cls.servers_client.create_image(server['id'], name)
         image_id = data_utils.parse_image_id(resp['location'])
         cls.created_images.append(image_id)
         cls.client.wait_for_image_status(image_id,
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 97b3c4b..b98cea1 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -86,9 +86,6 @@
                 direction='ingress'
             )
             self.assertEqual('201', resp['status'])
-            self.addCleanup(self._delete_security_group_rule,
-                            rule_create_body['security_group_rule']['id']
-                            )
 
             # Show details of the created security rule
             resp, show_rule_body = self.client.show_security_group_rule(
@@ -130,9 +127,6 @@
 
         self.assertEqual('201', resp['status'])
         sec_group_rule = rule_create_body['security_group_rule']
-        self.addCleanup(self._delete_security_group_rule,
-                        sec_group_rule['id']
-                        )
 
         self.assertEqual(sec_group_rule['direction'], direction)
         self.assertEqual(sec_group_rule['protocol'], protocol)
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 6bda83b..5f46d01 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -67,6 +67,7 @@
         super(ContainerSyncTest, cls).tearDownClass()
 
     @test.attr(type='slow')
+    @test.skip_because(bug='1317133')
     def test_container_synchronization(self):
         # container to container synchronization
         # to allow/accept sync requests to/from other accounts
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index ee1d09a..3b8c214 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -118,14 +118,16 @@
                          'from the created Volume_type')
 
     @test.attr(type='smoke')
-    def test_volume_type_encryption_create_get(self):
-        # Create/get encryption type.
+    def test_volume_type_encryption_create_get_delete(self):
+        # Create/get/delete encryption type.
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type-")
         resp, body = self.client.create_volume_type(name)
         self.assertEqual(200, resp.status)
         self.addCleanup(self._delete_volume_type, body['id'])
+
+        # Create encryption type
         resp, encryption_type = self.client.create_encryption_type(
             body['id'], provider=provider,
             control_location=control_location)
@@ -137,6 +139,8 @@
         self.assertEqual(control_location, encryption_type['control_location'],
                          "The created encryption_type control_location is not "
                          "equal to the requested control_location")
+
+        # Get encryption type
         resp, fetched_encryption_type = self.client.get_encryption_type(
             encryption_type['volume_type_id'])
         self.assertEqual(200, resp.status)
@@ -148,3 +152,15 @@
                          fetched_encryption_type['control_location'],
                          'The fetched encryption_type control_location is '
                          'different from the created encryption_type')
+
+        # Delete encryption type
+        resp, _ = self.client.delete_encryption_type(
+            encryption_type['volume_type_id'])
+        self.assertEqual(202, resp.status)
+        resource = {"id": encryption_type['volume_type_id'],
+                    "type": "encryption-type"}
+        self.client.wait_for_resource_deletion(resource)
+        resp, deleted_encryption_type = self.client.get_encryption_type(
+            encryption_type['volume_type_id'])
+        self.assertEqual(200, resp.status)
+        self.assertEmpty(deleted_encryption_type)
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index a8b0a8d..bc5b1dc 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -154,6 +154,7 @@
         self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
 
     @test.attr(type=['negative', 'gate'])
+    @test.services('compute')
     def test_attach_volumes_with_nonexistent_volume_id(self):
         srv_name = data_utils.rand_name('Instance-')
         resp, server = self.servers_client.create_server(srv_name,
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 6294cd9..26316d2 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -63,6 +63,7 @@
                 self.assertEqual(params[key], snap[key], msg)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_snapshot_create_with_volume_in_use(self):
         # Create a snapshot when volume status is in-use
         # Create a test instance
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index 2002927..ca1f5aa 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -114,6 +114,8 @@
 
 list_server_metadata = copy.deepcopy(set_server_metadata)
 
+update_server_metadata = copy.deepcopy(set_server_metadata)
+
 delete_server_metadata_item = {
     'status_code': [204]
 }
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 682021f..6716249 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -95,3 +95,7 @@
         'required': ['addresses']
     }
 }
+
+update_server_metadata = copy.deepcopy(servers.update_server_metadata)
+# V3 API's response status_code is 201
+update_server_metadata['status_code'] = [201]
diff --git a/tempest/clients.py b/tempest/clients.py
index 7745d54..7532bf2 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -129,6 +129,7 @@
     IdentityV3ClientJSON
 from tempest.services.identity.v3.json.identity_client import V3TokenClientJSON
 from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
+from tempest.services.identity.v3.json.region_client import RegionClientJSON
 from tempest.services.identity.v3.json.service_client import \
     ServiceClientJSON
 from tempest.services.identity.v3.xml.credentials_client import \
@@ -138,6 +139,7 @@
     IdentityV3ClientXML
 from tempest.services.identity.v3.xml.identity_client import V3TokenClientXML
 from tempest.services.identity.v3.xml.policy_client import PolicyClientXML
+from tempest.services.identity.v3.xml.region_client import RegionClientXML
 from tempest.services.identity.v3.xml.service_client import \
     ServiceClientXML
 from tempest.services.identity.xml.identity_client import IdentityClientXML
@@ -248,6 +250,7 @@
             self.tenant_usages_client = TenantUsagesClientXML(
                 self.auth_provider)
             self.policy_client = PolicyClientXML(self.auth_provider)
+            self.region_client = RegionClientXML(self.auth_provider)
             self.hosts_client = HostsClientXML(self.auth_provider)
             self.hypervisor_client = HypervisorClientXML(self.auth_provider)
             self.network_client = NetworkClientXML(self.auth_provider)
@@ -334,6 +337,7 @@
             self.migrations_v3_client = MigrationsV3ClientJSON(
                 self.auth_provider)
             self.policy_client = PolicyClientJSON(self.auth_provider)
+            self.region_client = RegionClientJSON(self.auth_provider)
             self.hosts_client = HostsClientJSON(self.auth_provider)
             self.hypervisor_v3_client = HypervisorV3ClientJSON(
                 self.auth_provider)
diff --git a/tempest/stress/run_stress.py b/tempest/cmd/run_stress.py
similarity index 98%
rename from tempest/stress/run_stress.py
rename to tempest/cmd/run_stress.py
index c7c17c0..f773996 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -70,7 +70,29 @@
     return tests
 
 
-def main(ns):
+parser = argparse.ArgumentParser(description='Run stress tests')
+parser.add_argument('-d', '--duration', default=300, type=int,
+                    help="Duration of test in secs")
+parser.add_argument('-s', '--serial', action='store_true',
+                    help="Trigger running tests serially")
+parser.add_argument('-S', '--stop', action='store_true',
+                    default=False, help="Stop on first error")
+parser.add_argument('-n', '--number', type=int,
+                    help="How often an action is executed for each process")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('-a', '--all', action='store_true',
+                   help="Execute all stress tests")
+parser.add_argument('-T', '--type',
+                    help="Filters tests of a certain type (e.g. gate)")
+parser.add_argument('-i', '--call-inherited', action='store_true',
+                    default=False,
+                    help="Call also inherited function with stress attribute")
+group.add_argument('-t', "--tests", nargs='?',
+                   help="Name of the file with test description")
+
+
+def main():
+    ns = parser.parse_args()
     result = 0
     if not ns.all:
         tests = json.load(open(ns.tests, 'r'))
@@ -97,29 +119,9 @@
     return result
 
 
-parser = argparse.ArgumentParser(description='Run stress tests')
-parser.add_argument('-d', '--duration', default=300, type=int,
-                    help="Duration of test in secs")
-parser.add_argument('-s', '--serial', action='store_true',
-                    help="Trigger running tests serially")
-parser.add_argument('-S', '--stop', action='store_true',
-                    default=False, help="Stop on first error")
-parser.add_argument('-n', '--number', type=int,
-                    help="How often an action is executed for each process")
-group = parser.add_mutually_exclusive_group(required=True)
-group.add_argument('-a', '--all', action='store_true',
-                   help="Execute all stress tests")
-parser.add_argument('-T', '--type',
-                    help="Filters tests of a certain type (e.g. gate)")
-parser.add_argument('-i', '--call-inherited', action='store_true',
-                    default=False,
-                    help="Call also inherited function with stress attribute")
-group.add_argument('-t', "--tests", nargs='?',
-                   help="Name of the file with test description")
-
 if __name__ == "__main__":
     try:
-        sys.exit(main(parser.parse_args()))
+        sys.exit(main())
     except Exception:
         LOG.exception("Failure in the stress test framework")
         sys.exit(1)
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 6580c65..6720847 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -75,3 +75,22 @@
 
 def ovs_db_dump():
     return sudo_cmd_call("ovsdb-client dump")
+
+
+def copy_file_to_host(file_from, dest, host, username, pkey):
+    dest = "%s@%s:%s" % (username, host, dest)
+    cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
+          "-o StrictHostKeyChecking=no " \
+          "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
+                                              'file1': file_from,
+                                              'dest': dest}
+    args = shlex.split(cmd)
+    subprocess_args = {'stdout': subprocess.PIPE,
+                       'stderr': subprocess.STDOUT}
+    proc = subprocess.Popen(args, **subprocess_args)
+    stdout, stderr = proc.communicate()
+    if proc.returncode != 0:
+        LOG.error(("Command {0} returned with exit status {1},"
+                  "output {2}, error {3}").format(cmd, proc.returncode,
+                                                  stdout, stderr))
+    return stdout
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index d52ed7c..d8474a0 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -22,6 +22,16 @@
 LOG = logging.getLogger(__name__)
 
 
+def _console_dump(client, server_id):
+    try:
+        resp, output = client.get_console_output(server_id, None)
+        LOG.debug("Console Output for Server %s:\n%s" % (
+            server_id, output))
+    except exceptions.NotFound:
+        LOG.debug("Server %s: doesn't have a console" % server_id)
+        pass
+
+
 # NOTE(afazekas): This function needs to know a token and a subject.
 def wait_for_server_status(client, server_id, status, ready_wait=True,
                            extra_timeout=0, raise_on_error=True):
@@ -71,7 +81,9 @@
                      '/'.join((old_status, str(old_task_state))),
                      '/'.join((server_status, str(task_state))),
                      time.time() - start_time)
+
         if (server_status == 'ERROR') and raise_on_error:
+            _console_dump(client, server_id)
             raise exceptions.BuildErrorException(server_id=server_id)
 
         timed_out = int(time.time()) - start_time >= timeout
@@ -87,9 +99,11 @@
                         'timeout': timeout})
             message += ' Current status: %s.' % server_status
             message += ' Current task state: %s.' % task_state
+
             caller = misc_utils.find_test_caller()
             if caller:
                 message = '(%s) %s' % (caller, message)
+            _console_dump(client, server_id)
             raise exceptions.TimeoutException(message)
         old_status = server_status
         old_task_state = task_state
diff --git a/tempest/config.py b/tempest/config.py
index 1049f67..a06c9a6 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1070,8 +1070,21 @@
 class TempestConfigProxy(object):
     _config = None
 
+    _extra_log_defaults = [
+        'keystoneclient.session=INFO',
+        'paramiko.transport=INFO',
+        'requests.packages.urllib3.connectionpool=WARN'
+    ]
+
+    def _fix_log_levels(self):
+        """Tweak the oslo log defaults."""
+        for opt in logging.log_opts:
+            if opt.dest == 'default_log_levels':
+                opt.default.extend(self._extra_log_defaults)
+
     def __getattr__(self, attr):
         if not self._config:
+            self._fix_log_levels()
             self._config = TempestConfigPrivate()
 
         return getattr(self._config, attr)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 40fbf05..7703d4d 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -29,6 +29,7 @@
 from tempest.api.network import common as net_common
 from tempest import auth
 from tempest import clients
+from tempest.common import debug
 from tempest.common import isolated_creds
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
@@ -285,7 +286,6 @@
         for ruleset in rulesets:
             sg_rule = client.security_group_rules.create(secgroup_id,
                                                          **ruleset)
-            self.set_resource(sg_rule.id, sg_rule)
             rules.append(sg_rule)
         return rules
 
@@ -447,6 +447,30 @@
         LOG.debug("image:%s" % self.image)
 
 
+# power/provision states as of icehouse
+class BaremetalPowerStates(object):
+    """Possible power states of an Ironic node."""
+    POWER_ON = 'power on'
+    POWER_OFF = 'power off'
+    REBOOT = 'rebooting'
+    SUSPEND = 'suspended'
+
+
+class BaremetalProvisionStates(object):
+    """Possible provision states of an Ironic node."""
+    NOSTATE = None
+    INIT = 'initializing'
+    ACTIVE = 'active'
+    BUILDING = 'building'
+    DEPLOYWAIT = 'wait call-back'
+    DEPLOYING = 'deploying'
+    DEPLOYFAIL = 'deploy failed'
+    DEPLOYDONE = 'deploy complete'
+    DELETING = 'deleting'
+    DELETED = 'deleted'
+    ERROR = 'error'
+
+
 class BaremetalScenarioTest(OfficialClientTest):
     @classmethod
     def setUpClass(cls):
@@ -522,6 +546,55 @@
             ports.append(self.baremetal_client.port.get(port.uuid))
         return ports
 
+    def add_keypair(self):
+        self.keypair = self.create_keypair()
+
+    def verify_connectivity(self, ip=None):
+        if ip:
+            dest = self.get_remote_client(ip)
+        else:
+            dest = self.get_remote_client(self.instance)
+        dest.validate_authentication()
+
+    def boot_instance(self):
+        create_kwargs = {
+            'key_name': self.keypair.id
+        }
+        self.instance = self.create_server(
+            wait=False, create_kwargs=create_kwargs)
+
+        self.set_resource('instance', self.instance)
+
+        self.wait_node(self.instance.id)
+        self.node = self.get_node(instance_id=self.instance.id)
+
+        self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_ON)
+
+        self.wait_provisioning_state(
+            self.node.uuid,
+            [BaremetalProvisionStates.DEPLOYWAIT,
+             BaremetalProvisionStates.ACTIVE],
+            timeout=15)
+
+        self.wait_provisioning_state(self.node.uuid,
+                                     BaremetalProvisionStates.ACTIVE,
+                                     timeout=CONF.baremetal.active_timeout)
+
+        self.status_timeout(
+            self.compute_client.servers, self.instance.id, 'ACTIVE')
+
+        self.node = self.get_node(instance_id=self.instance.id)
+        self.instance = self.compute_client.servers.get(self.instance.id)
+
+    def terminate_instance(self):
+        self.instance.delete()
+        self.remove_resource('instance')
+        self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
+        self.wait_provisioning_state(
+            self.node.uuid,
+            BaremetalProvisionStates.NOSTATE,
+            timeout=CONF.baremetal.unprovision_timeout)
+
 
 class NetworkScenarioTest(OfficialClientTest):
     """
@@ -782,6 +855,51 @@
                                                   private_key)
             linux_client.validate_authentication()
 
+    def _check_public_network_connectivity(self, ip_address, username,
+                                           private_key, should_connect=True,
+                                           msg=None, servers=None):
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        LOG.debug('checking network connections to IP %s with user: %s' %
+                  (ip_address, username))
+        try:
+            self._check_vm_connectivity(ip_address,
+                                        username,
+                                        private_key,
+                                        should_connect=should_connect)
+        except Exception:
+            ex_msg = 'Public network connectivity check failed'
+            if msg:
+                ex_msg += ": " + msg
+            LOG.exception(ex_msg)
+            self._log_console_output(servers)
+            debug.log_net_debug()
+            raise
+
+    def _check_tenant_network_connectivity(self, server,
+                                           username,
+                                           private_key,
+                                           should_connect=True,
+                                           servers_for_debug=None):
+        if not CONF.network.tenant_networks_reachable:
+            msg = 'Tenant networks not configured to be reachable.'
+            LOG.info(msg)
+            return
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            for net_name, ip_addresses in server.networks.iteritems():
+                for ip_address in ip_addresses:
+                    self._check_vm_connectivity(ip_address,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+        except Exception:
+            LOG.exception('Tenant network connectivity check failed')
+            self._log_console_output(servers_for_debug)
+            debug.log_net_debug()
+            raise
+
     def _check_remote_connectivity(self, source, dest, should_succeed=True):
         """
         check ping server via source ssh connection
@@ -923,7 +1041,6 @@
             client=client,
             **sg_rule['security_group_rule']
         )
-        self.set_resource(sg_rule.id, sg_rule)
         self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
         self.assertEqual(secgroup.id, sg_rule.security_group_id)
 
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index c53aa83..82c6b5d 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -23,31 +23,7 @@
 LOG = logging.getLogger(__name__)
 
 
-# power/provision states as of icehouse
-class PowerStates(object):
-    """Possible power states of an Ironic node."""
-    POWER_ON = 'power on'
-    POWER_OFF = 'power off'
-    REBOOT = 'rebooting'
-    SUSPEND = 'suspended'
-
-
-class ProvisionStates(object):
-    """Possible provision states of an Ironic node."""
-    NOSTATE = None
-    INIT = 'initializing'
-    ACTIVE = 'active'
-    BUILDING = 'building'
-    DEPLOYWAIT = 'wait call-back'
-    DEPLOYING = 'deploying'
-    DEPLOYFAIL = 'deploy failed'
-    DEPLOYDONE = 'deploy complete'
-    DELETING = 'deleting'
-    DELETED = 'deleted'
-    ERROR = 'error'
-
-
-class BaremetalBasicOptsPXESSH(manager.BaremetalScenarioTest):
+class BaremetalBasicOpsPXESSH(manager.BaremetalScenarioTest):
     """
     This smoke test tests the pxe_ssh Ironic driver.  It follows this basic
     set of operations:
@@ -65,21 +41,11 @@
         * Monitors the associated Ironic node for power and
           expected state transitions
     """
-    def add_keypair(self):
-        self.keypair = self.create_keypair()
-
     def add_floating_ip(self):
         floating_ip = self.compute_client.floating_ips.create()
         self.instance.add_floating_ip(floating_ip)
         return floating_ip.ip
 
-    def verify_connectivity(self, ip=None):
-        if ip:
-            dest = self.get_remote_client(ip)
-        else:
-            dest = self.get_remote_client(self.instance)
-        dest.validate_authentication()
-
     def validate_driver_info(self):
         f_id = self.instance.flavor['id']
         flavor_extra = self.compute_client.flavors.get(f_id).get_keys()
@@ -98,43 +64,6 @@
             self.assertEqual(n_port['device_id'], self.instance.id)
             self.assertEqual(n_port['mac_address'], port.address)
 
-    def boot_instance(self):
-        create_kwargs = {
-            'key_name': self.keypair.id
-        }
-        self.instance = self.create_server(
-            wait=False, create_kwargs=create_kwargs)
-
-        self.set_resource('instance', self.instance)
-
-        self.wait_node(self.instance.id)
-        self.node = self.get_node(instance_id=self.instance.id)
-
-        self.wait_power_state(self.node.uuid, PowerStates.POWER_ON)
-
-        self.wait_provisioning_state(
-            self.node.uuid,
-            [ProvisionStates.DEPLOYWAIT, ProvisionStates.ACTIVE],
-            timeout=15)
-
-        self.wait_provisioning_state(self.node.uuid, ProvisionStates.ACTIVE,
-                                     timeout=CONF.baremetal.active_timeout)
-
-        self.status_timeout(
-            self.compute_client.servers, self.instance.id, 'ACTIVE')
-
-        self.node = self.get_node(instance_id=self.instance.id)
-        self.instance = self.compute_client.servers.get(self.instance.id)
-
-    def terminate_instance(self):
-        self.instance.delete()
-        self.remove_resource('instance')
-        self.wait_power_state(self.node.uuid, PowerStates.POWER_OFF)
-        self.wait_provisioning_state(
-            self.node.uuid,
-            ProvisionStates.NOSTATE,
-            timeout=CONF.baremetal.unprovision_timeout)
-
     @test.services('baremetal', 'compute', 'image', 'network')
     def test_baremetal_server_ops(self):
         self.add_keypair()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 9152220..1c24b5c 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -13,10 +13,14 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+
+import httplib
+import tempfile
 import time
-import urllib
+import urllib2
 
 from tempest.api.network import common as net_common
+from tempest.common import commands
 from tempest import config
 from tempest import exceptions
 from tempest.scenario import manager
@@ -135,71 +139,53 @@
 
         1. SSH to the instance
         2. Start two http backends listening on ports 80 and 88 respectively
-        In case there are two instances, each backend is created on a separate
-        instance.
-
-        The backends are the inetd services. To start them we need to edit
-        /etc/inetd.conf in the following way:
-        www stream tcp nowait root /bin/sh sh /home/cirros/script_name
-
-        Where /home/cirros/script_name is a path to a script which
-        echoes the responses:
-        echo -e 'HTTP/1.0 200 OK\r\n\r\nserver_name
-
-        If we want the server to listen on port 88, then we use
-        "kerberos" instead of "www".
         """
 
         for server_id, ip in self.server_ips.iteritems():
             private_key = self.servers_keypairs[server_id].private_key
             server_name = self.compute_client.servers.get(server_id).name
+            username = config.scenario.ssh_user
             ssh_client = self.get_remote_client(
                 server_or_ip=ip,
                 private_key=private_key)
             ssh_client.validate_authentication()
-            # Create service for inetd
-            create_script = """sudo sh -c "echo -e \\"echo -e 'HTTP/1.0 """ \
-                            """200 OK\\\\\\r\\\\\\n\\\\\\r\\\\\\n""" \
-                            """%(server)s'\\" >>/home/cirros/%(script)s\""""
 
-            cmd = create_script % {
-                'server': server_name,
-                'script': 'script1'}
+            # Write a backend's response into a file
+            resp = """HTTP/1.0 200 OK\r\nContent-Length: 8\r\n\r\n%s"""
+            with tempfile.NamedTemporaryFile() as script:
+                script.write(resp % server_name)
+                script.flush()
+                with tempfile.NamedTemporaryFile() as key:
+                    key.write(private_key)
+                    key.flush()
+                    commands.copy_file_to_host(script.name,
+                                               "~/script1",
+                                               ip,
+                                               username, key.name)
+            # Start netcat
+            start_server = """sudo nc -ll -p %(port)s -e cat """ \
+                           """~/%(script)s &"""
+            cmd = start_server % {'port': self.port1,
+                                  'script': 'script1'}
             ssh_client.exec_command(cmd)
-            # Configure inetd
-            configure_inetd = """sudo sh -c "echo -e \\"%(service)s """ \
-                              """stream tcp nowait root /bin/sh sh """ \
-                              """/home/cirros/%(script)s\\" >> """ \
-                              """/etc/inetd.conf\""""
-            # "www" stands for port 80
-            cmd = configure_inetd % {'service': 'www',
-                                     'script': 'script1'}
-            ssh_client.exec_command(cmd)
-
             if len(self.server_ips) == 1:
-                cmd = create_script % {'server': 'server2',
-                                       'script': 'script2'}
+                with tempfile.NamedTemporaryFile() as script:
+                    script.write(resp % 'server2')
+                    script.flush()
+                    with tempfile.NamedTemporaryFile() as key:
+                        key.write(private_key)
+                        key.flush()
+                        commands.copy_file_to_host(script.name,
+                                                   "~/script2", ip,
+                                                   username, key.name)
+                cmd = start_server % {'port': self.port2,
+                                      'script': 'script2'}
                 ssh_client.exec_command(cmd)
-                # "kerberos" stands for port 88
-                cmd = configure_inetd % {'service': 'kerberos',
-                                         'script': 'script2'}
-                ssh_client.exec_command(cmd)
-
-            # Get PIDs of inetd
-            pids = ssh_client.get_pids('inetd')
-            if pids != ['']:
-                # If there are any inetd processes, reload them
-                kill_cmd = "sudo kill -HUP %s" % ' '.join(pids)
-                ssh_client.exec_command(kill_cmd)
-            else:
-                # In other case start inetd
-                start_inetd = "sudo /usr/sbin/inetd /etc/inetd.conf"
-                ssh_client.exec_command(start_inetd)
 
     def _check_connection(self, check_ip, port=80):
         def try_connect(ip, port):
             try:
-                resp = urllib.urlopen("http://{0}:{1}/".format(ip, port))
+                resp = urllib2.urlopen("http://{0}:{1}/".format(ip, port))
                 if resp.getcode() == 200:
                     return True
                 return False
@@ -284,27 +270,32 @@
 
     def _check_load_balancing(self):
         """
-        1. Send 100 requests on the floating ip associated with the VIP
+        1. Send 10 requests on the floating ip associated with the VIP
         2. Check that the requests are shared between
            the two servers and that both of them get equal portions
            of the requests
         """
 
         self._check_connection(self.vip_ip)
-        resp = self._send_requests(self.vip_ip)
-        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
-        self.assertEqual(50, resp.count("server1\n"))
-        self.assertEqual(50, resp.count("server2\n"))
+        self._send_requests(self.vip_ip, set(["server1", "server2"]))
 
-    def _send_requests(self, vip_ip):
-        resp = []
-        for count in range(100):
-            resp.append(
-                urllib.urlopen(
-                    "http://{0}/".format(vip_ip)).read())
-        return resp
+    def _send_requests(self, vip_ip, expected, num_req=10):
+        count = 0
+        while count < num_req:
+            try:
+                resp = []
+                for i in range(len(self.members)):
+                    resp.append(
+                        urllib2.urlopen(
+                            "http://{0}/".format(vip_ip)).read())
+                count += 1
+                self.assertEqual(expected,
+                                 set(resp))
+            # NOTE: There is always a slim chance of getting this exception
+            #       due to special aspects of haproxy's internal behavior.
+            except httplib.BadStatusLine:
+                pass
 
-    @test.skip_because(bug='1295165')
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 9b435bd..f1cd320 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -15,7 +15,6 @@
 
 import testtools
 
-from tempest.common import debug
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest.openstack.common import log as logging
@@ -84,57 +83,17 @@
         self.floating_ip = self._create_floating_ip(self.server,
                                                     public_network_id)
         self.addCleanup(self.cleanup_wrapper, self.floating_ip)
-        self._wait_server_status_and_check_network_connectivity()
-
-    def _check_tenant_network_connectivity(self, server,
-                                           username,
-                                           private_key,
-                                           should_connect=True):
-        if not CONF.network.tenant_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for net_name, ip_addresses in server.networks.iteritems():
-                for ip_address in ip_addresses:
-                    self._check_vm_connectivity(ip_address,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
-        except Exception:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers=[server])
-            debug.log_ip_ns()
-            raise
-
-    def _check_public_network_connectivity(self, floating_ip,
-                                           username,
-                                           private_key,
-                                           should_connect=True):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            self._check_vm_connectivity(floating_ip, username, private_key,
-                                        should_connect=should_connect)
-        except Exception:
-            LOG.exception("Public network connectivity check failed")
-            debug.log_ip_ns()
-            raise
 
     def _check_network_connectivity(self, should_connect=True):
         username = CONF.compute.image_ssh_user
         private_key = self.keypair.private_key
-        self._check_tenant_network_connectivity(self.server,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
+        self._check_tenant_network_connectivity(
+            self.server, username, private_key, should_connect=should_connect,
+            servers_for_debug=[self.server])
         floating_ip = self.floating_ip.floating_ip_address
-        self._check_public_network_connectivity(floating_ip,
-                                                username,
-                                                private_key,
-                                                should_connect=should_connect)
+        self._check_public_network_connectivity(floating_ip, username,
+                                                private_key, should_connect,
+                                                servers=[self.server])
 
     def _wait_server_status_and_check_network_connectivity(self):
         self.status_timeout(self.compute_client.servers, self.server.id,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index d5ab3d3..21782ee 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -156,24 +156,13 @@
         return dict(server=server, keypair=keypair)
 
     def _check_tenant_network_connectivity(self):
-        if not CONF.network.tenant_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
         ssh_login = CONF.compute.image_ssh_user
-        try:
-            for server, key in self.servers.iteritems():
-                for net_name, ip_addresses in server.networks.iteritems():
-                    for ip_address in ip_addresses:
-                        self._check_vm_connectivity(ip_address, ssh_login,
-                                                    key.private_key)
-        except Exception:
-            LOG.exception('Tenant connectivity check failed')
-            self._log_console_output(servers=self.servers.keys())
-            debug.log_net_debug()
-            raise
+        for server, key in self.servers.iteritems():
+            # call the common method in the parent class
+            super(TestNetworkBasicOps, self).\
+                _check_tenant_network_connectivity(
+                    server, ssh_login, key.private_key,
+                    servers_for_debug=self.servers.keys())
 
     def _create_and_associate_floating_ips(self):
         public_network_id = CONF.network.public_network_id
@@ -184,28 +173,16 @@
 
     def _check_public_network_connectivity(self, should_connect=True,
                                            msg=None):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
         ssh_login = CONF.compute.image_ssh_user
-        LOG.debug('checking network connections')
         floating_ip, server = self.floating_ip_tuple
         ip_address = floating_ip.floating_ip_address
         private_key = None
         if should_connect:
             private_key = self.servers[server].private_key
-        try:
-            self._check_vm_connectivity(ip_address,
-                                        ssh_login,
-                                        private_key,
-                                        should_connect=should_connect)
-        except Exception:
-            ex_msg = 'Public network connectivity check failed'
-            if msg:
-                ex_msg += ": " + msg
-            LOG.exception(ex_msg)
-            self._log_console_output(servers=self.servers.keys())
-            debug.log_net_debug()
-            raise
+        # call the common method in the parent class
+        super(TestNetworkBasicOps, self)._check_public_network_connectivity(
+            ip_address, ssh_login, private_key, should_connect, msg,
+            self.servers.keys())
 
     def _disassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 70123fe..36bb02f 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -308,6 +308,8 @@
         resp, body = self.post('servers/%s/metadata' % str(server_id),
                                post_body)
         body = json.loads(body)
+        self.validate_response(common_schema.update_server_metadata,
+                               resp, body)
         return resp, body['metadata']
 
     def get_server_metadata_item(self, server_id, key):
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index f397c4b..eed85c7 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -324,6 +324,7 @@
         resp, body = self.post('servers/%s/metadata' % str(server_id),
                                post_body)
         body = json.loads(body)
+        self.validate_response(schema.update_server_metadata, resp, body)
         return resp, body['metadata']
 
     def get_server_metadata_item(self, server_id, key):
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/region_client.py
new file mode 100644
index 0000000..f95d00f
--- /dev/null
+++ b/tempest/services/identity/v3/json/region_client.py
@@ -0,0 +1,80 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class RegionClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(RegionClientJSON, self).__init__(auth_provider)
+        self.service = CONF.identity.catalog_type
+        self.endpoint_url = 'adminURL'
+        self.api_version = "v3"
+
+    def create_region(self, description, **kwargs):
+        """Create region."""
+        req_body = {
+            'description': description,
+        }
+        if kwargs.get('parent_region_id'):
+            req_body['parent_region_id'] = kwargs.get('parent_region_id')
+        req_body = json.dumps({'region': req_body})
+        if kwargs.get('unique_region_id'):
+            resp, body = self.put(
+                'regions/%s' % kwargs.get('unique_region_id'), req_body)
+        else:
+            resp, body = self.post('regions', req_body)
+        body = json.loads(body)
+        return resp, body['region']
+
+    def update_region(self, region_id, **kwargs):
+        """Updates a region."""
+        post_body = {}
+        if 'description' in kwargs:
+            post_body['description'] = kwargs.get('description')
+        if 'parent_region_id' in kwargs:
+            post_body['parent_region_id'] = kwargs.get('parent_region_id')
+        post_body = json.dumps({'region': post_body})
+        resp, body = self.patch('regions/%s' % region_id, post_body)
+        body = json.loads(body)
+        return resp, body['region']
+
+    def get_region(self, region_id):
+        """Get region."""
+        url = 'regions/%s' % region_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['region']
+
+    def list_regions(self, params=None):
+        """List regions."""
+        url = 'regions'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['regions']
+
+    def delete_region(self, region_id):
+        """Delete region."""
+        resp, body = self.delete('regions/%s' % region_id)
+        return resp, body
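
Note: a minimal usage sketch of the new JSON client follows. The way the client is obtained (a region_client attribute on the service manager) is an assumption for illustration; this change only adds the client class itself.

    # Hypothetical usage; 'region_client' as a manager attribute is assumed.
    client = manager.region_client

    # Creating with unique_region_id issues a PUT to regions/<id>;
    # without it, a POST to regions lets the server pick the id.
    resp, region = client.create_region('test region',
                                        unique_region_id='us-west-1')

    resp, regions = client.list_regions()
    assert any(r['id'] == 'us-west-1' for r in regions)

    resp, _ = client.delete_region('us-west-1')
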
diff --git a/tempest/services/identity/v3/xml/region_client.py b/tempest/services/identity/v3/xml/region_client.py
new file mode 100644
index 0000000..9f9161d
--- /dev/null
+++ b/tempest/services/identity/v3/xml/region_client.py
@@ -0,0 +1,120 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import http
+from tempest.common import rest_client
+from tempest.common import xml_utils as common
+from tempest import config
+
+CONF = config.CONF
+
+XMLNS = "http://docs.openstack.org/identity/api/v3"
+
+
+class RegionClientXML(rest_client.RestClient):
+    TYPE = "xml"
+
+    def __init__(self, auth_provider):
+        super(RegionClientXML, self).__init__(auth_provider)
+        self.service = CONF.identity.catalog_type
+        self.region_url = 'adminURL'
+        self.api_version = "v3"
+
+    def _parse_array(self, node):
+        array = []
+        for child in node.getchildren():
+            tag_list = child.tag.split('}', 1)
+            if tag_list[1] == "region":
+                array.append(common.xml_to_json(child))
+        return array
+
+    def _parse_body(self, body):
+        json = common.xml_to_json(body)
+        return json
+
+    def request(self, method, url, extra_headers=False, headers=None,
+                body=None, wait=None):
+        """Overriding the existing HTTP request in super class RestClient."""
+        if extra_headers:
+            try:
+                headers.update(self.get_headers())
+            except (ValueError, TypeError):
+                headers = self.get_headers()
+        dscv = CONF.identity.disable_ssl_certificate_validation
+        self.http_obj = http.ClosingHttp(
+            disable_ssl_certificate_validation=dscv)
+        return super(RegionClientXML, self).request(method, url,
+                                                    extra_headers,
+                                                    headers=headers,
+                                                    body=body)
+
+    def create_region(self, description, **kwargs):
+        """Create region."""
+        create_region = common.Element("region",
+                                       xmlns=XMLNS,
+                                       description=description)
+        if 'parent_region_id' in kwargs:
+            create_region.append(common.Element(
+                'parent_region_id', kwargs.get('parent_region_id')))
+        if 'unique_region_id' in kwargs:
+            resp, body = self.put(
+                'regions/%s' % kwargs.get('unique_region_id'),
+                str(common.Document(create_region)))
+        else:
+            resp, body = self.post('regions',
+                                   str(common.Document(create_region)))
+        body = self._parse_body(etree.fromstring(body))
+        return resp, body
+
+    def update_region(self, region_id, **kwargs):
+        """Updates an region with given parameters.
+        """
+        description = kwargs.get('description', None)
+        update_region = common.Element("region",
+                                       xmlns=XMLNS,
+                                       description=description)
+        if 'parent_region_id' in kwargs:
+            update_region.append(common.Element('parent_region_id',
+                                 kwargs.get('parent_region_id')))
+
+        resp, body = self.patch('regions/%s' % str(region_id),
+                                str(common.Document(update_region)))
+        body = self._parse_body(etree.fromstring(body))
+        return resp, body
+
+    def get_region(self, region_id):
+        """Get Region."""
+        url = 'regions/%s' % region_id
+        resp, body = self.get(url)
+        body = self._parse_body(etree.fromstring(body))
+        return resp, body
+
+    def list_regions(self, params=None):
+        """Get the list of regions."""
+        url = 'regions'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        body = self._parse_array(etree.fromstring(body))
+        return resp, body
+
+    def delete_region(self, region_id):
+        """Delete region."""
+        resp, body = self.delete('regions/%s' % region_id)
+        return resp, body
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index c9c0582..65ecc67 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -18,6 +18,7 @@
 
 from tempest.common import rest_client
 from tempest import config
+from tempest import exceptions
 
 CONF = config.CONF
 
@@ -34,6 +35,26 @@
         self.build_interval = CONF.volume.build_interval
         self.build_timeout = CONF.volume.build_timeout
 
+    def is_resource_deleted(self, resource):
+        # Callers must pass the resource as a dictionary describing what to
+        # check, e.g. {"id": resource_id, "type": resource_type}, where
+        # "type" is either "volume-type" or "encryption-type".
+        try:
+            if resource['type'] == "volume-type":
+                self.get_volume_type(resource['id'])
+            elif resource['type'] == "encryption-type":
+                resp, body = self.get_encryption_type(resource['id'])
+                assert 200 == resp.status
+                if not body:
+                    return True
+            else:
+                msg = (" resource value is either not defined or incorrect.")
+                raise exceptions.UnprocessableEntity(msg)
+        except exceptions.NotFound:
+            return True
+        return False
+
     def list_volume_types(self, params=None):
         """List all the volume_types created."""
         url = 'types'
@@ -150,3 +171,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         return resp, body['encryption']
+
+    def delete_encryption_type(self, vol_type_id):
+        """Delete the encryption type for the specified volume-type."""
+        return self.delete("/types/%s/encryption/provider" % str(vol_type_id))
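
Note: since is_resource_deleted() keys off a {'id': ..., 'type': ...} dictionary rather than a bare id, callers that use the generic wait_for_resource_deletion() polling helper from the base rest client would pass that same dictionary through. A sketch of the assumed call pattern:

    # Assumed call pattern: wait_for_resource_deletion() polls
    # is_resource_deleted() with whatever identifier is passed here.
    client.delete_encryption_type(vol_type['id'])
    client.wait_for_resource_deletion({'id': vol_type['id'],
                                       'type': 'encryption-type'})

    client.delete_volume_type(vol_type['id'])
    client.wait_for_resource_deletion({'id': vol_type['id'],
                                       'type': 'volume-type'})
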
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index b56f96b..0a63679 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -34,14 +34,14 @@
 In order to use this discovery you have to be in the tempest root directory
 and execute the following:
 
-	tempest/stress/run_stress.py -a -d 30
+	run-tempest-stress -a -d 30
 
 Running the sample test
 -----------------------
 
-To test installation, do the following (from the tempest/stress directory):
+To test installation, do the following:
 
-	./run_stress.py -t etc/server-create-destroy-test.json -d 30
+	run-tempest-stress -t tempest/stress/etc/server-create-destroy-test.json -d 30
 
 This sample test tries to create a few VMs and kill a few VMs.
 
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index c76abde..5a334c5 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -18,12 +18,12 @@
 
 import tempest.cli as cli
 from tempest.openstack.common import log as logging
-import tempest.test
+from tempest.tests import base
 
 LOG = logging.getLogger(__name__)
 
 
-class StressFrameworkTest(tempest.test.BaseTestCase):
+class StressFrameworkTest(base.TestCase):
     """Basic test for the stress test framework.
     """
 
@@ -51,5 +51,5 @@
         return proc.returncode
 
     def test_help_function(self):
-        result = self._cmd("python", "-m tempest.stress.run_stress -h")
+        result = self._cmd("python", "-m tempest.cmd.run_stress -h")
         self.assertEqual(0, result)
diff --git a/tox.ini b/tox.ini
index 2110362..6b4acc6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -77,7 +77,7 @@
 [testenv:stress]
 sitepackages = True
 commands =
-    python -m tempest/stress/run_stress -a -d 3600 -S
+    run-tempest-stress -a -d 3600 -S
 
 [testenv:venv]
 commands = {posargs}