Merge "Stress ssh_floating test"
diff --git a/.testr.conf b/.testr.conf
index 510f4c9..05b12c4 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -1,7 +1,7 @@
 [DEFAULT]
 test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
              OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
-             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-250} \
+             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \
              ${PYTHON:-python} -m subunit.run discover -t ./ ./tempest $LISTOPT $IDOPTION
 test_id_option=--load-list $IDFILE
 test_list_option=--list
diff --git a/doc/source/field_guide/unit_tests.rst b/doc/source/field_guide/unit_tests.rst
new file mode 120000
index 0000000..67a8b20
--- /dev/null
+++ b/doc/source/field_guide/unit_tests.rst
@@ -0,0 +1 @@
+../../../tempest/tests/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 00c4e9a..f70cdd1 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -32,6 +32,7 @@
    field_guide/stress
    field_guide/thirdparty
    field_guide/whitebox
+   field_guide/unit_tests
 
 ------------------
 API and test cases
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 703d92a..d39ef70 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -390,3 +390,11 @@
 heat = false
 # Whether or not horizon is expected to be available
 horizon = True
+
+[stress]
+# Maximum number of instances to create during the stress test
+max_instances = 32
+# Time (in seconds) between log file error checks
+log_check_interval = 60
+# The default number of threads created per action during a stress test
+default_thread_number_per_action = 4
diff --git a/requirements.txt b/requirements.txt
index 06db0e6..877b23c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,23 @@
-d2to1>=0.2.10,<0.3
-pbr>=0.5,<0.6
-anyjson
+pbr>=0.5.21,<1.0
+anyjson>=0.3.3
 nose
-httplib2>=0.7.0
+httplib2
+jsonschema>=1.3.0,!=1.4.0
 testtools>=0.9.32
-lxml
-boto>=2.2.1
-paramiko
+lxml>=2.3
+boto>=2.4.0
+paramiko>=1.8.0
 netaddr
-python-glanceclient>=0.5.0
-python-keystoneclient>=0.2.0
-python-novaclient>=2.10.0
-python-neutronclient>=2.2.3,<3.0.0
+python-glanceclient>=0.9.0
+python-keystoneclient>=0.3.0
+python-novaclient>=2.12.0
+python-neutronclient>=2.2.3,<3
 python-cinderclient>=1.0.4
 python-heatclient>=0.2.3
-testresources
-keyring
-testrepository
+testresources>=0.2.4
+keyring>=1.6.1
+testrepository>=0.0.17
 oslo.config>=1.1.0
 # Needed for whitebox testing
-sqlalchemy
-eventlet>=0.12.0
+SQLAlchemy>=0.7.8,<=0.7.99
+eventlet>=0.13.0
diff --git a/run_tests.sh b/run_tests.sh
index f995cde..856ce54 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -11,7 +11,7 @@
   echo "  -u, --update             Update the virtual environment with any newer package versions"
   echo "  -s, --smoke              Only run smoke tests"
   echo "  -w, --whitebox           Only run whitebox tests"
-  echo "  -t, --parallel           Run testr parallel"
+  echo "  -t, --serial             Run testr serially"
   echo "  -c, --nova-coverage      Enable Nova coverage collection"
   echo "  -C, --config             Config file location"
   echo "  -p, --pep8               Just run pep8"
@@ -26,7 +26,7 @@
 just_pep8=0
 venv=.venv
 with_venv=tools/with_venv.sh
-parallel=0
+serial=0
 always_venv=0
 never_venv=0
 no_site_packages=0
@@ -38,7 +38,7 @@
 logging=0
 logging_config=etc/logging.conf
 
-if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,parallel,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
 then
     # parse error
     usage
@@ -61,7 +61,7 @@
     -p|--pep8) let just_pep8=1;;
     -s|--smoke) testrargs="$testrargs smoke";;
     -w|--whitebox) testrargs="$testrargs whitebox";;
-    -t|--parallel) parallel=1;;
+    -t|--serial) serial=1;;
     -l|--logging) logging=1;;
     -L|--logging-config) logging_config=$2; shift;;
     --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no  ;;
@@ -101,10 +101,10 @@
 function run_tests {
   testr_init
   ${wrapper} find . -type f -name "*.pyc" -delete
-  if [ $parallel -eq 1 ]; then
-      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
-  else
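+  # Run serially only when requested; parallel is now the default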
+  if [ $serial -eq 1 ]; then
       ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+  else
+      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
   fi
 }
 
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index e4e87c0..083fbd7 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -84,23 +84,17 @@
         # Keypair should be created, Got details by name and deleted
         k_name = rand_name('keypair-')
         resp, keypair = self.client.create_keypair(k_name)
-        try:
-            resp, keypair_detail = self.client.get_keypair(k_name)
-            self.assertEqual(200, resp.status)
-            self.assertIn('name', keypair_detail)
-            self.assertIn('public_key', keypair_detail)
-            self.assertEqual(keypair_detail['name'], k_name,
-                             "The created keypair name is not equal "
-                             "to requested name")
-            public_key = keypair_detail['public_key']
-            self.assertTrue(public_key is not None,
-                            "Field public_key is empty or not found.")
-        except Exception:
-            self.fail("GET keypair details requested by keypair name "
-                      "has failed")
-        finally:
-            resp, _ = self.client.delete_keypair(k_name)
-            self.assertEqual(202, resp.status)
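+        # Make sure the keypair is deleted even if the assertions below fail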
+        self.addCleanup(self.client.delete_keypair, k_name)
+        resp, keypair_detail = self.client.get_keypair(k_name)
+        self.assertEqual(200, resp.status)
+        self.assertIn('name', keypair_detail)
+        self.assertIn('public_key', keypair_detail)
+        self.assertEqual(keypair_detail['name'], k_name,
+                         "The created keypair name is not equal "
+                         "to requested name")
+        public_key = keypair_detail['public_key']
+        self.assertTrue(public_key is not None,
+                        "Field public_key is empty or not found.")
 
     @attr(type='gate')
     def test_keypair_create_with_pub_key(self):
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 60297a9..efdadb0 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -21,8 +21,11 @@
 from tempest.common.utils.data_utils import parse_image_id
 from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 
+LOG = logging.getLogger(__name__)
+
 
 class AuthorizationTestJSON(base.BaseComputeTest):
     _interface = 'json'
@@ -204,7 +207,7 @@
             self.alt_keypairs_client.base_url = self.saved_base_url
             if (resp['status'] is not None):
                 resp, _ = self.alt_keypairs_client.delete_keypair(k_name)
-                self.fail("Create keypair request should not happen "
+                LOG.error("Create keypair request should not happen "
                           "if the tenant id does not match the current user")
 
     @attr(type='gate')
@@ -255,7 +258,7 @@
             self.alt_security_client.base_url = self.saved_base_url
             if resp['status'] is not None:
                 self.alt_security_client.delete_security_group(resp['id'])
-                self.fail("Create Security Group request should not happen if"
+                LOG.error("Create Security Group request should not happen if"
                           "the tenant id does not match the current user")
 
     @attr(type='gate')
@@ -297,7 +300,7 @@
             self.alt_security_client.base_url = self.saved_base_url
             if resp['status'] is not None:
                 self.alt_security_client.delete_security_group_rule(resp['id'])
-                self.fail("Create security group rule request should not "
+                LOG.error("Create security group rule request should not "
                           "happen if the tenant id does not match the"
                           " current user")
 
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index d98fb71..9d143ed 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -68,44 +68,30 @@
                          ', '.join(str(e) for e in missing_endpoints))
 
     @attr(type='gate')
-    def test_create_delete_endpoint(self):
+    def test_create_list_delete_endpoint(self):
         region = rand_name('region')
         url = rand_name('url')
         interface = 'public'
-        create_flag = False
-        matched = False
-        try:
-            resp, endpoint =\
-                self.client.create_endpoint(self.service_id, interface, url,
-                                            region=region, enabled=True)
-            create_flag = True
-            # Asserting Create Endpoint response body
-            self.assertEqual(resp['status'], '201')
-            self.assertEqual(region, endpoint['region'])
-            self.assertEqual(url, endpoint['url'])
-            # Checking if created endpoint is present in the list of endpoints
-            resp, fetched_endpoints = self.client.list_endpoints()
-            for e in fetched_endpoints:
-                if endpoint['id'] == e['id']:
-                    matched = True
-            if not matched:
-                self.fail("Created endpoint does not appear in the list"
-                          " of endpoints")
-        finally:
-            if create_flag:
-                matched = False
-                # Deleting the endpoint created in this method
-                resp_header, resp_body =\
-                    self.client.delete_endpoint(endpoint['id'])
-                self.assertEqual(resp_header['status'], '204')
-                self.assertEqual(resp_body, '')
-                # Checking whether endpoint is deleted successfully
-                resp, fetched_endpoints = self.client.list_endpoints()
-                for e in fetched_endpoints:
-                    if endpoint['id'] == e['id']:
-                        matched = True
-                if matched:
-                    self.fail("Delete endpoint is not successful")
+        resp, endpoint =\
+            self.client.create_endpoint(self.service_id, interface, url,
+                                        region=region, enabled=True)
+        # Asserting Create Endpoint response body
+        self.assertEqual(resp['status'], '201')
+        self.assertIn('id', endpoint)
+        self.assertEqual(region, endpoint['region'])
+        self.assertEqual(url, endpoint['url'])
+        # Checking if created endpoint is present in the list of endpoints
+        resp, fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+        self.assertIn(endpoint['id'], fetched_endpoints_id)
+        # Deleting the endpoint created in this method
+        resp, body = self.client.delete_endpoint(endpoint['id'])
+        self.assertEqual(resp['status'], '204')
+        self.assertEqual(body, '')
+        # Checking whether endpoint is deleted successfully
+        resp, fetched_endpoints = self.client.list_endpoints()
+        fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+        self.assertNotIn(endpoint['id'], fetched_endpoints_id)
 
     @attr(type='smoke')
     def test_update_endpoint(self):
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index c599562..dd724c7 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -211,24 +211,18 @@
                                                             object_name,
                                                             orig_metadata)
         self.assertIn(int(resp['status']), HTTP_SUCCESS)
-        try:
-            # copy object from source container to destination container
-            resp, _ = self.object_client.copy_object_across_containers(
-                src_container_name, object_name, dst_container_name,
-                object_name)
-            self.assertEqual(resp['status'], '201')
-
-            # check if object is present in destination container
-            resp, body = self.object_client.get_object(dst_container_name,
-                                                       object_name)
-            self.assertEqual(body, data)
-            actual_meta_key = 'x-object-meta-' + meta_key
-            self.assertTrue(actual_meta_key in resp)
-            self.assertEqual(resp[actual_meta_key], meta_value)
-
-        except Exception as e:
-            self.fail("Got exception :%s ; while copying"
-                      " object across containers" % e)
+        # copy object from source container to destination container
+        resp, _ = self.object_client.copy_object_across_containers(
+            src_container_name, object_name, dst_container_name,
+            object_name)
+        self.assertEqual(resp['status'], '201')
+        # check if object is present in destination container
+        resp, body = self.object_client.get_object(dst_container_name,
+                                                   object_name)
+        self.assertEqual(body, data)
+        actual_meta_key = 'x-object-meta-' + meta_key
+        self.assertTrue(actual_meta_key in resp)
+        self.assertEqual(resp[actual_meta_key], meta_value)
 
     @attr(type='gate')
     def test_get_object_using_temp_url(self):
@@ -367,36 +361,32 @@
     def test_access_public_object_with_another_user_creds(self):
         # make container public-readable and access an object in it using
         # another user's credentials
-        try:
-            cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
-            resp_meta, body = self.container_client.update_container_metadata(
-                self.container_name, metadata=cont_headers,
-                metadata_prefix='')
-            self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
-            # create object
-            object_name = rand_name(name='Object')
-            data = arbitrary_string(size=len(object_name) * 1,
-                                    base_text=object_name)
-            resp, _ = self.object_client.create_object(self.container_name,
-                                                       object_name, data)
-            self.assertEqual(resp['status'], '201')
+        cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
+        resp_meta, body = self.container_client.update_container_metadata(
+            self.container_name, metadata=cont_headers,
+            metadata_prefix='')
+        self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
 
-            # list container metadata
-            resp, _ = self.container_client.list_container_metadata(
-                self.container_name)
-            self.assertIn(int(resp['status']), HTTP_SUCCESS)
-            self.assertIn('x-container-read', resp)
-            self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
+        # create object
+        object_name = rand_name(name='Object')
+        data = arbitrary_string(size=len(object_name) * 1,
+                                base_text=object_name)
+        resp, _ = self.object_client.create_object(self.container_name,
+                                                   object_name, data)
+        self.assertEqual(resp['status'], '201')
 
-            # get auth token of alternative user
-            token = self.identity_client_alt.get_auth()
-            headers = {'X-Auth-Token': token}
-            # access object using alternate user creds
-            resp, body = self.custom_object_client.get_object(
-                self.container_name, object_name,
-                metadata=headers)
-            self.assertEqual(body, data)
+        # list container metadata
+        resp, _ = self.container_client.list_container_metadata(
+            self.container_name)
+        self.assertIn(int(resp['status']), HTTP_SUCCESS)
+        self.assertIn('x-container-read', resp)
+        self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
 
-        except Exception as e:
-            self.fail("Failed to get public readable object with another"
-                      " user creds raised exception is %s" % e)
+        # get auth token of alternative user
+        token = self.identity_client_alt.get_auth()
+        headers = {'X-Auth-Token': token}
+        # access object using alternate user creds
+        resp, body = self.custom_object_client.get_object(
+            self.container_name, object_name,
+            metadata=headers)
+        self.assertEqual(body, data)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 745dd87..2a72c95 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -89,8 +89,8 @@
                 pass
 
     @classmethod
-    def _create_keypair(cls, namestart='keypair-heat-'):
-        kp_name = rand_name(namestart)
+    def _create_keypair(cls, name_start='keypair-heat-'):
+        kp_name = rand_name(name_start)
         resp, body = cls.keypairs_client.create_keypair(kp_name)
         cls.keypairs.append(kp_name)
         return body
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
new file mode 100644
index 0000000..c934020
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -0,0 +1,211 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
+    _interface = 'json'
+
+    template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which creates single EC2 instance
+Parameters:
+  KeyName:
+    Type: String
+  InstanceType:
+    Type: String
+  ImageId:
+    Type: String
+  ExternalRouterId:
+    Type: String
+Resources:
+  Network:
+    Type: OS::Quantum::Net
+    Properties: {name: NewNetwork}
+  Subnet:
+    Type: OS::Quantum::Subnet
+    Properties:
+      network_id: {Ref: Network}
+      name: NewSubnet
+      ip_version: 4
+      cidr: 10.0.3.0/24
+      dns_nameservers: ["8.8.8.8"]
+      allocation_pools:
+      - {end: 10.0.3.150, start: 10.0.3.20}
+  RouterInterface:
+    Type: OS::Quantum::RouterInterface
+    Properties:
+      router_id: {Ref: ExternalRouterId}
+      subnet_id: {Ref: Subnet}
+  Server:
+    Type: AWS::EC2::Instance
+    Metadata:
+      Name: SmokeServer
+    Properties:
+      ImageId: {Ref: ImageId}
+      InstanceType: {Ref: InstanceType}
+      KeyName: {Ref: KeyName}
+      SubnetId: {Ref: Subnet}
+      UserData:
+        Fn::Base64:
+          Fn::Join:
+          - ''
+          - - '#!/bin/bash -v
+
+              '
+            - /opt/aws/bin/cfn-signal -e 0 -r "SmokeServer created" '
+            - {Ref: WaitHandle}
+            - '''
+
+              '
+  WaitHandle:
+    Type: AWS::CloudFormation::WaitConditionHandle
+  WaitCondition:
+    Type: AWS::CloudFormation::WaitCondition
+    DependsOn: Server
+    Properties:
+      Handle: {Ref: WaitHandle}
+      Timeout: '600'
+"""
+
+    @classmethod
+    def setUpClass(cls):
+        super(NeutronResourcesTestJSON, cls).setUpClass()
+        if not cls.orchestration_cfg.image_ref:
+            raise cls.skipException("No image available to test")
+        cls.client = cls.orchestration_client
+        os = clients.Manager()
+        cls.network_cfg = os.config.network
+        if not cls.config.service_available.neutron:
+            raise cls.skipException("Neutron support is required")
+        cls.network_client = os.network_client
+        cls.stack_name = rand_name('heat')
+        cls.keypair_name = (cls.orchestration_cfg.keypair_name or
+                            cls._create_keypair()['name'])
+        cls.external_router_id = cls._get_external_router_id()
+
+        # create the stack
+        cls.stack_identifier = cls.create_stack(
+            cls.stack_name,
+            cls.template,
+            parameters={
+                'KeyName': cls.keypair_name,
+                'InstanceType': cls.orchestration_cfg.instance_type,
+                'ImageId': cls.orchestration_cfg.image_ref,
+                'ExternalRouterId': cls.external_router_id
+            })
+        cls.stack_id = cls.stack_identifier.split('/')[1]
+        cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
+        _, resources = cls.client.list_resources(cls.stack_identifier)
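+        # Index the resources by logical name for the assertions below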
+        cls.test_resources = {}
+        for resource in resources:
+            cls.test_resources[resource['logical_resource_id']] = resource
+
+    @classmethod
+    def _get_external_router_id(cls):
+        resp, body = cls.network_client.list_ports()
+        ports = body['ports']
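+        # A router interface port's device_id is an existing router's id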
+        router_ports = filter(lambda port: port['device_owner'] ==
+                              'network:router_interface', ports)
+        return router_ports[0]['device_id']
+
+    @attr(type='slow')
+    def test_created_resources(self):
+        """Verifies created neutron resources."""
+        resources = [('Network', 'OS::Quantum::Net'),
+                     ('Subnet', 'OS::Quantum::Subnet'),
+                     ('RouterInterface', 'OS::Quantum::RouterInterface'),
+                     ('Server', 'AWS::EC2::Instance')]
+        for resource_name, resource_type in resources:
+            resource = self.test_resources.get(resource_name, None)
+            self.assertIsInstance(resource, dict)
+            self.assertEqual(resource_name, resource['logical_resource_id'])
+            self.assertEqual(resource_type, resource['resource_type'])
+            self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
+
+    @attr(type='slow')
+    def test_created_network(self):
+        """Verifies created netowrk."""
+        network_id = self.test_resources.get('Network')['physical_resource_id']
+        resp, body = self.network_client.show_network(network_id)
+        self.assertEqual('200', resp['status'])
+        network = body['network']
+        self.assertIsInstance(network, dict)
+        self.assertEqual(network_id, network['id'])
+        self.assertEqual('NewNetwork', network['name'])
+
+    @attr(type='slow')
+    def test_created_subnet(self):
+        """Verifies created subnet."""
+        subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
+        resp, body = self.network_client.show_subnet(subnet_id)
+        self.assertEqual('200', resp['status'])
+        subnet = body['subnet']
+        network_id = self.test_resources.get('Network')['physical_resource_id']
+        self.assertEqual(subnet_id, subnet['id'])
+        self.assertEqual(network_id, subnet['network_id'])
+        self.assertEqual('NewSubnet', subnet['name'])
+        self.assertEqual('8.8.8.8', subnet['dns_nameservers'][0])
+        self.assertEqual('10.0.3.20', subnet['allocation_pools'][0]['start'])
+        self.assertEqual('10.0.3.150', subnet['allocation_pools'][0]['end'])
+        self.assertEqual(4, subnet['ip_version'])
+        self.assertEqual('10.0.3.0/24', subnet['cidr'])
+
+    @attr(type='slow')
+    def test_created_router_interface(self):
+        """Verifies created router interface."""
+        network_id = self.test_resources.get('Network')['physical_resource_id']
+        subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
+        resp, body = self.network_client.list_ports()
+        self.assertEqual('200', resp['status'])
+        ports = body['ports']
+        router_ports = filter(lambda port: port['device_id'] ==
+                              self.external_router_id, ports)
+        created_network_ports = filter(lambda port: port['network_id'] ==
+                                       network_id, router_ports)
+        self.assertEqual(1, len(created_network_ports))
+        router_interface = created_network_ports[0]
+        fixed_ips = router_interface['fixed_ips']
+        subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
+                                  subnet_id, fixed_ips)
+        self.assertEqual(1, len(subnet_fixed_ips))
+        router_interface_ip = subnet_fixed_ips[0]['ip_address']
+        self.assertEqual('10.0.3.1', router_interface_ip)
+
+    @attr(type='slow')
+    def test_created_server(self):
+        """Verifies created sever."""
+        server_id = self.test_resources.get('Server')['physical_resource_id']
+        resp, server = self.servers_client.get_server(server_id)
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(self.keypair_name, server['key_name'])
+        self.assertEqual('ACTIVE', server['status'])
+        network = server['addresses']['NewNetwork'][0]
+        self.assertEqual(4, network['version'])
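+        # The address must fall inside the subnet's allocation pool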
+        ip_addr_prefix = network['addr'][:7]
+        ip_addr_suffix = int(network['addr'].split('.')[3])
+        self.assertEqual('10.0.3.', ip_addr_prefix)
+        self.assertTrue(ip_addr_suffix >= 20)
+        self.assertTrue(ip_addr_suffix <= 150)
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
new file mode 100644
index 0000000..defb910
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -0,0 +1,169 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class StacksTestJSON(base.BaseOrchestrationTest):
+    _interface = 'json'
+
+    template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which creates single EC2 instance
+Parameters:
+  KeyName:
+    Type: String
+  InstanceType:
+    Type: String
+  ImageId:
+    Type: String
+Resources:
+  SmokeServer:
+    Type: AWS::EC2::Instance
+    Metadata:
+      Name: SmokeServer
+    Properties:
+      ImageId: {Ref: ImageId}
+      InstanceType: {Ref: InstanceType}
+      KeyName: {Ref: KeyName}
+      UserData:
+        Fn::Base64:
+          Fn::Join:
+          - ''
+          - - '#!/bin/bash -v
+
+              '
+            - /opt/aws/bin/cfn-signal -e 0 -r "SmokeServer created" '
+            - {Ref: WaitHandle}
+            - '''
+
+              '
+  WaitHandle:
+    Type: AWS::CloudFormation::WaitConditionHandle
+  WaitCondition:
+    Type: AWS::CloudFormation::WaitCondition
+    DependsOn: SmokeServer
+    Properties:
+      Handle: {Ref: WaitHandle}
+      Timeout: '600'
+"""
+
+    @classmethod
+    def setUpClass(cls):
+        super(StacksTestJSON, cls).setUpClass()
+        if not cls.orchestration_cfg.image_ref:
+            raise cls.skipException("No image available to test")
+        cls.client = cls.orchestration_client
+        cls.stack_name = rand_name('heat')
+        keypair_name = (cls.orchestration_cfg.keypair_name or
+                        cls._create_keypair()['name'])
+
+        # create the stack
+        cls.stack_identifier = cls.create_stack(
+            cls.stack_name,
+            cls.template,
+            parameters={
+                'KeyName': keypair_name,
+                'InstanceType': cls.orchestration_cfg.instance_type,
+                'ImageId': cls.orchestration_cfg.image_ref
+            })
+        cls.stack_id = cls.stack_identifier.split('/')[1]
+        cls.resource_name = 'SmokeServer'
+        cls.resource_type = 'AWS::EC2::Instance'
+        cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
+
+    @attr(type='slow')
+    def test_stack_list(self):
+        """Created stack should be on the list of existing stacks."""
+        resp, stacks = self.client.list_stacks()
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(stacks, list)
+        stacks_names = map(lambda stack: stack['stack_name'], stacks)
+        self.assertIn(self.stack_name, stacks_names)
+
+    @attr(type='slow')
+    def test_stack_show(self):
+        """Getting details about created stack should be possible."""
+        resp, stack = self.client.get_stack(self.stack_name)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(stack, dict)
+        self.assertEqual(self.stack_name, stack['stack_name'])
+        self.assertEqual(self.stack_id, stack['id'])
+
+    @attr(type='slow')
+    def test_list_resources(self):
+        """Getting list of created resources for the stack should be possible.
+        """
+        resp, resources = self.client.list_resources(self.stack_identifier)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(resources, list)
+        resources_names = map(lambda resource: resource['logical_resource_id'],
+                              resources)
+        self.assertIn(self.resource_name, resources_names)
+        resources_types = map(lambda resource: resource['resource_type'],
+                              resources)
+        self.assertIn(self.resource_type, resources_types)
+
+    @attr(type='slow')
+    def test_show_resource(self):
+        """Getting details about created resource should be possible."""
+        resp, resource = self.client.get_resource(self.stack_identifier,
+                                                  self.resource_name)
+        self.assertIsInstance(resource, dict)
+        self.assertEqual(self.resource_name, resource['logical_resource_id'])
+        self.assertEqual(self.resource_type, resource['resource_type'])
+
+    @attr(type='slow')
+    def test_resource_metadata(self):
+        """Getting metadata for created resource should be possible."""
+        resp, metadata = self.client.show_resource_metadata(
+            self.stack_identifier,
+            self.resource_name)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(metadata, dict)
+        self.assertEqual(self.resource_name, metadata.get('Name', None))
+
+    @attr(type='slow')
+    def test_list_events(self):
+        """Getting list of created events for the stack should be possible."""
+        resp, events = self.client.list_events(self.stack_identifier)
+        self.assertEqual('200', resp['status'])
+        self.assertIsInstance(events, list)
+        resource_statuses = map(lambda event: event['resource_status'], events)
+        self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
+        self.assertIn('CREATE_COMPLETE', resource_statuses)
+
+    @attr(type='slow')
+    def test_show_event(self):
+        """Getting details about existing event should be possible."""
+        resp, events = self.client.list_resource_events(self.stack_identifier,
+                                                        self.resource_name)
+        self.assertNotEqual([], events)
+        events.sort(key=lambda event: event['event_time'])
+        event_id = events[0]['id']
+        resp, event = self.client.show_event(self.stack_identifier,
+                                             self.resource_name, event_id)
+        self.assertEqual('200', resp['status'])
+        self.assertEqual('CREATE_IN_PROGRESS', event['resource_status'])
+        self.assertEqual('state changed', event['resource_status_reason'])
+        self.assertEqual(self.resource_name, event['logical_resource_id'])
+        self.assertIsInstance(event, dict)
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index f1f1f7e..4bda5ab 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -33,8 +33,7 @@
 
     @attr(type='smoke')
     def test_stack_list_responds(self):
-        resp, body = self.client.list_stacks()
-        stacks = body['stacks']
+        resp, stacks = self.client.list_stacks()
         self.assertEqual('200', resp['status'])
         self.assertIsInstance(stacks, list)
 
@@ -42,9 +41,6 @@
     def test_stack_crud_no_resources(self):
         stack_name = rand_name('heat')
 
-        # count how many stacks to start with
-        resp, body = self.client.list_stacks()
-
         # create the stack
         stack_identifier = self.create_stack(
             stack_name, self.empty_template)
@@ -54,21 +50,21 @@
         self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
 
         # check for stack in list
-        resp, body = self.client.list_stacks()
-        list_ids = list([stack['id'] for stack in body['stacks']])
+        resp, stacks = self.client.list_stacks()
+        list_ids = list([stack['id'] for stack in stacks])
         self.assertIn(stack_id, list_ids)
 
         # fetch the stack
-        resp, body = self.client.get_stack(stack_identifier)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+        resp, stack = self.client.get_stack(stack_identifier)
+        self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
 
         # fetch the stack by name
-        resp, body = self.client.get_stack(stack_name)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+        resp, stack = self.client.get_stack(stack_name)
+        self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
 
         # fetch the stack by id
-        resp, body = self.client.get_stack(stack_id)
-        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+        resp, stack = self.client.get_stack(stack_id)
+        self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
 
         # delete the stack
         resp = self.client.delete_stack(stack_identifier)
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
new file mode 100644
index 0000000..6a7c541
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
+    _interface = 'json'
+
+    template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which creates only a new user
+Resources:
+  CfnUser:
+    Type: AWS::IAM::User
+"""
+
+    invalid_template_url = 'http://www.example.com/template.yaml'
+
+    @classmethod
+    def setUpClass(cls):
+        super(TemplateYAMLTestJSON, cls).setUpClass()
+        cls.client = cls.orchestration_client
+        cls.stack_name = rand_name('heat')
+        cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
+        cls.client.wait_for_stack_status(cls.stack_identifier,
+                                         'CREATE_COMPLETE')
+        cls.stack_id = cls.stack_identifier.split('/')[1]
+        cls.parameters = {}
+
+    @attr(type='gate')
+    def test_show_template(self):
+        """Getting template used to create the stack."""
+        resp, template = self.client.show_template(self.stack_identifier)
+        self.assertEqual('200', resp['status'])
+
+    @attr(type='gate')
+    def test_validate_template(self):
+        """Validating template passing it content."""
+        resp, parameters = self.client.validate_template(self.template,
+                                                         self.parameters)
+        self.assertEqual('200', resp['status'])
+
+    @attr(type=['gate', 'negative'])
+    def test_validate_template_url(self):
+        """Validating template passing url to it."""
+        self.assertRaises(exceptions.BadRequest,
+                          self.client.validate_template_url,
+                          template_url=self.invalid_template_url,
+                          parameters=self.parameters)
+
+
+class TemplateAWSTestJSON(TemplateYAMLTestJSON):
+    template = """
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Template which creates only a new user",
+  "Resources" : {
+    "CfnUser" : {
+      "Type" : "AWS::IAM::User"
+    }
+  }
+}
+"""
+
+    invalid_template_url = 'http://www.example.com/template.template'
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 5861497..9fa86b6 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -18,6 +18,7 @@
 from tempest.api.volume.base import BaseVolumeTest
 from tempest.common.utils.data_utils import rand_name
 from tempest.test import attr
+from tempest.test import stresstest
 
 
 class VolumesActionsTest(BaseVolumeTest):
@@ -52,24 +53,21 @@
 
         super(VolumesActionsTest, cls).tearDownClass()
 
+    @stresstest(class_setup_per='process')
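+    # Also run under the stress framework, one class setup per worker process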
     @attr(type='smoke')
     def test_attach_detach_volume_to_instance(self):
         # Volume is attached and detached successfully from an instance
-        try:
-            mountpoint = '/dev/vdc'
-            resp, body = self.client.attach_volume(self.volume['id'],
-                                                   self.server['id'],
-                                                   mountpoint)
-            self.assertEqual(202, resp.status)
-            self.client.wait_for_volume_status(self.volume['id'], 'in-use')
-        except Exception:
-            self.fail("Could not attach volume to instance")
-        finally:
-            # Detach the volume from the instance
-            resp, body = self.client.detach_volume(self.volume['id'])
-            self.assertEqual(202, resp.status)
-            self.client.wait_for_volume_status(self.volume['id'], 'available')
+        mountpoint = '/dev/vdc'
+        resp, body = self.client.attach_volume(self.volume['id'],
+                                               self.server['id'],
+                                               mountpoint)
+        self.assertEqual(202, resp.status)
+        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+        resp, body = self.client.detach_volume(self.volume['id'])
+        self.assertEqual(202, resp.status)
+        self.client.wait_for_volume_status(self.volume['id'], 'available')
 
+    @stresstest(class_setup_per='process')
     @attr(type='gate')
     def test_get_volume_attachment(self):
         # Verify that a volume's attachment information is retrieved
@@ -77,22 +75,22 @@
         resp, body = self.client.attach_volume(self.volume['id'],
                                                self.server['id'],
                                                mountpoint)
-        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
         self.assertEqual(202, resp.status)
-        try:
-            resp, volume = self.client.get_volume(self.volume['id'])
-            self.assertEqual(200, resp.status)
-            self.assertIn('attachments', volume)
-            attachment = volume['attachments'][0]
-            self.assertEqual(mountpoint, attachment['device'])
-            self.assertEqual(self.server['id'], attachment['server_id'])
-            self.assertEqual(self.volume['id'], attachment['id'])
-            self.assertEqual(self.volume['id'], attachment['volume_id'])
-        except Exception:
-            self.fail("Could not get attachment details from volume")
-        finally:
-            self.client.detach_volume(self.volume['id'])
-            self.client.wait_for_volume_status(self.volume['id'], 'available')
+        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+        # NOTE(gfidente): added in reverse order because functions will be
+        # called in reverse order to the order they are added (LIFO)
+        self.addCleanup(self.client.wait_for_volume_status,
+                        self.volume['id'],
+                        'available')
+        self.addCleanup(self.client.detach_volume, self.volume['id'])
+        resp, volume = self.client.get_volume(self.volume['id'])
+        self.assertEqual(200, resp.status)
+        self.assertIn('attachments', volume)
+        attachment = volume['attachments'][0]
+        self.assertEqual(mountpoint, attachment['device'])
+        self.assertEqual(self.server['id'], attachment['server_id'])
+        self.assertEqual(self.volume['id'], attachment['id'])
+        self.assertEqual(self.volume['id'], attachment['volume_id'])
 
     @attr(type='gate')
     def test_volume_upload(self):
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 08f585a..cbb8d08 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -101,31 +101,30 @@
         flags = creds + ' ' + flags
         return self.cmd(cmd, action, flags, params, fail_ok)
 
-    def check_output(self, cmd, **kwargs):
-        # substitutes subprocess.check_output which is not in python2.6
-        kwargs['stdout'] = subprocess.PIPE
-        proc = subprocess.Popen(cmd, **kwargs)
-        output = proc.communicate()[0]
-        if proc.returncode != 0:
-            raise CommandFailed(proc.returncode, cmd, output)
-        return output
-
     def cmd(self, cmd, action, flags='', params='', fail_ok=False,
             merge_stderr=False):
         """Executes specified command for the given action."""
         cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
                         flags, action, params])
         LOG.info("running: '%s'" % cmd)
+        cmd_str = cmd
         cmd = shlex.split(cmd)
+        result = ''
+        result_err = ''
         try:
-            if merge_stderr:
-                result = self.check_output(cmd, stderr=subprocess.STDOUT)
-            else:
-                with open('/dev/null', 'w') as devnull:
-                    result = self.check_output(cmd, stderr=devnull)
-        except subprocess.CalledProcessError as e:
-            LOG.error("command output:\n%s" % e.output)
-            raise
+            stdout = subprocess.PIPE
+            stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
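+            # Run the command, capturing output so it can be logged on exit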
+            proc = subprocess.Popen(
+                cmd, stdout=stdout, stderr=stderr)
+            result, result_err = proc.communicate()
+            if not fail_ok and proc.returncode != 0:
+                raise CommandFailed(proc.returncode,
+                                    cmd,
+                                    result)
+        finally:
+            LOG.debug('output of %s:\n%s' % (cmd_str, result))
+            if not merge_stderr and result_err:
+                LOG.debug('error output of %s:\n%s' % (cmd_str, result_err))
         return result
 
     def assertTableStruct(self, items, field_names):
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index bfd7f9e..f22ec4e 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -133,6 +133,10 @@
     if not isinstance(output_lines, list):
         output_lines = output_lines.split('\n')
 
+    if not output_lines[-1]:
+        # skip last line if empty (just newline at the end)
+        output_lines = output_lines[:-1]
+
     for line in output_lines:
         if delimiter_line.match(line):
             columns = _table_columns(line)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 1e8009f..4c1c27f 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -46,9 +46,12 @@
         out = self.keystone('catalog')
         catalog = self.parser.details_multiple(out, with_label=True)
         for svc in catalog:
-            self.assertTrue(svc['__label'].startswith('Service:'),
-                            msg=('Invalid beginning of service block: %s' %
-                                 svc['__label']))
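+            # Not every catalog block has a label; check it when present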
+            if svc.get('__label'):
+                self.assertTrue(svc['__label'].startswith('Service:'),
+                                msg=('Invalid beginning of service block: '
+                                     '%s' % svc['__label']))
+            self.assertIn('id', svc.keys())
+            self.assertIn('region', svc.keys())
 
     def test_admin_endpoint_list(self):
         out = self.keystone('endpoint-list')
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index d744e3d..8dfff6e 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -307,6 +307,12 @@
         self.LOG.info("Response Status: " + status)
         headers = resp.copy()
         del headers['status']
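+        # Pop the request id header (if any) so it is logged on its own line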
+        if headers.get('x-compute-request-id'):
+            self.LOG.info("Nova request id: %s" %
+                          headers.pop('x-compute-request-id'))
+        elif headers.get('x-openstack-request-id'):
+            self.LOG.info("Glance request id %s" %
+                          headers.pop('x-openstack-request-id'))
         if len(headers):
             self.LOG.debug('Response Headers: ' + str(headers))
         if resp_body:
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index be350c8..2ed1057 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -114,9 +114,13 @@
         err_data = []
         poll = select.poll()
         poll.register(channel, select.POLLIN)
+        start_time = time.time()
+
         while True:
             ready = poll.poll(self.channel_timeout)
             if not any(ready):
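+                # Only give up once the overall timeout has expired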
+                if not self._is_timed_out(self.timeout, start_time):
+                    continue
                 raise exceptions.TimeoutException(
                     "Command: '{0}' executed on host '{1}'.".format(
                         cmd, self.host))
diff --git a/tempest/config.py b/tempest/config.py
index e0ac843..3b09b5e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -519,7 +519,10 @@
                help='regexp for list of log files.'),
     cfg.StrOpt('log_check_interval',
                default=60,
-               help='time between log file error checks.')
+               help='time (in seconds) between log file error checks.'),
+    cfg.StrOpt('default_thread_number_per_action',
+               default=4,
+               help='Default number of threads created per stress action.')
 ]
 
 
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index f9eb968..8cfd548 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -53,17 +53,6 @@
                      " in tempest/api/* tests"))
 
 
-def import_no_files_in_tests(physical_line, filename):
-    """Check for merges that try to land into tempest/tests
-
-    T103: tempest/tests directory is deprecated
-    """
-
-    if "tempest/tests" in filename:
-        return (0, ("T103: tempest/tests is deprecated"))
-
-
 def factory(register):
     register(skip_bugs)
     register(import_no_clients_in_api)
-    register(import_no_files_in_tests)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index e93d9bc..7681f04 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -29,12 +29,12 @@
 import neutronclient.v2_0.client
 import novaclient.client
 
-
 from tempest.api.network import common as net_common
 from tempest.common import isolated_creds
 from tempest.common import ssh
 from tempest.common.utils.data_utils import rand_name
 from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest import exceptions
 import tempest.manager
 from tempest.openstack.common import log as logging
 import tempest.test
@@ -283,9 +283,9 @@
             thing = things.get(thing_id)
             new_status = thing.status
             if new_status == 'ERROR':
-                self.fail("%s failed to get to expected status. "
-                          "In ERROR state."
-                          % thing)
+                message = "%s failed to get to expected status. \
+                          In ERROR state." % (thing)
+                raise exceptions.BuildErrorException(message)
             elif new_status == expected_status:
                 return True  # All good.
             LOG.debug("Waiting for %s to get to %s status. "
@@ -295,8 +295,9 @@
             check_status,
             self.config.compute.build_timeout,
             self.config.compute.build_interval):
-            self.fail("Timed out waiting for thing %s to become %s"
-                      % (thing_id, expected_status))
+            message = "Timed out waiting for thing %s \
+                      to become %s" % (thing_id, expected_status)
+            raise exceptions.TimeoutException(message)
 
     def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
         if client is None:
@@ -343,11 +344,8 @@
         LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
                   name, image, flavor)
         server = client.servers.create(name, image, flavor, **create_kwargs)
-        try:
-            self.assertEqual(server.name, name)
-            self.set_resource(name, server)
-        except AttributeError:
-            self.fail("Server not successfully created.")
+        self.assertEqual(server.name, name)
+        self.set_resource(name, server)
         self.status_timeout(client.servers, server.id, 'ACTIVE')
         # The instance retrieved on creation is missing network
         # details, necessitating retrieval after it becomes active to
@@ -373,6 +371,24 @@
         LOG.debug("Created volume: %s", volume)
         return volume
 
+    def create_server_snapshot(self, server, compute_client=None,
+                               image_client=None, name=None):
+        if compute_client is None:
+            compute_client = self.compute_client
+        if image_client is None:
+            image_client = self.image_client
+        if name is None:
+            name = rand_name('scenario-snapshot-')
+        LOG.debug("Creating a snapshot image for server: %s", server.name)
+        image_id = compute_client.servers.create_image(server, name)
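+        # Delete the snapshot on cleanup and wait for it to become active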
+        self.addCleanup(image_client.images.delete, image_id)
+        self.status_timeout(image_client.images, image_id, 'active')
+        snapshot_image = image_client.images.get(image_id)
+        self.assertEquals(name, snapshot_image.name)
+        LOG.debug("Created snapshot image %s for server %s",
+                  snapshot_image.name, server.name)
+        return snapshot_image
+
     def create_keypair(self, client=None, name=None):
         if client is None:
             client = self.compute_client
@@ -429,12 +445,9 @@
         sg_name = rand_name(namestart)
         sg_desc = sg_name + " description"
         secgroup = client.security_groups.create(sg_name, sg_desc)
-        try:
-            self.assertEqual(secgroup.name, sg_name)
-            self.assertEqual(secgroup.description, sg_desc)
-            self.set_resource(sg_name, secgroup)
-        except AttributeError:
-            self.fail("SecurityGroup object not successfully created.")
+        self.assertEqual(secgroup.name, sg_name)
+        self.assertEqual(secgroup.description, sg_desc)
+        self.set_resource(sg_name, secgroup)
 
         # Add rules to the security group
         self.create_loginable_secgroup_rule(client, secgroup.id)
@@ -595,7 +608,3 @@
     @classmethod
     def _stack_rand_name(cls):
         return rand_name(cls.__name__ + '-')
-
-    def _create_keypair(self):
-        kp_name = rand_name('keypair-smoke')
-        return self.compute_client.keypairs.create(kp_name)
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index cd959a8..78025ee 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -12,16 +12,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 from tempest.test import attr
 from tempest.test import call_until_true
 import time
 
 
-LOG = logging.getLogger(__name__)
-
-
 class AutoScalingTest(manager.OrchestrationScenarioTest):
 
     def setUp(self):
@@ -35,9 +31,8 @@
         if self.config.orchestration.keypair_name:
             self.keypair_name = self.config.orchestration.keypair_name
         else:
-            self.keypair = self._create_keypair()
+            self.keypair = self.create_keypair()
             self.keypair_name = self.keypair.id
-            self.set_resource('keypair', self.keypair)
 
     def launch_stack(self):
         self.parameters = {
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 2903687..8e14b06 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -44,12 +44,9 @@
         sg_desc = sg_name + " description"
         self.secgroup = self.compute_client.security_groups.create(sg_name,
                                                                    sg_desc)
-        try:
-            self.assertEqual(self.secgroup.name, sg_name)
-            self.assertEqual(self.secgroup.description, sg_desc)
-            self.set_resource('secgroup', self.secgroup)
-        except AttributeError:
-            self.fail("SecurityGroup object not successfully created.")
+        self.assertEqual(self.secgroup.name, sg_name)
+        self.assertEqual(self.secgroup.description, sg_desc)
+        self.set_resource('secgroup', self.secgroup)
 
         # Add rules to the security group
         self.create_loginable_secgroup_rule(secgroup_id=self.secgroup.id)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index c55e2a3..003c264 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,14 +15,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common.utils.data_utils import rand_name
-from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 
 
-LOG = logging.getLogger(__name__)
-
-
 class TestSnapshotPattern(manager.OfficialClientTest):
     """
     This test is for snapshotting an instance and booting with it.
@@ -34,14 +29,6 @@
 
     """
 
-    def _wait_for_server_status(self, server, status):
-        self.status_timeout(self.compute_client.servers,
-                            server.id,
-                            status)
-
-    def _wait_for_image_status(self, image_id, status):
-        self.status_timeout(self.image_client.images, image_id, status)
-
     def _boot_image(self, image_id):
         create_kwargs = {
             'key_name': self.keypair.name
@@ -61,17 +48,6 @@
         ssh_client.exec_command('date > /tmp/timestamp; sync')
         self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
 
-    def _create_image(self, server):
-        snapshot_name = rand_name('scenario-snapshot-')
-        create_image_client = self.compute_client.servers.create_image
-        image_id = create_image_client(server, snapshot_name)
-        self.addCleanup(self.image_client.images.delete, image_id)
-        self._wait_for_server_status(server, 'ACTIVE')
-        self._wait_for_image_status(image_id, 'active')
-        snapshot_image = self.image_client.images.get(image_id)
-        self.assertEquals(snapshot_name, snapshot_image.name)
-        return image_id
-
     def _check_timestamp(self, server_or_ip):
         ssh_client = self._ssh_to_server(server_or_ip)
         got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
@@ -100,10 +76,10 @@
             self._write_timestamp(server)
 
         # snapshot the instance
-        snapshot_image_id = self._create_image(server)
+        snapshot_image = self.create_server_snapshot(server=server)
 
         # boot a second instance from the snapshot
-        server_from_snapshot = self._boot_image(snapshot_image_id)
+        server_from_snapshot = self._boot_image(snapshot_image.id)
 
         # check the existence of the timestamp file in the second instance
         if self.config.compute.use_floatingip_for_ssh:
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c74b88d..3cbd1fa 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -50,14 +50,6 @@
     14. Check the existence of the file created in step 6 in volume2
     """
 
-    def _wait_for_server_status(self, server, status):
-        self.status_timeout(self.compute_client.servers,
-                            server.id,
-                            status)
-
-    def _wait_for_image_status(self, image_id, status):
-        self.status_timeout(self.image_client.images, image_id, status)
-
     def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
         self.status_timeout(self.volume_client.volume_snapshots,
                             volume_snapshot.id, status)
@@ -84,17 +76,6 @@
         linux_client = self.get_remote_client(server_or_ip)
         return linux_client.ssh_client
 
-    def _create_image(self, server):
-        snapshot_name = rand_name('scenario-snapshot-')
-        create_image_client = self.compute_client.servers.create_image
-        image_id = create_image_client(server, snapshot_name)
-        self.addCleanup(self.image_client.images.delete, image_id)
-        self._wait_for_server_status(server, 'ACTIVE')
-        self._wait_for_image_status(image_id, 'active')
-        snapshot_image = self.image_client.images.get(image_id)
-        self.assertEquals(snapshot_name, snapshot_image.name)
-        return image_id
-
     def _create_volume_snapshot(self, volume):
         snapshot_name = rand_name('scenario-snapshot-')
         volume_snapshots = self.volume_client.volume_snapshots
@@ -189,14 +170,14 @@
         volume_snapshot = self._create_volume_snapshot(volume)
 
         # snapshot the instance
-        snapshot_image_id = self._create_image(server)
+        snapshot_image = self.create_server_snapshot(server=server)
 
         # create second volume from the snapshot(volume2)
         volume_from_snapshot = self._create_volume(
             snapshot_id=volume_snapshot.id)
 
         # boot second instance from the snapshot(instance2)
-        server_from_snapshot = self._boot_image(snapshot_image_id)
+        server_from_snapshot = self._boot_image(snapshot_image.id)
 
         # create and add floating IP to server_from_snapshot
         if self.config.compute.use_floatingip_for_ssh:
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
index 8fa177e..d873d30 100644
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ b/tempest/scenario/test_volume_snapshot_pattern.py
@@ -12,13 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.openstack.common import log as logging
-
 from tempest.common.utils.data_utils import rand_name
 from tempest.scenario import manager
 
-LOG = logging.getLogger(__name__)
-
 
 class TestVolumeSnapshotPattern(manager.OfficialClientTest):
 
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 22f3f26..e896e0d 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -42,7 +42,7 @@
 
         resp, body = self.get(uri)
         body = json.loads(body)
-        return resp, body
+        return resp, body['stacks']
 
     def create_stack(self, name, disable_rollback=True, parameters={},
                      timeout_mins=60, template=None, template_url=None):
@@ -135,7 +135,7 @@
                 # been created yet
                 pass
             else:
-                resource_name = body['logical_resource_id']
+                resource_name = body['resource_name']
                 resource_status = body['resource_status']
                 if resource_status == status:
                     return
@@ -176,3 +176,64 @@
                            (stack_name, status, self.build_timeout))
                 raise exceptions.TimeoutException(message)
             time.sleep(self.build_interval)
+
+    def show_resource_metadata(self, stack_identifier, resource_name):
+        """Returns the resource's metadata."""
+        url = ('stacks/{stack_identifier}/resources/{resource_name}'
+               '/metadata'.format(**locals()))
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['metadata']
+
+    def list_events(self, stack_identifier):
+        """Returns list of all events for a stack."""
+        url = 'stacks/{stack_identifier}/events'.format(**locals())
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['events']
+
+    def list_resource_events(self, stack_identifier, resource_name):
+        """Returns list of all events for a resource from stack."""
+        url = ('stacks/{stack_identifier}/resources/{resource_name}'
+               '/events'.format(**locals()))
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['events']
+
+    def show_event(self, stack_identifier, resource_name, event_id):
+        """Returns the details of a single stack's event."""
+        url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
+               '/{event_id}'.format(**locals()))
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body['event']
+
+    def show_template(self, stack_identifier):
+        """Returns the template for the stack."""
+        url = ('stacks/{stack_identifier}/template'.format(**locals()))
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body
+
+    def _validate_template(self, post_body):
+        """Returns the validation request result."""
+        post_body = json.dumps(post_body)
+        resp, body = self.post('validate', post_body, self.headers)
+        body = json.loads(body)
+        return resp, body
+
+    def validate_template(self, template, parameters={}):
+        """Returns the validation result for a template with parameters."""
+        post_body = {
+            'template': template,
+            'parameters': parameters,
+        }
+        return self._validate_template(post_body)
+
+    def validate_template_url(self, template_url, parameters={}):
+        """Returns the validation result for a template with parameters."""
+        post_body = {
+            'template_url': template_url,
+            'parameters': parameters,
+        }
+        return self._validate_template(post_body)
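The new validate_template/validate_template_url helpers simply POST a request
body to Heat's validate endpoint and return the parsed result. A minimal usage
sketch (illustrative only; the client instance and template contents are
assumptions, not part of this patch):

    # 'client' would be an orchestration client obtained from a tempest
    # client manager; the template dict is a trivial placeholder.
    template = {
        'HeatTemplateFormatVersion': '2012-12-12',
        'Parameters': {'KeyName': {'Type': 'String'}},
        'Resources': {},
    }
    resp, body = client.validate_template(template,
                                          parameters={'KeyName': 'mykey'})
    # 'body' holds Heat's validation result (e.g. the parameter schema);
    # 'resp' is the raw HTTP response.
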
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
index 95cc1bc..5ab5573 100644
--- a/tempest/stress/actions/unit_test.py
+++ b/tempest/stress/actions/unit_test.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from tempest.openstack.common import importutils
+from tempest.openstack.common import log as logging
 import tempest.stress.stressaction as stressaction
 
 
@@ -46,6 +47,7 @@
         method = kwargs['test_method'].split('.')
         self.test_method = method.pop()
         self.klass = importutils.import_class('.'.join(method))
+        self.logger = logging.getLogger('.'.join(method))
         # valid options are 'process', 'application', 'action'
         self.class_setup_per = kwargs.get('class_setup_per',
                                           SetUpClassRunTime.process)
@@ -55,6 +57,12 @@
             self.klass.setUpClass()
         self.setupclass_called = False
 
+    @property
+    def action(self):
+        if self.test_method:
+            return self.test_method
+        return super(UnitTest, self).action
+
     def run_core(self):
         res = self.klass(self.test_method).run()
         if res.errors:
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index efc57a9..e518d28 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -102,6 +102,8 @@
     """
     logfiles = admin_manager.config.stress.target_logfiles
     log_check_interval = int(admin_manager.config.stress.log_check_interval)
+    default_thread_num = int(admin_manager.config.stress.
+                             default_thread_number_per_action)
     if logfiles:
         controller = admin_manager.config.stress.target_controller
         computes = _get_compute_nodes(controller)
@@ -112,7 +114,7 @@
             manager = admin_manager
         else:
             manager = clients.Manager()
-        for p_number in xrange(test.get('threads', 1)):
+        for p_number in xrange(test.get('threads', default_thread_num)):
             if test.get('use_isolated_tenants', False):
                 username = rand_name("stress_user")
                 tenant_name = rand_name("stress_tenant")
@@ -146,7 +148,7 @@
 
             process = {'process': p,
                        'p_number': p_number,
-                       'action': test['action'],
+                       'action': test_run.action,
                        'statistic': shared_statistic}
 
             processes.append(process)
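With this change the driver labels each worker with test_run.action and falls
back to the new stress.default_thread_number_per_action option when a test
description omits "threads". For reference, one entry in the list handed to
stress_openstack() looks roughly like the dict below (a sketch; the test_method
value is an illustrative placeholder):

    {
        'action': 'tempest.stress.actions.unit_test.UnitTest',
        'threads': 4,                    # optional, defaults to the new option
        'use_isolated_tenants': False,   # optional
        'kwargs': {
            'test_method': 'tempest.api.compute.flavors.'
                           'test_flavors.FlavorsTestJSON.test_list_flavors',
            'class_setup_per': 'process',
        },
    }
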
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
index 32e3ae0..aab2afd 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/stress/run_stress.py
@@ -19,13 +19,52 @@
 import argparse
 import json
 import sys
+from testtools.testsuite import iterate_tests
+from unittest import loader
+
+
+def discover_stress_tests(path="./", filter_attr=None):
+    """Discovers all tempest tests and create action out of them
+    """
+
+    tests = []
+    testloader = loader.TestLoader()
+    list = testloader.discover(path)
+    for func in (iterate_tests(list)):
+        try:
+            method_name = getattr(func, '_testMethodName')
+            full_name = "%s.%s.%s" % (func.__module__,
+                                      func.__class__.__name__,
+                                      method_name)
+            test_func = getattr(func, method_name)
+            # NOTE(mkoderer): this contains a list of all type attributes
+            attrs = getattr(test_func, "__testtools_attrs")
+        except Exception:
+            continue
+        if 'stress' in attrs:
+            if filter_attr is not None and filter_attr not in attrs:
+                continue
+            class_setup_per = getattr(test_func, "st_class_setup_per")
+
+            action = {'action':
+                      "tempest.stress.actions.unit_test.UnitTest",
+                      'kwargs': {"test_method": full_name,
+                                 "class_setup_per": class_setup_per
+                                 }
+                      }
+            tests.append(action)
+    return tests
 
 
 def main(ns):
     # NOTE(mkoderer): moved import to make "-h" possible without OpenStack
     from tempest.stress import driver
     result = 0
-    tests = json.load(open(ns.tests, 'r'))
+    if not ns.all:
+        tests = json.load(open(ns.tests, 'r'))
+    else:
+        tests = discover_stress_tests(filter_attr=ns.type)
+
     if ns.serial:
         for test in tests:
             step_result = driver.stress_openstack([test],
@@ -49,7 +88,13 @@
                     default=False, help="Stop on first error.")
 parser.add_argument('-n', '--number', type=int,
                     help="How often an action is executed for each process.")
-parser.add_argument('tests', help="Name of the file with test description.")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('-a', '--all', action='store_true',
+                   help="Execute all stress tests")
+parser.add_argument('-T', '--type',
+                    help="Filters tests of a certain type (e.g. gate)")
+group.add_argument('-t', "--tests", nargs='?',
+                   help="Name of the file with test description.")
 
 if __name__ == "__main__":
     sys.exit(main(parser.parse_args()))
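The runner now either loads a JSON description file (-t/--tests) or discovers
every test method carrying the 'stress' attr (-a/--all, optionally narrowed
with -T/--type). A hedged sketch of driving the discovery helper directly (the
path and filter value are illustrative):

    from tempest.stress import run_stress

    # Roughly what "run_stress.py -a -T gate" does before handing the list
    # to the driver: collect all stress-decorated methods, keeping only
    # those that also carry the 'gate' attr.
    tests = run_stress.discover_stress_tests(path='./tempest',
                                             filter_attr='gate')
    for test in tests:
        print(test['kwargs']['test_method'])
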
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index 3719841..28251af 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -33,6 +33,13 @@
         self.tearDown()
         sys.exit(0)
 
+    @property
+    def action(self):
+        """This methods returns the action. Overload this if you
+        create a stress test wrapper.
+        """
+        return self.__class__.__name__
+
     def setUp(self, **kwargs):
         """This method is called before the run method
         to help the test initialize any structures.
@@ -60,6 +67,8 @@
 
         while self.max_runs is None or (shared_statistic['runs'] <
                                         self.max_runs):
+            self.logger.debug("Trigger new run (run %d)" %
+                              shared_statistic['runs'])
             try:
                 self.run()
             except Exception:
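The new action property lets wrapper actions (such as UnitTest above) report
the wrapped test's name instead of the class name in the driver's per-process
statistics. A minimal subclass sketch under those assumptions (the class and
its behaviour are illustrative, not part of tempest):

    import time

    import tempest.stress.stressaction as stressaction


    class SleepAction(stressaction.StressAction):
        # run() performs one unit of work; the driver's execute() loop calls
        # it until max_runs is reached or an error occurs.

        def setUp(self, **kwargs):
            self.duration = float(kwargs.get('duration', 1.0))

        @property
        def action(self):
            # Shown in the driver's process/statistics output instead of
            # the plain class name.
            return 'sleep-%s' % self.duration

        def run(self):
            time.sleep(self.duration)
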
diff --git a/tempest/test.py b/tempest/test.py
index 68cedf0..ca626da 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -57,6 +57,27 @@
     return decorator
 
 
+def stresstest(*args, **kwargs):
+    """Add stress test decorator
+
+    For all functions with this decorator an attr 'stress' will be
+    set automatically.
+
+    @param class_setup_per: allowed values are application, process, action
+           ``application``: once in the stress job lifetime
+           ``process``: once in the worker process lifetime
+           ``action``: on each action
+    """
+    def decorator(f):
+        if 'class_setup_per' in kwargs:
+            setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
+        else:
+            setattr(f, "st_class_setup_per", 'process')
+        attr(type='stress')(f)
+        return f
+    return decorator
+
+
 # there is a mis-match between nose and testtools for older pythons.
 # testtools will set skipException to be either
 # unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
@@ -120,7 +141,7 @@
 
     @classmethod
     def tearDownClass(cls):
-        at_exit_set.remove(cls)
+        at_exit_set.discard(cls)
         if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
             super(BaseTestCase, cls).tearDownClass()
 
@@ -147,6 +168,11 @@
                 os.environ.get('OS_STDERR_CAPTURE') == '1'):
             stderr = self.useFixture(fixtures.StringStream('stderr')).stream
             self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
+            os.environ.get('OS_LOG_CAPTURE') != '0'):
+            log_format = '%(asctime)-15s %(message)s'
+            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
+                                                   format=log_format))
 
     @classmethod
     def _get_identity_admin_client(cls):
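A usage sketch for the new stresstest decorator (the test class and method
names are illustrative); discover_stress_tests() in run_stress.py later finds
the method through the attrs set here:

    from tempest import test


    class ServersStressTest(test.BaseTestCase):
        # Illustrative test class; in practice this would derive from one of
        # the service-specific base test classes.

        @test.stresstest(class_setup_per='process')
        @test.attr(type='gate')
        def test_create_and_delete_server(self):
            # normal tempest test body; the stress runner wraps it via
            # tempest.stress.actions.unit_test.UnitTest
            pass
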
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
new file mode 100644
index 0000000..4098686
--- /dev/null
+++ b/tempest/tests/README.rst
@@ -0,0 +1,25 @@
+Tempest Guide to Unit tests
+===========================
+
+What are these tests?
+---------------------
+
+Unit tests are the self-checks for Tempest. They provide functional
+verification and regression checking for the internal components of tempest.
+They should only verify that the individual pieces of tempest are working as
+expected. They should not require an external service to be running and
+should be able to run solely from the tempest tree.
+
+Why are these tests in tempest?
+-------------------------------
+These tests exist to make sure that the mechanisms we use inside of tempest
+are valid and remain functional. They are only here for the self-validation
+of tempest.
+
+
+Scope of these tests
+--------------------
+Unit tests should not require an external service to be running or any extra
+configuration to run. Any state that is required for a test should either be
+mocked out or created in a temporary test directory (see test_wrappers.py for
+an example of using a temporary test directory).
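As a minimal illustration of the "temporary test directory" guidance in the
README above (an illustrative sketch, not part of this patch), a unit test can
keep all of its state in a throwaway directory:

    import os
    import shutil
    import tempfile

    import testtools


    class TestUsesTempDir(testtools.TestCase):
        def setUp(self):
            super(TestUsesTempDir, self).setUp()
            # Every file the test touches lives in a directory that is
            # removed again during cleanup.
            self.directory = tempfile.mkdtemp(prefix='tempest-unit')
            self.addCleanup(shutil.rmtree, self.directory,
                            ignore_errors=True)

        def test_writes_a_file(self):
            path = os.path.join(self.directory, 'example.txt')
            with open(path, 'w') as f:
                f.write('data')
            self.assertTrue(os.path.exists(path))
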
diff --git a/tempest/tests/__init__.py b/tempest/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/__init__.py
diff --git a/tempest/tests/files/__init__.py b/tempest/tests/files/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/files/__init__.py
diff --git a/tempest/tests/files/failing-tests b/tempest/tests/files/failing-tests
new file mode 100644
index 0000000..0ec5421
--- /dev/null
+++ b/tempest/tests/files/failing-tests
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+class FakeTestClass(testtools.TestCase):
+    def test_pass(self):
+        self.assertTrue(False)
+
+    def test_pass_list(self):
+        test_list = ['test', 'a', 'b']
+        self.assertIn('fail', test_list)
diff --git a/tempest/tests/files/passing-tests b/tempest/tests/files/passing-tests
new file mode 100644
index 0000000..2f5b7c9
--- /dev/null
+++ b/tempest/tests/files/passing-tests
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+class FakeTestClass(testtools.TestCase):
+    def test_pass(self):
+        self.assertTrue(True)
+
+    def test_pass_list(self):
+        test_list = ['test', 'a', 'b']
+        self.assertIn('test', test_list)
diff --git a/tempest/tests/files/setup.cfg b/tempest/tests/files/setup.cfg
new file mode 100644
index 0000000..8639baa
--- /dev/null
+++ b/tempest/tests/files/setup.cfg
@@ -0,0 +1,20 @@
+[metadata]
+name = tempest_unit_tests
+version = 1
+summary = Fake Project for testing wrapper scripts
+author = OpenStack QA
+author-email = openstack-qa@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    Intended Audience :: Developers
+    License :: OSI Approved :: Apache Software License
+    Operating System :: POSIX :: Linux
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
diff --git a/tempest/tests/files/testr-conf b/tempest/tests/files/testr-conf
new file mode 100644
index 0000000..d5ad083
--- /dev/null
+++ b/tempest/tests/files/testr-conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=([^\.]*\.)*
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
new file mode 100644
index 0000000..aeea98d
--- /dev/null
+++ b/tempest/tests/test_wrappers.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import shutil
+import subprocess
+import tempfile
+import testtools
+
+from tempest.test import attr
+
+DEVNULL = open(os.devnull, 'wb')
+
+
+class TestWrappers(testtools.TestCase):
+    def setUp(self):
+        super(TestWrappers, self).setUp()
+        # Setup test dirs
+        self.directory = tempfile.mkdtemp(prefix='tempest-unit')
+        self.test_dir = os.path.join(self.directory, 'tests')
+        os.mkdir(self.test_dir)
+        # Setup Test files
+        self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
+        self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+        self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
+        self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
+        self.init_file = os.path.join(self.test_dir, '__init__.py')
+        self.setup_py = os.path.join(self.directory, 'setup.py')
+        shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
+        shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
+        shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
+        shutil.copy('setup.py', self.setup_py)
+        shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
+        shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+
+    @attr(type='smoke')
+    def test_pretty_tox(self):
+        # Copy wrapper script and requirements:
+        pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
+        shutil.copy('tools/pretty_tox.sh', pretty_tox)
+        # Change directory, run wrapper and check result
+        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+        os.chdir(self.directory)
+        # Git init is required for the pbr testr command. pbr requires a git
+        # version or an sdist to work, so make the test directory a git repo
+        # too.
+        subprocess.call(['git', 'init'])
+        exit_code = subprocess.call('sh pretty_tox.sh tests.passing',
+                                    shell=True, stdout=DEVNULL, stderr=DEVNULL)
+        self.assertEquals(exit_code, 0)
+
+    @attr(type='smoke')
+    def test_pretty_tox_fails(self):
+        # Copy wrapper script and requirements:
+        pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
+        shutil.copy('tools/pretty_tox.sh', pretty_tox)
+        # Change directory, run wrapper and check result
+        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+        os.chdir(self.directory)
+        # Git init is required for the pbr testr command. pbr requires a git
+        # version or an sdist to work, so make the test directory a git repo
+        # too.
+        subprocess.call(['git', 'init'])
+        exit_code = subprocess.call('sh pretty_tox.sh', shell=True,
+                                    stdout=DEVNULL, stderr=DEVNULL)
+        self.assertEquals(exit_code, 1)
+
+    @attr(type='smoke')
+    def test_pretty_tox_serial(self):
+        # Copy wrapper script and requirements:
+        pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
+        shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
+        # Change directory, run wrapper and check result
+        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+        os.chdir(self.directory)
+        exit_code = subprocess.call('sh pretty_tox_serial.sh tests.passing',
+                                    shell=True, stdout=DEVNULL, stderr=DEVNULL)
+        self.assertEquals(exit_code, 0)
+
+    @attr(type='smoke')
+    def test_pretty_tox_serial_fails(self):
+        # Copy wrapper script and requirements:
+        pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
+        shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
+        # Change directory, run wrapper and check result
+        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+        os.chdir(self.directory)
+        exit_code = subprocess.call('sh pretty_tox_serial.sh', shell=True,
+                                    stdout=DEVNULL, stderr=DEVNULL)
+        self.assertEquals(exit_code, 1)
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
index 0afb17e..06dcd7f 100644
--- a/tempest/whitebox/test_images_whitebox.py
+++ b/tempest/whitebox/test_images_whitebox.py
@@ -16,10 +16,13 @@
 #    under the License.
 
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.whitebox import manager
 
 from novaclient import exceptions
 
+LOG = logging.getLogger(__name__)
+
 
 class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
     _interface = 'json'
@@ -65,8 +68,9 @@
                               self.create_image,
                               self.shared_server.id, image_name)
         except Exception:
-            self.fail("Should not allow create image when vm_state=%s and "
+            LOG.error("Should not allow create image when vm_state=%s and "
                       "task_state=%s" % (vm_state, task_state))
+            raise
         finally:
             self.update_state(self.shared_server.id, 'active', None)
 
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
index abe903c..b6c888c 100644
--- a/tempest/whitebox/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -17,8 +17,11 @@
 
 from tempest.api.identity.base import BaseIdentityAdminTest
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.whitebox import manager
 
+LOG = logging.getLogger(__name__)
+
 
 class ServersWhiteboxTest(manager.ComputeWhiteboxTest):
     _interface = 'json'
@@ -66,25 +69,21 @@
         Base method for delete server tests based on vm and task states.
         Validates for successful server termination.
         """
-        try:
-            server = self.create_server()
-            self.update_state(server['id'], vm_state, task_state)
+        server = self.create_server()
+        self.update_state(server['id'], vm_state, task_state)
 
-            resp, body = self.client.delete_server(server['id'])
-            self.assertEqual('204', resp['status'])
-            self.client.wait_for_server_termination(server['id'],
-                                                    ignore_error=True)
+        resp, body = self.client.delete_server(server['id'])
+        self.assertEqual('204', resp['status'])
+        self.client.wait_for_server_termination(server['id'],
+                                                ignore_error=True)
 
-            instances = self.meta.tables['instances']
-            stmt = instances.select().where(instances.c.uuid == server['id'])
-            result = self.connection.execute(stmt).first()
+        instances = self.meta.tables['instances']
+        stmt = instances.select().where(instances.c.uuid == server['id'])
+        result = self.connection.execute(stmt).first()
 
-            self.assertEqual(True, result.deleted > 0)
-            self.assertEqual('deleted', result.vm_state)
-            self.assertEqual(None, result.task_state)
-        except Exception:
-            self.fail("Should be able to delete a server when vm_state=%s and "
-                      "task_state=%s" % (vm_state, task_state))
+        self.assertEqual(True, result.deleted > 0)
+        self.assertEqual('deleted', result.vm_state)
+        self.assertEqual(None, result.task_state)
 
     def _test_delete_server_403_base(self, vm_state, task_state):
         """
@@ -98,8 +97,9 @@
                               self.client.delete_server,
                               self.shared_server['id'])
         except Exception:
-            self.fail("Should not allow delete server when vm_state=%s and "
+            LOG.error("Should not allow delete server when vm_state=%s and "
                       "task_state=%s" % (vm_state, task_state))
+            raise
         finally:
             self.update_state(self.shared_server['id'], 'active', None)
 
diff --git a/test-requirements.txt b/test-requirements.txt
index 236a473..6c313ca 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,8 +1,4 @@
-# Install bounded pep8/pyflakes first, then let flake8 install
-pep8==1.4.5
-pyflakes==0.7.2
-flake8==2.0
-hacking>=0.5.6,<0.7
+hacking>=0.5.6,<0.8
 # needed for doc build
 docutils==0.9.1
 sphinx>=1.1.2
diff --git a/tox.ini b/tox.ini
index ea27b92..0b57eb2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,13 +19,13 @@
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
 commands =
-  sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+  sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests)) {posargs}'
 
 [testenv:testr-full]
 sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
 commands =
-  sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+  sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests)) {posargs}'
 
 [testenv:heat-slow]
 sitepackages = True
@@ -34,6 +34,13 @@
 commands =
   sh tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
 
+[testenv:large-ops]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}'
+
+
 [testenv:py26-full]
 sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
@@ -44,7 +51,7 @@
          NOSE_OPENSTACK_SHOW_ELAPSED=1
          NOSE_OPENSTACK_STDOUT=1
 commands =
-  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --xunit-file=nosetests-full.xml tempest/api tempest/scenario tempest/thirdparty tempest/cli {posargs}
+  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --xunit-file=nosetests-full.xml tempest/api tempest/scenario tempest/thirdparty tempest/cli tempest/tests {posargs}
 
 [testenv:py26-smoke]
 setenv = VIRTUAL_ENV={envdir}
@@ -60,6 +67,9 @@
 [testenv:smoke]
 sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
+# This is still serial because neutron doesn't work with parallel (see
+# https://bugs.launchpad.net/tempest/+bug/1216076); the neutron smoke
+# job would fail if we moved it to parallel.
 commands =
    sh tools/pretty_tox_serial.sh 'smoke {posargs}'
 
@@ -68,14 +78,14 @@
 setenv = VIRTUAL_ENV={envdir}
 commands =
    python -m tools/tempest_coverage -c start --combine
-   sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli))'
+   sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests))'
    python -m tools/tempest_coverage -c report --html {posargs}
 
 [testenv:stress]
 sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
 commands =
-    python -m tempest/stress/run_stress tempest/stress/etc/stress-tox-job.json -d 3600
+    python -m tempest.stress.run_stress -a -d 3600
 
 [testenv:venv]
 commands = {posargs}