Merge "Remove skip code for test_servers_whitebox as bug had been fixed"
diff --git a/.testr.conf b/.testr.conf
index fbea056..510f4c9 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -1,5 +1,8 @@
 [DEFAULT]
-test_command=${PYTHON:-python} -m subunit.run $LISTOPT $IDOPTION
-test_id_option=$(${PYTHON:-python} -m tools/run_test_classes $IDFILE)
-test_list_option=discover -t ./ ./tempest --list
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-250} \
+             ${PYTHON:-python} -m subunit.run discover -t ./ ./tempest $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
 group_regex=([^\.]*\.)*
diff --git a/HACKING.rst b/HACKING.rst
index 2ac766e..1eb2d4f 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -32,7 +32,7 @@
 Exception Handling
 ------------------
 According to the ``The Zen of Python`` the
- ``Errors should never pass silently.``
+``Errors should never pass silently.``
 Tempest usually runs in special environment (jenkins gate jobs), in every
 error or failure situation we should provide as much error related
 information as possible, because we usually do not have the chance to
@@ -57,6 +57,10 @@
 exception at least logged.  When the exception is logged you usually need
 to ``raise`` the same or a different exception anyway.
 
+Use of ``self.addCleanup`` is often a good way to avoid having to catch
+exceptions and still ensure resources are correctly cleaned up if the
+test fails part way through.
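+
+A minimal sketch of the pattern (illustrative only; the ``create_server``
+helper and ``servers_client`` attribute are assumed from the compute base
+test class)::
+
+    resp, server = self.create_server(wait_until='ACTIVE')
+    # The cleanup runs even if a later assertion fails, so no
+    # try/except/finally block is needed to delete the server.
+    self.addCleanup(self.servers_client.delete_server, server['id'])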
+
 Use the ``self.assert*`` methods provided by the unit test framework
 to signal failures early.
 
@@ -74,5 +78,10 @@
 This and the service logs are your only guide to finding the root cause of a
 flaky issue.
 
-
-
+Guidelines
+----------
+- Do not submit changesets that contain only skipped test cases; they
+  will not be merged.
+- Consistently check the status code of responses in test cases. The
+  earlier a problem is detected, the easier it is to debug, especially
+  where complicated setup is required (see the sketch below).
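+
+For example, asserting the status right after each call keeps the failure
+close to its cause (a sketch only; the client method and attribute names
+are taken from the compute API tests)::
+
+    resp, flavors = self.client.list_flavors_with_detail()
+    # Fail at the call site if the request itself did not succeed.
+    self.assertEqual(200, resp.status)
+    self.assertNotEqual(0, len(flavors))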
diff --git a/README.rst b/README.rst
index da0f5f3..f18628a 100644
--- a/README.rst
+++ b/README.rst
@@ -1,5 +1,3 @@
-::
-
 Tempest - The OpenStack Integration Test Suite
 ==============================================
 
@@ -37,9 +35,11 @@
 Tempest is not tied to any single test runner, but Nose has been the most
 commonly used tool. After setting up your configuration file, you can execute
 the set of Tempest tests by using ``nosetests`` ::
+
     $> nosetests tempest
 
 To run one single test  ::
+
     $> nosetests -sv tempest.api.compute.servers.test_server_actions.py:
        ServerActionsTestJSON.test_rebuild_nonexistent_server
 
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index f5e51cd..3aa0497 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,3 +1,15 @@
+[DEFAULT]
+# log_config = /opt/stack/tempest/etc/logging.conf.sample
+
+# disable logging to stderr
+use_stderr = False
+
+# log file
+log_file = tempest.log
+
+# lock/semaphore base directory
+lock_path = /tmp
+
 [identity]
 # This section contains configuration options that a variety of Tempest
 # test clients use when authenticating with different user/tenant
@@ -91,6 +103,9 @@
 # IP version of the address used for SSH
 ip_version_for_ssh = 4
 
+# Number of seconds to wait for a successful ping to an instance
+ping_timeout = 60
+
 # Number of seconds to wait to authenticate to an instance
 ssh_timeout = 300
 
@@ -209,8 +224,6 @@
 # for each tenant to have their own router.
 public_router_id = {$PUBLIC_ROUTER_ID}
 
-# Whether or not neutron is expected to be available
-neutron_available = false
 
 [volume]
 # This section contains the configuration options used when executing tests
@@ -308,9 +321,6 @@
 # tests spawn full VMs, which could be slow if the test is already in a VM.
 build_timeout = 300
 
-# Whether or not Heat is expected to be available
-heat_available = false
-
 # Instance type for tests. Needs to be big enough for a
 # full OS plus the test workload
 instance_type = m1.micro
@@ -323,6 +333,13 @@
 # any key, which will generate a keypair for each test class
 #keypair_name = heat_key
 
+[dashboard]
+# URL where to find the dashboard home page
+dashboard_url = 'http://localhost/'
+
+# URL where to submit the login form
+login_url = 'http://localhost/auth/login/'
+
 [scenario]
 # Directory containing image files
 img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
@@ -348,3 +365,19 @@
 enabled = True
 # directory where python client binaries are located
 cli_dir = /usr/local/bin
+
+[service_available]
+# Whether or not cinder is expected to be available
+cinder = True
+# Whether or not neutron is expected to be available
+neutron = False
+# Whether or not glance is expected to be available
+glance = True
+# Whether or not swift is expected to be available
+swift = True
+# Whether or not nova is expected to be available
+nova = True
+# Whether or not Heat is expected to be available
+heat = False
+# Whether or not horizon is expected to be available
+horizon = True
diff --git a/openstack-common.conf b/openstack-common.conf
index 24af119..ff84404 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,10 @@
 [DEFAULT]
 
 # The list of modules to copy from openstack-common
-modules=install_venv_common
+module=install_venv_common
+module=lockutils
+module=log
+module=importlib
 
 # The base module to hold the copy of openstack.common
 base=tempest
diff --git a/requirements.txt b/requirements.txt
index 06aa9f3..cc61b01 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@
 oslo.config>=1.1.0
 # Needed for whitebox testing
 sqlalchemy
+eventlet>=0.12.0
diff --git a/run_tests.sh b/run_tests.sh
index d5081c7..a645b22 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -118,7 +118,7 @@
   if [ $with_testr -eq 1 ]; then
       testr_init
       ${wrapper} find . -type f -name "*.pyc" -delete
-      ${wrapper} testr run --parallel $noseargs
+      ${wrapper} testr run --parallel --subunit $noseargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
   else
       ${wrapper} $NOSETESTS
   fi
diff --git a/tempest/README.rst b/tempest/README.rst
index 8f07a07..33021c8 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -1,6 +1,6 @@
-============
+============================
 Tempest Field Guide Overview
-============
+============================
 
 Tempest is designed to be useful for a large number of different
 environments. This includes being useful for gating commits to
@@ -26,7 +26,7 @@
 
 
 api
-------------
+---
 
 API tests are validation tests for the OpenStack API. They should not
 use the existing python clients for OpenStack, but should instead use
@@ -41,7 +41,7 @@
 
 
 cli
-------------
+---
 
 CLI tests use the openstack CLI to interact with the OpenStack
 cloud. CLI testing in unit tests is somewhat difficult because unlike
@@ -51,7 +51,7 @@
 
 
 scenario
-------------
+--------
 
 Scenario tests are complex "through path" tests for OpenStack
 functionality. They are typically a series of steps where complicated
@@ -61,7 +61,7 @@
 
 
 stress
------------
+------
 
 Stress tests are designed to stress an OpenStack environment by
 running a high workload against it and seeing what breaks. Tools may
@@ -72,7 +72,7 @@
 
 
 thirdparty
-------------
+----------
 
 Many openstack components include 3rdparty API support. It is
 completely legitimate for Tempest to include tests of 3rdparty APIs,
@@ -81,7 +81,7 @@
 
 
 whitebox
-----------
+--------
 
 Whitebox tests are tests which require access to the database of the
 target OpenStack machine to verify internal state after operations
diff --git a/tempest/api/compute/__init__.py b/tempest/api/compute/__init__.py
index fb96b4a..fd26081 100644
--- a/tempest/api/compute/__init__.py
+++ b/tempest/api/compute/__init__.py
@@ -15,9 +15,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 from tempest import config
 from tempest.exceptions import InvalidConfiguration
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index b66bd7e..107d635 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -18,6 +18,7 @@
 from tempest.api.compute import base
 from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
+from tempest.openstack.common import lockutils
 from tempest.test import attr
 
 
@@ -197,6 +198,7 @@
         self.assertIn(self.host, body['hosts'])
 
     @attr(type='gate')
+    @lockutils.synchronized('availability_zone', 'tempest-', True)
     def test_aggregate_add_host_create_server_with_az(self):
         # Add an host to the given aggregate and create a server.
         aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -205,7 +207,6 @@
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
         self.client.add_host(aggregate['id'], self.host)
         self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
-
         server_name = rand_name('test_server_')
         servers_client = self.servers_client
         admin_servers_client = self.os_adm.servers_client
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 2eaf3b0..8b96370 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -56,7 +56,7 @@
 
     CONF = config.TempestConfig()
 
-    @testtools.skipIf(CONF.network.neutron_available, "This feature is not" +
+    @testtools.skipIf(CONF.service_available.neutron, "This feature is not " +
                       "implemented by Neutron. See bug: #1194569")
     @attr(type='gate')
     def test_list_fixed_ip_details(self):
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 6db20f9..6d0a5b5 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -60,7 +60,7 @@
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
 
-        #Create the flavor
+        # Create the flavor
         resp, flavor = self.client.create_flavor(flavor_name,
                                                  self.ram, self.vcpus,
                                                  self.disk,
@@ -87,7 +87,7 @@
         if self._interface == "json":
             self.assertEqual(flavor['os-flavor-access:is_public'], True)
 
-        #Verify flavor is retrieved
+        # Verify flavor is retrieved
         resp, flavor = self.client.get_flavor_details(new_flavor_id)
         self.assertEqual(resp.status, 200)
         self.assertEqual(flavor['name'], flavor_name)
@@ -99,7 +99,7 @@
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
 
-        #Create the flavor
+        # Create the flavor
         resp, flavor = self.client.create_flavor(flavor_name,
                                                  self.ram, self.vcpus,
                                                  self.disk,
@@ -109,7 +109,7 @@
                                                  rxtx=self.rxtx)
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         flag = False
-        #Verify flavor is retrieved
+        # Verify flavor is retrieved
         resp, flavors = self.client.list_flavors_with_detail()
         self.assertEqual(resp.status, 200)
         for flavor in flavors:
@@ -153,12 +153,12 @@
 
     @attr(type='gate')
     def test_create_list_flavor_without_extra_data(self):
-        #Create a flavor and ensure it is listed
-        #This operation requires the user to have 'admin' role
+        # Create a flavor and ensure it is listed
+        # This operation requires the user to have 'admin' role
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
 
-        #Create the flavor
+        # Create the flavor
         resp, flavor = self.client.create_flavor(flavor_name,
                                                  self.ram, self.vcpus,
                                                  self.disk,
@@ -181,11 +181,11 @@
         if self._interface == "json":
             self.assertEqual(flavor['os-flavor-access:is_public'], True)
 
-        #Verify flavor is retrieved
+        # Verify flavor is retrieved
         resp, flavor = self.client.get_flavor_details(new_flavor_id)
         self.assertEqual(resp.status, 200)
         self.assertEqual(flavor['name'], flavor_name)
-        #Check if flavor is present in list
+        # Check if flavor is present in list
         resp, flavors = self.client.list_flavors_with_detail()
         self.assertEqual(resp.status, 200)
         for flavor in flavors:
@@ -195,13 +195,13 @@
 
     @attr(type='gate')
     def test_flavor_not_public_verify_entry_not_in_list_details(self):
-        #Create a flavor with os-flavor-access:is_public false should not
-        #be present in list_details.
-        #This operation requires the user to have 'admin' role
+        # Create a flavor with os-flavor-access:is_public false should not
+        # be present in list_details.
+        # This operation requires the user to have 'admin' role
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
 
-        #Create the flavor
+        # Create the flavor
         resp, flavor = self.client.create_flavor(flavor_name,
                                                  self.ram, self.vcpus,
                                                  self.disk,
@@ -209,7 +209,7 @@
                                                  is_public="False")
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         flag = False
-        #Verify flavor is retrieved
+        # Verify flavor is retrieved
         resp, flavors = self.client.list_flavors_with_detail()
         self.assertEqual(resp.status, 200)
         for flavor in flavors:
@@ -219,12 +219,12 @@
 
     @attr(type='gate')
     def test_list_public_flavor_with_other_user(self):
-        #Create a Flavor with public access.
-        #Try to List/Get flavor with another user
+        # Create a Flavor with public access.
+        # Try to List/Get flavor with another user
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
 
-            #Create the flavor
+        # Create the flavor
         resp, flavor = self.client.create_flavor(flavor_name,
                                                  self.ram, self.vcpus,
                                                  self.disk,
@@ -233,7 +233,7 @@
         self.addCleanup(self.flavor_clean_up, flavor['id'])
         flag = False
         self.new_client = self.flavors_client
-        #Verify flavor is retrieved with new user
+        # Verify flavor is retrieved with new user
         resp, flavors = self.new_client.list_flavors_with_detail()
         self.assertEqual(resp.status, 200)
         for flavor in flavors:
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 63d5025..107b23d 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -52,7 +52,7 @@
 
     @attr(type='gate')
     def test_flavor_access_add_remove(self):
-        #Test to add and remove flavor access to a given tenant.
+        # Test to add and remove flavor access to a given tenant.
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
         resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -61,7 +61,7 @@
                                                      new_flavor_id,
                                                      is_public='False')
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
-        #Add flavor access to a tenant.
+        # Add flavor access to a tenant.
         resp_body = {
             "tenant_id": str(self.tenant_id),
             "flavor_id": str(new_flavor['id']),
@@ -71,25 +71,25 @@
         self.assertEqual(add_resp.status, 200)
         self.assertIn(resp_body, add_body)
 
-        #The flavor is present in list.
+        # The flavor is present in list.
         resp, flavors = self.flavors_client.list_flavors_with_detail()
         self.assertEqual(resp.status, 200)
         self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
 
-        #Remove flavor access from a tenant.
+        # Remove flavor access from a tenant.
         remove_resp, remove_body = \
             self.client.remove_flavor_access(new_flavor['id'], self.tenant_id)
         self.assertEqual(remove_resp.status, 200)
         self.assertNotIn(resp_body, remove_body)
 
-        #The flavor is not present in list.
+        # The flavor is not present in list.
         resp, flavors = self.flavors_client.list_flavors_with_detail()
         self.assertEqual(resp.status, 200)
         self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
 
     @attr(type=['negative', 'gate'])
     def test_flavor_non_admin_add(self):
-        #Test to add flavor access as a user without admin privileges.
+        # Test to add flavor access as a user without admin privileges.
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
         resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -105,7 +105,7 @@
 
     @attr(type=['negative', 'gate'])
     def test_flavor_non_admin_remove(self):
-        #Test to remove flavor access as a user without admin privileges.
+        # Test to remove flavor access as a user without admin privileges.
         flavor_name = rand_name(self.flavor_name_prefix)
         new_flavor_id = rand_int_id(start=1000)
         resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -114,7 +114,7 @@
                                                      new_flavor_id,
                                                      is_public='False')
         self.addCleanup(self.client.delete_flavor, new_flavor['id'])
-        #Add flavor access to a tenant.
+        # Add flavor access to a tenant.
         self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
         self.addCleanup(self.client.remove_flavor_access,
                         new_flavor['id'], self.tenant_id)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index db376b5..7b79a12 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -47,7 +47,7 @@
         cls.new_flavor_id = 12345
         swap = 1024
         rxtx = 1
-        #Create a flavor so as to set/get/unset extra specs
+        # Create a flavor so as to set/get/unset extra specs
         resp, cls.flavor = cls.client.create_flavor(flavor_name,
                                                     ram, vcpus,
                                                     disk,
@@ -62,28 +62,28 @@
 
     @attr(type='gate')
     def test_flavor_set_get_unset_keys(self):
+        # Test to SET, GET, UNSET flavor extra spec as a user
-        #with admin privileges.
-        #Assigning extra specs values that are to be set
+        # Test to SET, GET UNSET flavor extra spec as a user
+        # with admin privileges.
+        # Assigning extra specs values that are to be set
         specs = {"key1": "value1", "key2": "value2"}
-        #SET extra specs to the flavor created in setUp
+        # SET extra specs to the flavor created in setUp
         set_resp, set_body = \
             self.client.set_flavor_extra_spec(self.flavor['id'], specs)
         self.assertEqual(set_resp.status, 200)
         self.assertEqual(set_body, specs)
-        #GET extra specs and verify
+        # GET extra specs and verify
         get_resp, get_body = \
             self.client.get_flavor_extra_spec(self.flavor['id'])
         self.assertEqual(get_resp.status, 200)
         self.assertEqual(get_body, specs)
-        #UNSET extra specs that were set in this test
+        # UNSET extra specs that were set in this test
         unset_resp, _ = \
             self.client.unset_flavor_extra_spec(self.flavor['id'], "key1")
         self.assertEqual(unset_resp.status, 200)
 
     @attr(type=['negative', 'gate'])
     def test_flavor_non_admin_set_keys(self):
-        #Test to SET flavor extra spec as a user without admin privileges.
+        # Test to SET flavor extra spec as a user without admin privileges.
         specs = {"key1": "value1", "key2": "value2"}
         self.assertRaises(exceptions.Unauthorized,
                           self.flavors_client.set_flavor_extra_spec,
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index a47e6c9..849cebb 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -48,7 +48,7 @@
         resp, hosts = self.client.list_hosts(params)
         self.assertEqual(200, resp.status)
         self.assertTrue(len(hosts) >= 1)
-        self.assertTrue(host in hosts)
+        self.assertIn(host, hosts)
 
     @attr(type='negative')
     def test_list_hosts_with_non_existent_zone(self):
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index a6b4e31..d8d162e 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -36,7 +36,7 @@
 
         resp, tenants = cls.identity_admin_client.list_tenants()
 
-        #NOTE(afazekas): these test cases should always create and use a new
+        # NOTE(afazekas): these test cases should always create and use a new
         # tenant most of them should be skipped if we can't do that
         if cls.config.compute.allow_tenant_isolation:
             cls.demo_tenant_id = cls.isolated_creds[0][0]['tenantId']
@@ -44,13 +44,13 @@
             cls.demo_tenant_id = [tnt['id'] for tnt in tenants if tnt['name']
                                   == cls.config.identity.tenant_name][0]
 
-        cls.default_quota_set = {'injected_file_content_bytes': 10240,
-                                 'metadata_items': 128, 'injected_files': 5,
-                                 'ram': 51200, 'floating_ips': 10,
-                                 'fixed_ips': -1, 'key_pairs': 100,
-                                 'injected_file_path_bytes': 255,
-                                 'instances': 10, 'security_group_rules': 20,
-                                 'cores': 20, 'security_groups': 10}
+        cls.default_quota_set = set(('injected_file_content_bytes',
+                                     'metadata_items', 'injected_files',
+                                     'ram', 'floating_ips',
+                                     'fixed_ips', 'key_pairs',
+                                     'injected_file_path_bytes',
+                                     'instances', 'security_group_rules',
+                                     'cores', 'security_groups'))
 
     @classmethod
     def tearDownClass(cls):
@@ -64,12 +64,13 @@
     @attr(type='smoke')
     def test_get_default_quotas(self):
         # Admin can get the default resource quota set for a tenant
-        expected_quota_set = self.default_quota_set.copy()
-        expected_quota_set['id'] = self.demo_tenant_id
+        expected_quota_set = self.default_quota_set | set(['id'])
         resp, quota_set = self.client.get_default_quota_set(
             self.demo_tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(expected_quota_set, quota_set)
+        self.assertEqual(sorted(expected_quota_set),
+                         sorted(quota_set.keys()))
+        self.assertEqual(quota_set['id'], self.demo_tenant_id)
 
     @testtools.skip("Skipped until the Bug #1160749 is resolved")
     @attr(type='gate')
@@ -101,28 +102,27 @@
             self.assertEqual(200, resp.status, "Failed to reset quota "
                              "defaults")
 
-    #TODO(afazekas): merge these test cases
+    # TODO(afazekas): merge these test cases
     @attr(type='gate')
     def test_get_updated_quotas(self):
         # Verify that GET shows the updated quota set
-        self.adm_client.update_quota_set(self.demo_tenant_id,
-                                         ram='5120')
-        self.addCleanup(self.adm_client.update_quota_set,
-                        self.demo_tenant_id, **self.default_quota_set)
-        try:
-            resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
-            self.assertEqual(200, resp.status)
-            self.assertEqual(quota_set['ram'], 5120)
-        except Exception:
-            self.fail("Could not get the update quota limit for resource")
-        finally:
-            # Reset quota resource limits to default values
-            resp, quota_set = self.adm_client.update_quota_set(
-                self.demo_tenant_id,
-                **self.default_quota_set)
-            self.assertEqual(200, resp.status, "Failed to reset quota "
-                             "defaults")
+        tenant_name = rand_name('cpu_quota_tenant_')
+        tenant_desc = tenant_name + '-desc'
+        identity_client = self.os_adm.identity_client
+        _, tenant = identity_client.create_tenant(name=tenant_name,
+                                                  description=tenant_desc)
+        tenant_id = tenant['id']
+        self.addCleanup(identity_client.delete_tenant,
+                        tenant_id)
 
+        self.adm_client.update_quota_set(tenant_id,
+                                         ram='5120')
+        resp, quota_set = self.adm_client.get_quota_set(tenant_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(quota_set['ram'], 5120)
+
+    # TODO(afazekas): Add a dedicated tenant to the skipped quota tests;
+    # this can be moved into setUpClass as well
     @testtools.skip("Skipped until the Bug #1160749 is resolved")
     @attr(type='gate')
     def test_create_server_when_cpu_quota_is_full(self):
@@ -155,12 +155,12 @@
                         ram=default_mem_quota)
         self.assertRaises(exceptions.OverLimit, self.create_server)
 
-#TODO(afazekas): Add test that tried to update the quota_set as a regular user
+# TODO(afazekas): Add test that tries to update the quota_set as a regular user
 
     @testtools.skip("Skipped until the Bug #1160749 is resolved")
     @attr(type=['negative', 'gate'])
     def test_create_server_when_instances_quota_is_full(self):
-        #Once instances quota limit is reached, disallow server creation
+        # Once instances quota limit is reached, disallow server creation
         resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
         default_instances_quota = quota_set['instances']
         instances_quota = 0  # Set quota to zero to disallow server creation
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 78dac21..434ea2f 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2013 NEC Corporation
+# Copyright 2013 IBM Corp.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -36,17 +37,99 @@
 
     @attr(type='gate')
     def test_list_services(self):
-        # List Compute services
         resp, services = self.client.list_services()
         self.assertEqual(200, resp.status)
-        self.assertTrue(len(services) >= 2)
+        self.assertNotEqual(0, len(services))
 
     @attr(type=['negative', 'gate'])
     def test_list_services_with_non_admin_user(self):
-        # List Compute service with non admin user
         self.assertRaises(exceptions.Unauthorized,
                           self.non_admin_client.list_services)
 
+    @attr(type='gate')
+    def test_get_service_by_service_binary_name(self):
+        binary_name = 'nova-compute'
+        params = {'binary': binary_name}
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertNotEqual(0, len(services))
+        for service in services:
+            self.assertEqual(binary_name, service['binary'])
+
+    @attr(type='gate')
+    def test_get_service_by_host_name(self):
+        resp, services = self.client.list_services()
+        host_name = services[0]['host']
+        services_on_host = [service for service in services if
+                            service['host'] == host_name]
+        params = {'host': host_name}
+        resp, services = self.client.list_services(params)
+
+        # A periodic job could check in between the two service lookups,
+        # so only compare the binary lists.
+        s1 = map(lambda x: x['binary'], services)
+        s2 = map(lambda x: x['binary'], services_on_host)
+
+        # sort the lists before comparing, to take out dependency
+        # on order.
+        self.assertEqual(sorted(s1), sorted(s2))
+
+    @attr(type=['negative', 'gate'])
+    def test_get_service_by_invalid_params(self):
+        # All services are returned if the request has an invalid parameter.
+        resp, services = self.client.list_services()
+        params = {'xxx': 'nova-compute'}
+        resp, services_xxx = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(len(services), len(services_xxx))
+
+    @attr(type='gate')
+    def test_get_service_by_service_and_host_name(self):
+        resp, services = self.client.list_services()
+        host_name = services[0]['host']
+        binary_name = services[0]['binary']
+        params = {'host': host_name, 'binary': binary_name}
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(1, len(services))
+        self.assertEqual(host_name, services[0]['host'])
+        self.assertEqual(binary_name, services[0]['binary'])
+
+    @attr(type=['negative', 'gate'])
+    def test_get_service_by_invalid_service_and_valid_host(self):
+        resp, services = self.client.list_services()
+        host_name = services[0]['host']
+        params = {'host': host_name, 'binary': 'xxx'}
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(0, len(services))
+
+    @attr(type=['negative', 'gate'])
+    def test_get_service_with_valid_service_and_invalid_host(self):
+        resp, services = self.client.list_services()
+        binary_name = services[0]['binary']
+        params = {'host': 'xxx', 'binary': binary_name}
+        resp, services = self.client.list_services(params)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(0, len(services))
+
+    @attr(type='gate')
+    def test_service_enable_disable(self):
+        resp, services = self.client.list_services()
+        host_name = services[0]['host']
+        binary_name = services[0]['binary']
+
+        resp, service = self.client.disable_service(host_name, binary_name)
+        self.assertEqual(200, resp.status)
+        params = {'host': host_name, 'binary': binary_name}
+        resp, services = self.client.list_services(params)
+        self.assertEqual('disabled', services[0]['status'])
+
+        resp, service = self.client.enable_service(host_name, binary_name)
+        self.assertEqual(200, resp.status)
+        resp, services = self.client.list_services(params)
+        self.assertEqual('enabled', services[0]['status'])
+
 
 class ServicesAdminTestXML(ServicesAdminTestJSON):
     _interface = 'xml'
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index abc5899..d40b0e0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -19,10 +19,9 @@
 
 from tempest.api import compute
 from tempest import clients
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import parse_image_id
 from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
+from tempest.openstack.common import log as logging
 import tempest.test
 
 
@@ -36,6 +35,9 @@
 
     @classmethod
     def setUpClass(cls):
+        if not cls.config.service_available.nova:
+            skip_msg = ("%s skipped as nova is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
         cls.isolated_creds = []
 
         if cls.config.compute.allow_tenant_isolation:
@@ -79,89 +81,6 @@
         cls.servers_client_v3_auth = os.servers_client_v3_auth
 
     @classmethod
-    def _get_identity_admin_client(cls):
-        """
-        Returns an instance of the Identity Admin API client
-        """
-        os = clients.AdminManager(interface=cls._interface)
-        admin_client = os.identity_client
-        return admin_client
-
-    @classmethod
-    def _get_client_args(cls):
-
-        return (
-            cls.config,
-            cls.config.identity.admin_username,
-            cls.config.identity.admin_password,
-            cls.config.identity.uri
-        )
-
-    @classmethod
-    def _get_isolated_creds(cls):
-        """
-        Creates a new set of user/tenant/password credentials for a
-        **regular** user of the Compute API so that a test case can
-        operate in an isolated tenant container.
-        """
-        admin_client = cls._get_identity_admin_client()
-        password = "pass"
-
-        while True:
-            try:
-                rand_name_root = rand_name(cls.__name__)
-                if cls.isolated_creds:
-                # Main user already created. Create the alt one...
-                    rand_name_root += '-alt'
-                tenant_name = rand_name_root + "-tenant"
-                tenant_desc = tenant_name + "-desc"
-
-                resp, tenant = admin_client.create_tenant(
-                    name=tenant_name, description=tenant_desc)
-                break
-            except exceptions.Duplicate:
-                if cls.config.compute.allow_tenant_reuse:
-                    tenant = admin_client.get_tenant_by_name(tenant_name)
-                    LOG.info('Re-using existing tenant %s', tenant)
-                    break
-
-        while True:
-            try:
-                rand_name_root = rand_name(cls.__name__)
-                if cls.isolated_creds:
-                # Main user already created. Create the alt one...
-                    rand_name_root += '-alt'
-                username = rand_name_root + "-user"
-                email = rand_name_root + "@example.com"
-                resp, user = admin_client.create_user(username,
-                                                      password,
-                                                      tenant['id'],
-                                                      email)
-                break
-            except exceptions.Duplicate:
-                if cls.config.compute.allow_tenant_reuse:
-                    user = admin_client.get_user_by_username(tenant['id'],
-                                                             username)
-                    LOG.info('Re-using existing user %s', user)
-                    break
-        # Store the complete creds (including UUID ids...) for later
-        # but return just the username, tenant_name, password tuple
-        # that the various clients will use.
-        cls.isolated_creds.append((user, tenant))
-
-        return username, tenant_name, password
-
-    @classmethod
-    def clear_isolated_creds(cls):
-        if not cls.isolated_creds:
-            return
-        admin_client = cls._get_identity_admin_client()
-
-        for user, tenant in cls.isolated_creds:
-            admin_client.delete_user(user['id'])
-            admin_client.delete_tenant(tenant['id'])
-
-    @classmethod
     def clear_servers(cls):
         for server in cls.servers:
             try:
@@ -189,7 +108,7 @@
     def tearDownClass(cls):
         cls.clear_images()
         cls.clear_servers()
-        cls.clear_isolated_creds()
+        cls._clear_isolated_creds()
 
     @classmethod
     def create_server(cls, **kwargs):
@@ -263,10 +182,16 @@
         admin_username = cls.config.compute_admin.username
         admin_password = cls.config.compute_admin.password
         admin_tenant = cls.config.compute_admin.tenant_name
-
         if not (admin_username and admin_password and admin_tenant):
             msg = ("Missing Compute Admin API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
-
-        cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+        if cls.config.compute.allow_tenant_isolation:
+            creds = cls._get_isolated_creds(admin=True)
+            admin_username, admin_tenant_name, admin_password = creds
+            cls.os_adm = clients.Manager(username=admin_username,
+                                         password=admin_password,
+                                         tenant_name=admin_tenant_name,
+                                         interface=cls._interface)
+        else:
+            cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 27526eb..51ce20c 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -35,14 +35,14 @@
         resp, flavor = self.client.get_flavor_details(self.flavor_ref)
         flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
                              'name': flavor['name']}
-        self.assertTrue(flavor_min_detail in flavors)
+        self.assertIn(flavor_min_detail, flavors)
 
     @attr(type='smoke')
     def test_list_flavors_with_detail(self):
         # Detailed list of all flavors should contain the expected flavor
         resp, flavors = self.client.list_flavors_with_detail()
         resp, flavor = self.client.get_flavor_details(self.flavor_ref)
-        self.assertTrue(flavor in flavors)
+        self.assertIn(flavor, flavors)
 
     @attr(type='smoke')
     def test_get_flavor(self):
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 0d7f26d..930ebcb 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -32,15 +32,15 @@
         cls.client = cls.floating_ips_client
         cls.servers_client = cls.servers_client
 
-        #Server creation
+        # Server creation
         resp, server = cls.create_server(wait_until='ACTIVE')
         cls.server_id = server['id']
         resp, body = cls.servers_client.get_server(server['id'])
-        #Floating IP creation
+        # Floating IP creation
         resp, body = cls.client.create_floating_ip()
         cls.floating_ip_id = body['id']
         cls.floating_ip = body['ip']
-        #Generating a nonexistent floatingIP id
+        # Generating a nonexistent floatingIP id
         cls.floating_ip_ids = []
         resp, body = cls.client.list_floating_ips()
         for i in range(len(body)):
@@ -52,7 +52,7 @@
 
     @classmethod
     def tearDownClass(cls):
-        #Deleting the floating IP which is created in this method
+        # Deleting the floating IP which is created in this method
         resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
         super(FloatingIPsTestJSON, cls).tearDownClass()
 
@@ -66,17 +66,17 @@
             floating_ip_id_allocated = body['id']
             resp, floating_ip_details = \
                 self.client.get_floating_ip_details(floating_ip_id_allocated)
-            #Checking if the details of allocated IP is in list of floating IP
+            # Check the allocated IP's details are in the floating IP list
             resp, body = self.client.list_floating_ips()
-            self.assertTrue(floating_ip_details in body)
+            self.assertIn(floating_ip_details, body)
         finally:
-            #Deleting the floating IP which is created in this method
+            # Deleting the floating IP which is created in this method
             self.client.delete_floating_ip(floating_ip_id_allocated)
 
     @attr(type=['negative', 'gate'])
     def test_allocate_floating_ip_from_nonexistent_pool(self):
         # Positive test:Allocation of a new floating IP from a nonexistent_pool
-        #to a project should fail
+        # to a project should fail
         self.assertRaises(exceptions.NotFound,
                           self.client.create_floating_ip,
                           "non_exist_pool")
@@ -85,12 +85,12 @@
     def test_delete_floating_ip(self):
         # Positive test:Deletion of valid floating IP from project
         # should be successful
-        #Creating the floating IP that is to be deleted in this method
+        # Creating the floating IP that is to be deleted in this method
         resp, floating_ip_body = self.client.create_floating_ip()
-        #Storing the details of floating IP before deleting it
+        # Storing the details of floating IP before deleting it
         cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
         resp, floating_ip_details = cli_resp
-        #Deleting the floating IP from the project
+        # Deleting the floating IP from the project
         resp, body = self.client.delete_floating_ip(floating_ip_body['id'])
         self.assertEqual(202, resp.status)
         # Check it was really deleted.
@@ -101,12 +101,12 @@
         # Positive test:Associate and disassociate the provided floating IP
         # to a specific server should be successful
 
-        #Association of floating IP to fixed IP address
+        # Association of floating IP to fixed IP address
         resp, body = self.client.associate_floating_ip_to_server(
             self.floating_ip,
             self.server_id)
         self.assertEqual(202, resp.status)
-        #Disassociation of floating IP that was associated in this method
+        # Disassociation of floating IP that was associated in this method
         resp, body = self.client.disassociate_floating_ip_from_server(
             self.floating_ip,
             self.server_id)
@@ -142,18 +142,18 @@
     def test_associate_already_associated_floating_ip(self):
         # positive test:Association of an already associated floating IP
         # to specific server should change the association of the Floating IP
-        #Create server so as to use for Multiple association
+        # Create a server to use for multiple association
         resp, body = self.servers_client.create_server('floating-server2',
                                                        self.image_ref,
                                                        self.flavor_ref)
         self.servers_client.wait_for_server_status(body['id'], 'ACTIVE')
         self.new_server_id = body['id']
 
-        #Associating floating IP for the first time
+        # Associating floating IP for the first time
         resp, _ = self.client.associate_floating_ip_to_server(
             self.floating_ip,
             self.server_id)
-        #Associating floating IP for the second time
+        # Associating floating IP for the second time
         resp, body = self.client.associate_floating_ip_to_server(
             self.floating_ip,
             self.new_server_id)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 3e1aa82..e380334 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -51,12 +51,12 @@
         self.assertNotEqual(0, len(floating_ips),
                             "Expected floating IPs. Got zero.")
         for i in range(3):
-            self.assertTrue(self.floating_ip[i] in floating_ips)
+            self.assertIn(self.floating_ip[i], floating_ips)
 
     @attr(type='gate')
     def test_get_floating_ip_details(self):
         # Positive test:Should be able to GET the details of floatingIP
-        #Creating a floating IP for which details are to be checked
+        # Creating a floating IP for which details are to be checked
         try:
             resp, body = self.client.create_floating_ip()
             floating_ip_instance_id = body['instance_id']
@@ -66,14 +66,14 @@
             resp, body = \
                 self.client.get_floating_ip_details(floating_ip_id)
             self.assertEqual(200, resp.status)
-            #Comparing the details of floating IP
+            # Comparing the details of floating IP
             self.assertEqual(floating_ip_instance_id,
                              body['instance_id'])
             self.assertEqual(floating_ip_ip, body['ip'])
             self.assertEqual(floating_ip_fixed_ip,
                              body['fixed_ip'])
             self.assertEqual(floating_ip_id, body['id'])
-        #Deleting the floating IP created in this method
+        # Deleting the floating IP created in this method
         finally:
             self.client.delete_floating_ip(floating_ip_id)
 
@@ -85,7 +85,7 @@
         resp, body = self.client.list_floating_ips()
         for i in range(len(body)):
             floating_ip_id.append(body[i]['id'])
-        #Creating a nonexistant floatingIP id
+        # Creating a nonexistent floatingIP id
         while True:
             non_exist_id = rand_name('999')
             if non_exist_id not in floating_ip_id:
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 7b8e1cc..52239cd 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -27,6 +27,10 @@
     @classmethod
     def setUpClass(cls):
         super(ImagesMetadataTestJSON, cls).setUpClass()
+        if not cls.config.service_available.glance:
+            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
         cls.servers_client = cls.servers_client
         cls.client = cls.images_client
 
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index a74bb68..4f9364b 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -1,7 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -30,6 +29,9 @@
     @classmethod
     def setUpClass(cls):
         super(ImagesTestJSON, cls).setUpClass()
+        if not cls.config.service_available.glance:
+            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
         cls.client = cls.images_client
         cls.servers_client = cls.servers_client
 
@@ -89,24 +91,29 @@
                           '!@#$%^&*()', name, meta)
 
     @attr(type=['negative', 'gate'])
-    def test_create_image_when_server_is_terminating(self):
-        # Return an error when creating image of server that is terminating
+    def test_create_image_from_stopped_server(self):
         resp, server = self.create_server(wait_until='ACTIVE')
-        self.servers_client.delete_server(server['id'])
-
+        self.servers_client.stop(server['id'])
+        self.servers_client.wait_for_server_status(server['id'],
+                                                   'SHUTOFF')
+        self.addCleanup(self.servers_client.delete_server, server['id'])
         snapshot_name = rand_name('test-snap-')
-        self.assertRaises(exceptions.Duplicate, self.client.create_image,
-                          server['id'], snapshot_name)
+        resp, image = self.create_image_from_server(server['id'],
+                                                    name=snapshot_name,
+                                                    wait_until='ACTIVE')
+        self.addCleanup(self.client.delete_image, image['id'])
+        self.assertEqual(snapshot_name, image['name'])
 
-    @attr(type=['negative', 'gate'])
-    def test_create_image_when_server_is_rebooting(self):
-        # Return error when creating an image of server that is rebooting
+    @attr(type='gate')
+    def test_delete_saving_image(self):
+        snapshot_name = rand_name('test-snap-')
         resp, server = self.create_server(wait_until='ACTIVE')
-        self.servers_client.reboot(server['id'], 'HARD')
-
-        snapshot_name = rand_name('test-snap-')
-        self.assertRaises(exceptions.Duplicate, self.client.create_image,
-                          server['id'], snapshot_name)
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+        resp, image = self.create_image_from_server(server['id'],
+                                                    name=snapshot_name,
+                                                    wait_until='SAVING')
+        resp, body = self.client.delete_image(image['id'])
+        self.assertEqual('204', resp['status'])
 
     @attr(type=['negative', 'gate'])
     def test_create_image_specify_uuid_35_characters_or_less(self):
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 7740cfc..64f1854 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -40,6 +40,9 @@
     def setUpClass(cls):
         super(ImagesOneServerTestJSON, cls).setUpClass()
         cls.client = cls.images_client
+        if not cls.config.service_available.glance:
+            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
 
         try:
             resp, cls.server = cls.create_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 5c6b630..a80f456 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -16,9 +16,9 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import parse_image_id
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 
 
@@ -31,6 +31,9 @@
     @classmethod
     def setUpClass(cls):
         super(ListImageFiltersTestJSON, cls).setUpClass()
+        if not cls.config.service_available.glance:
+            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
         cls.client = cls.images_client
         cls.image_ids = []
 
@@ -141,16 +144,16 @@
         # Verify only the expected number of results are returned
         params = {'limit': '1'}
         resp, images = self.client.list_images(params)
-        #when _interface='xml', one element for images_links in images
-        #ref: Question #224349
+        # when _interface='xml', one element for images_links in images
+        # ref: Question #224349
         self.assertEqual(1, len([x for x in images if 'id' in x]))
 
     @attr(type='gate')
     def test_list_images_filter_by_changes_since(self):
         # Verify only updated images are returned in the detailed list
 
-        #Becoming ACTIVE will modify the updated time
-        #Filter by the image's created time
+        # Becoming ACTIVE will modify the updated time
+        # Filter by the image's created time
         params = {'changes-since': self.image3['created']}
         resp, images = self.client.list_images(params)
         found = any([i for i in images if i['id'] == self.image3_id])
@@ -219,8 +222,8 @@
     def test_list_images_with_detail_filter_by_changes_since(self):
         # Verify an update image is returned
 
-        #Becoming ACTIVE will modify the updated time
-        #Filter by the image's created time
+        # Becoming ACTIVE will modify the updated time
+        # Filter by the image's created time
         params = {'changes-since': self.image1['created']}
         resp, images = self.client.list_images_with_detail(params)
         self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index fddad14..c7e23b1 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -25,6 +25,9 @@
     @classmethod
     def setUpClass(cls):
         super(ListImagesTestJSON, cls).setUpClass()
+        if not cls.config.service_available.glance:
+            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
         cls.client = cls.images_client
 
     @attr(type='smoke')
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 6abca3f..e4e87c0 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -32,33 +32,33 @@
     @attr(type='gate')
     def test_keypairs_create_list_delete(self):
         # Keypairs created should be available in the response list
-        #Create 3 keypairs
+        # Create 3 keypairs
         key_list = list()
         for i in range(3):
             k_name = rand_name('keypair-')
             resp, keypair = self.client.create_keypair(k_name)
-            #Need to pop these keys so that our compare doesn't fail later,
-            #as the keypair dicts from list API doesn't have them.
+            # Need to pop these keys so that our compare doesn't fail later,
+            # as the keypair dicts from the list API don't have them.
             keypair.pop('private_key')
             keypair.pop('user_id')
             self.assertEqual(200, resp.status)
             key_list.append(keypair)
-        #Fetch all keypairs and verify the list
-        #has all created keypairs
+        # Fetch all keypairs and verify the list
+        # has all created keypairs
         resp, fetched_list = self.client.list_keypairs()
         self.assertEqual(200, resp.status)
-        #We need to remove the extra 'keypair' element in the
-        #returned dict. See comment in keypairs_client.list_keypairs()
+        # We need to remove the extra 'keypair' element in the
+        # returned dict. See comment in keypairs_client.list_keypairs()
         new_list = list()
         for keypair in fetched_list:
             new_list.append(keypair['keypair'])
         fetched_list = new_list
-        #Now check if all the created keypairs are in the fetched list
+        # Now check if all the created keypairs are in the fetched list
         missing_kps = [kp for kp in key_list if kp not in fetched_list]
         self.assertFalse(missing_kps,
                          "Failed to find keypairs %s in fetched list"
                          % ', '.join(m_key['name'] for m_key in missing_kps))
-        #Delete all the keypairs created
+        # Delete all the keypairs created
         for keypair in key_list:
             resp, _ = self.client.delete_keypair(keypair['name'])
             self.assertEqual(202, resp.status)
@@ -87,8 +87,8 @@
         try:
             resp, keypair_detail = self.client.get_keypair(k_name)
             self.assertEqual(200, resp.status)
-            self.assertTrue('name' in keypair_detail)
-            self.assertTrue('public_key' in keypair_detail)
+            self.assertIn('name', keypair_detail)
+            self.assertIn('public_key', keypair_detail)
             self.assertEqual(keypair_detail['name'], k_name,
                              "The created keypair name is not equal "
                              "to requested name")
@@ -163,7 +163,7 @@
         k_name = rand_name('keypair-')
         resp, _ = self.client.create_keypair(k_name)
         self.assertEqual(200, resp.status)
-        #Now try the same keyname to ceate another key
+        # Now try the same key name to create another key
         self.assertRaises(exceptions.Duplicate, self.client.create_keypair,
                           k_name)
         resp, _ = self.client.delete_keypair(k_name)
diff --git a/tempest/api/compute/limits/test_absolute_limits.py b/tempest/api/compute/limits/test_absolute_limits.py
index beae122..972e4a8 100644
--- a/tempest/api/compute/limits/test_absolute_limits.py
+++ b/tempest/api/compute/limits/test_absolute_limits.py
@@ -51,11 +51,11 @@
 
     @attr(type=['negative', 'gate'])
     def test_max_image_meta_exceed_limit(self):
-        #We should not create vm with image meta over maxImageMeta limit
+        # We should not create vm with image meta over maxImageMeta limit
         # Get max limit value
         max_meta = self.client.get_specific_absolute_limit('maxImageMeta')
 
-        #Create server should fail, since we are passing > metadata Limit!
+        # Create server should fail, since we are passing > metadata Limit!
         max_meta_data = int(max_meta) + 1
 
         meta_data = {}
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 6a32b64..8472561 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -33,14 +33,14 @@
     def test_security_group_rules_create(self):
         # Positive test: Creation of Security Group rule
         # should be successfull
-        #Creating a Security Group to add rules to it
+        # Creating a Security Group to add rules to it
         s_name = rand_name('securitygroup-')
         s_description = rand_name('description-')
         resp, securitygroup = \
             self.client.create_security_group(s_name, s_description)
         securitygroup_id = securitygroup['id']
         self.addCleanup(self.client.delete_security_group, securitygroup_id)
-        #Adding rules to the created Security Group
+        # Adding rules to the created Security Group
         ip_protocol = 'tcp'
         from_port = 22
         to_port = 22
@@ -60,21 +60,21 @@
 
         secgroup1 = None
         secgroup2 = None
-        #Creating a Security Group to add rules to it
+        # Creating a Security Group to add rules to it
         s_name = rand_name('securitygroup-')
         s_description = rand_name('description-')
         resp, securitygroup = \
             self.client.create_security_group(s_name, s_description)
         secgroup1 = securitygroup['id']
         self.addCleanup(self.client.delete_security_group, secgroup1)
-        #Creating a Security Group so as to assign group_id to the rule
+        # Creating a Security Group so as to assign group_id to the rule
         s_name2 = rand_name('securitygroup-')
         s_description2 = rand_name('description-')
         resp, securitygroup = \
             self.client.create_security_group(s_name2, s_description2)
         secgroup2 = securitygroup['id']
         self.addCleanup(self.client.delete_security_group, secgroup2)
-        #Adding rules to the created Security Group with optional arguments
+        # Adding rules to the created Security Group with optional arguments
         parent_group_id = secgroup1
         ip_protocol = 'tcp'
         from_port = 22
@@ -108,12 +108,12 @@
     def test_security_group_rules_create_with_invalid_ip_protocol(self):
         # Negative test: Creation of Security Group rule should FAIL
         # with invalid ip_protocol
-        #Creating a Security Group to add rule to it
+        # Creating a Security Group to add rule to it
         s_name = rand_name('securitygroup-')
         s_description = rand_name('description-')
         resp, securitygroup = self.client.create_security_group(s_name,
                                                                 s_description)
-        #Adding rules to the created Security Group
+        # Adding rules to the created Security Group
         parent_group_id = securitygroup['id']
         ip_protocol = rand_name('999')
         from_port = 22
@@ -128,12 +128,12 @@
     def test_security_group_rules_create_with_invalid_from_port(self):
         # Negative test: Creation of Security Group rule should FAIL
         # with invalid from_port
-        #Creating a Security Group to add rule to it
+        # Creating a Security Group to add rule to it
         s_name = rand_name('securitygroup-')
         s_description = rand_name('description-')
         resp, securitygroup = self.client.create_security_group(s_name,
                                                                 s_description)
-        #Adding rules to the created Security Group
+        # Adding rules to the created Security Group
         parent_group_id = securitygroup['id']
         ip_protocol = 'tcp'
         from_port = rand_name('999')
@@ -147,12 +147,12 @@
     def test_security_group_rules_create_with_invalid_to_port(self):
         # Negative test: Creation of Security Group rule should FAIL
         # with invalid from_port
-        #Creating a Security Group to add rule to it
+        # Creating a Security Group to add rule to it
         s_name = rand_name('securitygroup-')
         s_description = rand_name('description-')
         resp, securitygroup = self.client.create_security_group(s_name,
                                                                 s_description)
-        #Adding rules to the created Security Group
+        # Adding rules to the created Security Group
         parent_group_id = securitygroup['id']
         ip_protocol = 'tcp'
         from_port = 22
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index e105121..697a839 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -39,7 +39,7 @@
     @attr(type='gate')
     def test_security_groups_create_list_delete(self):
         # Positive test:Should return the list of Security Groups
-        #Create 3 Security Groups
+        # Create 3 Security Groups
         security_group_list = list()
         for i in range(3):
             s_name = rand_name('securitygroup-')
@@ -50,11 +50,11 @@
             self.addCleanup(self._delete_security_group,
                             securitygroup['id'])
             security_group_list.append(securitygroup)
-        #Fetch all Security Groups and verify the list
-        #has all created Security Groups
+        # Fetch all Security Groups and verify the list
+        # has all created Security Groups
         resp, fetched_list = self.client.list_security_groups()
         self.assertEqual(200, resp.status)
-        #Now check if all the created Security Groups are in fetched list
+        # Now check if all the created Security Groups are in fetched list
         missing_sgs = \
             [sg for sg in security_group_list if sg not in fetched_list]
         self.assertFalse(missing_sgs,
@@ -62,8 +62,8 @@
                          "list" % ', '.join(m_group['name']
                                             for m_group in missing_sgs))
 
-    #TODO(afazekas): scheduled for delete,
-    #test_security_group_create_get_delete covers it
+    # TODO(afazekas): scheduled for delete,
+    # test_security_group_create_get_delete covers it
     @attr(type='gate')
     def test_security_group_create_delete(self):
         # Security Group should be created, verified and deleted
@@ -71,13 +71,13 @@
         s_description = rand_name('description-')
         resp, securitygroup = \
             self.client.create_security_group(s_name, s_description)
-        self.assertTrue('id' in securitygroup)
+        self.assertIn('id', securitygroup)
         securitygroup_id = securitygroup['id']
         self.addCleanup(self._delete_security_group,
                         securitygroup_id)
         self.assertEqual(200, resp.status)
         self.assertFalse(securitygroup_id is None)
-        self.assertTrue('name' in securitygroup)
+        self.assertIn('name', securitygroup)
         securitygroup_name = securitygroup['name']
         self.assertEqual(securitygroup_name, s_name,
                          "The created Security Group name is "
@@ -94,12 +94,12 @@
                         securitygroup['id'])
 
         self.assertEqual(200, resp.status)
-        self.assertTrue('name' in securitygroup)
+        self.assertIn('name', securitygroup)
         securitygroup_name = securitygroup['name']
         self.assertEqual(securitygroup_name, s_name,
                          "The created Security Group name is "
                          "not equal to the requested name")
-        #Now fetch the created Security Group by its 'id'
+        # Now fetch the created Security Group by its 'id'
         resp, fetched_group = \
             self.client.get_security_group(securitygroup['id'])
         self.assertEqual(200, resp.status)
@@ -115,7 +115,7 @@
         resp, body = self.client.list_security_groups()
         for i in range(len(body)):
             security_group_id.append(body[i]['id'])
-        #Creating a nonexistant Security Group id
+        # Creating a nonexistent Security Group id
         while True:
             non_exist_id = rand_name('999')
             if non_exist_id not in security_group_id:
@@ -158,7 +158,7 @@
                           self.client.create_security_group, s_name,
                           s_description)
 
-    @testtools.skipIf(config.TempestConfig().network.neutron_available,
+    @testtools.skipIf(config.TempestConfig().service_available.neutron,
                       "Neutron allows duplicate names for security groups")
     @attr(type=['negative', 'gate'])
     def test_security_group_create_with_duplicate_name(self):
@@ -186,7 +186,7 @@
             if body[i]['name'] == 'default':
                 default_security_group_id = body[i]['id']
                 break
-        #Deleting the "default" Security Group
+        # Deleting the "default" Security Group
         self.assertRaises(exceptions.BadRequest,
                           self.client.delete_security_group,
                           default_security_group_id)
@@ -198,7 +198,7 @@
         resp, body = self.client.list_security_groups()
         for i in range(len(body)):
             security_group_id.append(body[i]['id'])
-        #Creating Non Existant Security Group
+        # Creating a nonexistent Security Group id
         while True:
             non_exist_id = rand_name('999')
             if non_exist_id not in security_group_id:
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index de095c5..9f66a6c 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -24,7 +24,7 @@
 
     @classmethod
     def setUpClass(cls):
-        if not cls.config.network.neutron_available:
+        if not cls.config.service_available.neutron:
             raise cls.skipException("Neutron is required")
         super(AttachInterfacesTestJSON, cls).setUpClass()
         cls.client = cls.os.interfaces_client
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index e9385b5..efb01af 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -39,7 +39,7 @@
         resp, server = self.create_server(disk_config='AUTO',
                                           wait_until='ACTIVE')
 
-        #Verify the specified attributes are set correctly
+        # Verify the specified attributes are set correctly
         resp, server = self.client.get_server(server['id'])
         self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
 
@@ -47,14 +47,14 @@
                                            self.image_ref_alt,
                                            disk_config='MANUAL')
 
-        #Wait for the server to become active
+        # Wait for the server to become active
         self.client.wait_for_server_status(server['id'], 'ACTIVE')
 
-        #Verify the specified attributes are set correctly
+        # Verify the specified attributes are set correctly
         resp, server = self.client.get_server(server['id'])
         self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
 
-        #Delete the server
+        # Delete the server
         resp, body = self.client.delete_server(server['id'])
 
     @attr(type='gate')
@@ -63,7 +63,7 @@
         resp, server = self.create_server(disk_config='MANUAL',
                                           wait_until='ACTIVE')
 
-        #Verify the specified attributes are set correctly
+        # Verify the specified attributes are set correctly
         resp, server = self.client.get_server(server['id'])
         self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
 
@@ -71,14 +71,14 @@
                                            self.image_ref_alt,
                                            disk_config='AUTO')
 
-        #Wait for the server to become active
+        # Wait for the server to become active
         self.client.wait_for_server_status(server['id'], 'ACTIVE')
 
-        #Verify the specified attributes are set correctly
+        # Verify the specified attributes are set correctly
         resp, server = self.client.get_server(server['id'])
         self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
 
-        #Delete the server
+        # Delete the server
         resp, body = self.client.delete_server(server['id'])
 
     @testtools.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
@@ -88,7 +88,7 @@
         resp, server = self.create_server(disk_config='MANUAL',
                                           wait_until='ACTIVE')
 
-        #Resize with auto option
+        # Resize with auto option
         self.client.resize(server['id'], self.flavor_ref_alt,
                            disk_config='AUTO')
         self.client.wait_for_server_status(server['id'], 'VERIFY_RESIZE')
@@ -98,7 +98,7 @@
         resp, server = self.client.get_server(server['id'])
         self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
 
-        #Delete the server
+        # Delete the server
         resp, body = self.client.delete_server(server['id'])
 
     @testtools.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
@@ -108,7 +108,7 @@
         resp, server = self.create_server(disk_config='AUTO',
                                           wait_until='ACTIVE')
 
-        #Resize with manual option
+        # Resize with manual option
         self.client.resize(server['id'], self.flavor_ref_alt,
                            disk_config='MANUAL')
         self.client.wait_for_server_status(server['id'], 'VERIFY_RESIZE')
@@ -118,7 +118,7 @@
         resp, server = self.client.get_server(server['id'])
         self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
 
-        #Delete the server
+        # Delete the server
         resp, body = self.client.delete_server(server['id'])
 
 
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 31b44f7..b8f965c 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -133,7 +133,7 @@
         # Verify only the expected number of servers are returned
         params = {'limit': 1}
         resp, servers = self.client.list_servers(params)
-        #when _interface='xml', one element for servers_links in servers
+        # when _interface='xml', one element for servers_links in servers
         self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
 
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index db9bdc1..bad4a11 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -59,8 +59,9 @@
         if num_servers > 0:
             username = cls.os.username
             tenant_name = cls.os.tenant_name
-            msg = ("User/tenant %(username)s/%(tenant_name)s already have "
-                   "existing server instances. Skipping test.") % locals()
+            msg = ("User/tenant %(u)s/%(t)s already have "
+                   "existing server instances. Skipping test." %
+                   {'u': username, 't': tenant_name})
             raise cls.skipException(msg)
 
         resp, body = cls.alt_client.list_servers()
@@ -69,8 +70,9 @@
         if num_servers > 0:
             username = cls.alt_manager.username
             tenant_name = cls.alt_manager.tenant_name
-            msg = ("Alt User/tenant %(username)s/%(tenant_name)s already have "
-                   "existing server instances. Skipping test.") % locals()
+            msg = ("Alt User/tenant %(u)s/%(t)s already have "
+                   "existing server instances. Skipping test." %
+                   {'u': username, 't': tenant_name})
             raise cls.skipException(msg)
 
         # The following servers are created for use
@@ -93,7 +95,7 @@
                                                ignore_error=True)
         cls.deleted_fixtures.append(srv)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_with_a_deleted_server(self):
         # Verify deleted servers do not show by default in list servers
         # List servers and verify server not returned
@@ -105,7 +107,7 @@
         self.assertEqual('200', resp['status'])
         self.assertEqual([], actual)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_non_existing_image(self):
         # Listing servers for a non existing image returns empty list
         non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
@@ -114,7 +116,7 @@
         self.assertEqual('200', resp['status'])
         self.assertEqual([], servers)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_non_existing_flavor(self):
         # Listing servers by non existing flavor returns empty list
         non_existing_flavor = 1234
@@ -123,7 +125,7 @@
         self.assertEqual('200', resp['status'])
         self.assertEqual([], servers)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_non_existing_server_name(self):
         # Listing servers for a non existent server name returns empty list
         non_existing_name = 'junk_server_1234'
@@ -132,7 +134,7 @@
         self.assertEqual('200', resp['status'])
         self.assertEqual([], servers)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_status_non_existing(self):
         # Return an empty list when invalid status is specified
         non_existing_status = 'BALONEY'
@@ -146,23 +148,23 @@
         # List servers by specifying limits
         resp, body = self.client.list_servers({'limit': 1})
         self.assertEqual('200', resp['status'])
-        #when _interface='xml', one element for servers_links in servers
+        # when _interface='xml', one element for servers_links in servers
         self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_limits_greater_than_actual_count(self):
         # List servers by specifying a greater value for limit
         resp, body = self.client.list_servers({'limit': 100})
         self.assertEqual('200', resp['status'])
         self.assertEqual(len(self.existing_fixtures), len(body['servers']))
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_limits_pass_string(self):
         # Return an error if a string value is passed for limit
         self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                           {'limit': 'testing'})
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_limits_pass_negative_value(self):
         # Return an error if a negative value for limit is passed
         self.assertRaises(exceptions.BadRequest, self.client.list_servers,
@@ -180,13 +182,13 @@
                         len(self.deleted_fixtures))
         self.assertEqual(num_expected, len(body['servers']))
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_changes_since_invalid_date(self):
         # Return an error when invalid date format is passed
         self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                           {'changes-since': '2011/01/01'})
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_by_changes_since_future_date(self):
         # Return an empty list when a date in the future is passed
         changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
@@ -194,7 +196,7 @@
         self.assertEqual('200', resp['status'])
         self.assertEqual(0, len(body['servers']))
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_list_servers_detail_server_is_deleted(self):
         # Server details are not listed for a deleted server
         deleted_ids = [s['id'] for s in self.deleted_fixtures]
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 9fde618..edfafec 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -47,7 +47,7 @@
         # reservation_id is not in the response body when the request send
         # contains return_reservation_id=False
         self.assertEqual('202', resp['status'])
-        self.assertFalse('reservation_id' in body)
+        self.assertNotIn('reservation_id', body)
 
     @attr(type=['negative', 'gate'])
     def test_min_count_less_than_one(self):
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 8b76f7f..66d8264 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -35,7 +35,7 @@
     run_ssh = tempest.config.TempestConfig().compute.run_ssh
 
     def setUp(self):
-        #NOTE(afazekas): Normally we use the same server with all test cases,
+        # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
         super(ServerActionsTestJSON, self).setUp()
         # Check if the server is in a clean state after test
@@ -121,13 +121,13 @@
                                                    personality=personality,
                                                    adminPass=password)
 
-        #Verify the properties in the initial response are correct
+        # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
         rebuilt_image_id = rebuilt_server['image']['id']
         self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
         self.assertEqual(self.flavor_ref, int(rebuilt_server['flavor']['id']))
 
-        #Verify the server properties after the rebuild completes
+        # Verify the server properties after the rebuild completes
         self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
         resp, server = self.client.get_server(rebuilt_server['id'])
         rebuilt_image_id = rebuilt_server['image']['id']
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 442d30c..45de0d6 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -47,7 +47,7 @@
         # All metadata key/value pairs for a server should be returned
         resp, resp_metadata = self.client.list_server_metadata(self.server_id)
 
-        #Verify the expected metadata items are in the list
+        # Verify the expected metadata items are in the list
         self.assertEqual(200, resp.status)
         expected = {'key1': 'value1', 'key2': 'value2'}
         self.assertEqual(expected, resp_metadata)
@@ -55,14 +55,14 @@
     @attr(type='gate')
     def test_set_server_metadata(self):
         # The server's metadata should be replaced with the provided values
-        #Create a new set of metadata for the server
+        # Create a new set of metadata for the server
         req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
         resp, metadata = self.client.set_server_metadata(self.server_id,
                                                          req_metadata)
         self.assertEqual(200, resp.status)
 
-        #Verify the expected values are correct, and that the
-        #previous values have been removed
+        # Verify the expected values are correct, and that the
+        # previous values have been removed
         resp, resp_metadata = self.client.list_server_metadata(self.server_id)
         self.assertEqual(resp_metadata, req_metadata)
 
@@ -98,7 +98,7 @@
                                                             meta)
         self.assertEqual(200, resp.status)
 
-        #Verify the values have been updated to the proper values
+        # Verify the values have been updated to the proper values
         resp, resp_metadata = self.client.list_server_metadata(self.server_id)
         expected = {'key1': 'alt1', 'key2': 'value2', 'key3': 'value3'}
         self.assertEqual(expected, resp_metadata)
@@ -123,13 +123,13 @@
     @attr(type='gate')
     def test_set_server_metadata_item(self):
         # The item's value should be updated to the provided value
-        #Update the metadata value
+        # Update the metadata value
         meta = {'nova': 'alt'}
         resp, body = self.client.set_server_metadata_item(self.server_id,
                                                           'nova', meta)
         self.assertEqual(200, resp.status)
 
-        #Verify the meta item's value has been updated
+        # Verify the meta item's value has been updated
         resp, resp_metadata = self.client.list_server_metadata(self.server_id)
         expected = {'key1': 'value1', 'key2': 'value2', 'nova': 'alt'}
         self.assertEqual(expected, resp_metadata)
@@ -141,7 +141,7 @@
                                                              'key1')
         self.assertEqual(204, resp.status)
 
-        #Verify the metadata item has been removed
+        # Verify the metadata item has been removed
         resp, resp_metadata = self.client.list_server_metadata(self.server_id)
         expected = {'key2': 'value2'}
         self.assertEqual(expected, resp_metadata)
@@ -197,7 +197,7 @@
         # Negative test: Should not be able to delete metadata item from a
         #  nonexistant server
 
-        #Delete the metadata item
+        # Delete the metadata item
         self.assertRaises(exceptions.NotFound,
                           self.client.delete_server_metadata_item, 999, 'd')
 
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 8225a4c..82559d5 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -32,12 +32,12 @@
         super(ServerRescueTestJSON, cls).setUpClass()
         cls.device = 'vdf'
 
-        #Floating IP creation
+        # Floating IP creation
         resp, body = cls.floating_ips_client.create_floating_ip()
         cls.floating_ip_id = str(body['id']).strip()
         cls.floating_ip = str(body['ip']).strip()
 
-        #Security group creation
+        # Security group creation
         cls.sg_name = rand_name('sg')
         cls.sg_desc = rand_name('sg-desc')
         resp, cls.sg = \
@@ -85,7 +85,7 @@
 
     @classmethod
     def tearDownClass(cls):
-        #Deleting the floating IP which is created in this method
+        # Deleting the floating IP which is created in this method
         cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
         client = cls.volumes_extensions_client
         client.delete_volume(str(cls.volume_to_attach['id']).strip())
@@ -110,6 +110,11 @@
         self.assertEqual(202, resp.status)
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
 
+    def _unpause(self, server_id):
+        resp, body = self.servers_client.unpause_server(server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
     @attr(type='smoke')
     def test_rescue_unrescue_instance(self):
         resp, body = self.servers_client.rescue_server(
@@ -121,11 +126,30 @@
         self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
 
     @attr(type=['negative', 'gate'])
+    def test_rescue_paused_instance(self):
+        # Rescue a paused server
+        resp, body = self.servers_client.pause_server(
+            self.server_id)
+        self.addCleanup(self._unpause, self.server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'PAUSED')
+        self.assertRaises(exceptions.Duplicate,
+                          self.servers_client.rescue_server,
+                          self.server_id)
+
+    @attr(type=['negative', 'gate'])
     def test_rescued_vm_reboot(self):
         self.assertRaises(exceptions.Duplicate, self.servers_client.reboot,
                           self.rescue_id, 'HARD')
 
     @attr(type=['negative', 'gate'])
+    def test_rescue_non_existent_server(self):
+        # Rescue a non-existing server
+        self.assertRaises(exceptions.NotFound,
+                          self.servers_client.rescue_server,
+                          '999erra43')
+
+    @attr(type=['negative', 'gate'])
     def test_rescued_vm_rebuild(self):
         self.assertRaises(exceptions.Duplicate,
                           self.servers_client.rebuild,
@@ -158,7 +182,7 @@
         # Rescue the server
         self.servers_client.rescue_server(self.server_id, self.password)
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
-        #addCleanup is a LIFO queue
+        # addCleanup is a LIFO queue
         self.addCleanup(self._detach, self.server_id,
                         self.volume_to_detach['id'])
         self.addCleanup(self._unrescue, self.server_id)
@@ -177,13 +201,13 @@
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
         self.addCleanup(self._unrescue, self.server_id)
 
-        #Association of floating IP to a rescued vm
+        # Association of floating IP to a rescued vm
         client = self.floating_ips_client
         resp, body = client.associate_floating_ip_to_server(self.floating_ip,
                                                             self.server_id)
         self.assertEqual(202, resp.status)
 
-        #Disassociation of floating IP that was associated in this method
+        # Disassociation of floating IP that was associated in this method
         resp, body = \
             client.disassociate_floating_ip_from_server(self.floating_ip,
                                                         self.server_id)
@@ -196,12 +220,12 @@
             self.server_id, self.password)
         self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
 
-        #Add Security group
+        # Add Security group
         resp, body = self.servers_client.add_security_group(self.server_id,
                                                             self.sg_name)
         self.assertEqual(202, resp.status)
 
-        #Delete Security group
+        # Delete Security group
         resp, body = self.servers_client.remove_security_group(self.server_id,
                                                                self.sg_name)
         self.assertEqual(202, resp.status)
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 5cc8dc6..703f143 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -99,6 +99,17 @@
                           self.server_id, 'SOFT')
 
     @attr(type=['negative', 'gate'])
+    def test_pause_paused_server(self):
+        # Pause a paused server.
+        resp, server = self.create_server(wait_until='ACTIVE')
+        self.server_id = server['id']
+        self.client.pause_server(self.server_id)
+        self.client.wait_for_server_status(self.server_id, 'PAUSED')
+        self.assertRaises(exceptions.Duplicate,
+                          self.client.pause_server,
+                          self.server_id)
+
+    @attr(type=['negative', 'gate'])
     def test_rebuild_deleted_server(self):
         # Rebuild a deleted server
 
@@ -210,13 +221,10 @@
     @attr(type=['negative', 'gate'])
     def test_delete_a_server_of_another_tenant(self):
         # Delete a server that belongs to another tenant
-        try:
-            resp, server = self.create_server(wait_until='ACTIVE')
-            self.assertRaises(exceptions.NotFound,
-                              self.alt_client.delete_server,
-                              server['id'])
-        finally:
-            self.client.delete_server(server['id'])
+        resp, server = self.create_server(wait_until='ACTIVE')
+        self.assertRaises(exceptions.NotFound,
+                          self.alt_client.delete_server,
+                          server['id'])
 
     @attr(type=['negative', 'gate'])
     def test_delete_server_pass_negative_id(self):
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 35f0fc0..2a5be8c 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -37,7 +37,7 @@
         resp, server = cls.create_server(wait_until='ACTIVE')
         cls.server_id = server['id']
 
-    @testtools.skipIf(CONF.network.neutron_available, "This feature is not " +
+    @testtools.skipIf(CONF.service_available.neutron, "This feature is not " +
                       "implemented by Neutron. See bug: #1183436")
     @attr(type='gate')
     def test_list_virtual_interfaces(self):
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 1a65a20..55dba97 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -126,7 +126,7 @@
     def test_list_servers_with_alternate_tenant(self):
         # A list on servers from one tenant should not
         # show on alternate tenant
-        #Listing servers from alternate tenant
+        # Listing servers from alternate tenant
         alt_server_ids = []
         resp, body = self.alt_client.list_servers()
         alt_server_ids = [s['id'] for s in body['servers']]
@@ -188,7 +188,7 @@
     def test_create_keypair_in_analt_user_tenant(self):
         # A create keypair request should fail if the tenant id does not match
         # the current user
-        #POST keypair with other user tenant
+        # POST keypair with other user tenant
         k_name = rand_name('keypair-')
         self.alt_keypairs_client._set_auth()
         self.saved_base_url = self.alt_keypairs_client.base_url
@@ -238,7 +238,7 @@
     def test_create_security_group_in_analt_user_tenant(self):
         # A create security group request should fail if the tenant id does not
         # match the current user
-        #POST security group with other user tenant
+        # POST security group with other user tenant
         s_name = rand_name('security-')
         s_description = rand_name('security')
         self.saved_base_url = self.alt_security_client.base_url
@@ -276,7 +276,7 @@
     def test_create_security_group_rule_in_analt_user_tenant(self):
         # A create security group rule request should fail if the tenant id
         # does not match the current user
-        #POST security group rule with other user tenant
+        # POST security group rule with other user tenant
         parent_group_id = self.security_group['id']
         ip_protocol = 'icmp'
         from_port = -1
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 291c8e4..4359c49 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -27,7 +27,7 @@
     def test_list_extensions(self):
         # List of all extensions
         resp, extensions = self.extensions_client.list_extensions()
-        self.assertTrue("extensions" in extensions)
+        self.assertIn("extensions", extensions)
         self.assertEqual(200, resp.status)
 
 
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 1a8a40b..8014fca 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -30,31 +30,33 @@
         resp, tenants = cls.admin_client.list_tenants()
         cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
                          cls.client.tenant_name][0]
-        cls.default_quota_set = {'injected_file_content_bytes': 10240,
-                                 'metadata_items': 128, 'injected_files': 5,
-                                 'ram': 51200, 'floating_ips': 10,
-                                 'fixed_ips': -1, 'key_pairs': 100,
-                                 'injected_file_path_bytes': 255,
-                                 'instances': 10, 'security_group_rules': 20,
-                                 'cores': 20, 'security_groups': 10}
+        cls.default_quota_set = set(('injected_file_content_bytes',
+                                     'metadata_items', 'injected_files',
+                                     'ram', 'floating_ips',
+                                     'fixed_ips', 'key_pairs',
+                                     'injected_file_path_bytes',
+                                     'instances', 'security_group_rules',
+                                     'cores', 'security_groups'))
 
     @attr(type='smoke')
     def test_get_quotas(self):
         # User can get the quota set for it's tenant
-        expected_quota_set = self.default_quota_set.copy()
-        expected_quota_set['id'] = self.tenant_id
+        expected_quota_set = self.default_quota_set | set(['id'])
         resp, quota_set = self.client.get_quota_set(self.tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(expected_quota_set, quota_set)
+        self.assertEqual(sorted(expected_quota_set),
+                         sorted(quota_set.keys()))
+        self.assertEqual(quota_set['id'], self.tenant_id)
 
     @attr(type='smoke')
     def test_get_default_quotas(self):
         # User can get the default quota set for it's tenant
-        expected_quota_set = self.default_quota_set.copy()
-        expected_quota_set['id'] = self.tenant_id
+        expected_quota_set = self.default_quota_set | set(['id'])
         resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
         self.assertEqual(200, resp.status)
-        self.assertEqual(expected_quota_set, quota_set)
+        self.assertEqual(sorted(expected_quota_set),
+                         sorted(quota_set.keys()))
+        self.assertEqual(quota_set['id'], self.tenant_id)
 
 
 class QuotasTestXML(QuotasTestJSON):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index b507e03..e756870 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -37,6 +37,9 @@
     def setUpClass(cls):
         super(AttachVolumeTestJSON, cls).setUpClass()
         cls.device = 'vdb'
+        if not cls.config.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
 
     def _detach(self, server_id, volume_id):
         self.servers_client.detach_volume(server_id, volume_id)
@@ -88,7 +91,7 @@
             linux_client = RemoteClient(server,
                                         self.ssh_user, server['adminPass'])
             partitions = linux_client.get_partitions()
-            self.assertTrue(self.device in partitions)
+            self.assertIn(self.device, partitions)
 
             self._detach(server['id'], volume['id'])
             self.attached = False
@@ -102,7 +105,7 @@
             linux_client = RemoteClient(server,
                                         self.ssh_user, server['adminPass'])
             partitions = linux_client.get_partitions()
-            self.assertFalse(self.device in partitions)
+            self.assertNotIn(self.device, partitions)
         except Exception:
             self.fail("The test_attach_detach_volume is faild!")
         finally:
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 1acc57d..4f0f17e 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -28,6 +28,9 @@
     def setUpClass(cls):
         super(VolumesGetTestJSON, cls).setUpClass()
         cls.client = cls.volumes_extensions_client
+        if not cls.config.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
 
     @attr(type='smoke')
     def test_volume_create_get_delete(self):
@@ -35,25 +38,25 @@
         volume = None
         v_name = rand_name('Volume-%s-') % self._interface
         metadata = {'Type': 'work'}
-        #Create volume
+        # Create volume
         resp, volume = self.client.create_volume(size=1,
                                                  display_name=v_name,
                                                  metadata=metadata)
         self.addCleanup(self._delete_volume, volume)
         self.assertEqual(200, resp.status)
-        self.assertTrue('id' in volume)
-        self.assertTrue('displayName' in volume)
+        self.assertIn('id', volume)
+        self.assertIn('displayName', volume)
         self.assertEqual(volume['displayName'], v_name,
                          "The created volume name is not equal "
                          "to the requested name")
         self.assertTrue(volume['id'] is not None,
                         "Field volume id is empty or not found.")
-        #Wait for Volume status to become ACTIVE
+        # Wait for the volume status to become 'available'
         self.client.wait_for_volume_status(volume['id'], 'available')
-        #GET Volume
+        # GET Volume
         resp, fetched_volume = self.client.get_volume(volume['id'])
         self.assertEqual(200, resp.status)
-        #Verfication of details of fetched Volume
+        # Verification of details of the fetched Volume
         self.assertEqual(v_name,
                          fetched_volume['displayName'],
                          'The fetched Volume is different '
@@ -71,27 +74,27 @@
     def test_volume_get_metadata_none(self):
         # CREATE, GET empty metadata dict
         v_name = rand_name('Volume-')
-        #Create volume
+        # Create volume
         resp, volume = self.client.create_volume(size=1,
                                                  display_name=v_name,
                                                  metadata={})
         self.addCleanup(self._delete_volume, volume)
         self.assertEqual(200, resp.status)
-        self.assertTrue('id' in volume)
-        self.assertTrue('displayName' in volume)
-        #Wait for Volume status to become ACTIVE
+        self.assertIn('id', volume)
+        self.assertIn('displayName', volume)
+        # Wait for the volume status to become 'available'
         self.client.wait_for_volume_status(volume['id'], 'available')
-        #GET Volume
+        # GET Volume
         resp, fetched_volume = self.client.get_volume(volume['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(fetched_volume['metadata'], {})
 
     def _delete_volume(self, volume):
-        #Delete the Volume created in this method
+        # Delete the Volume created in this method
         try:
             resp, _ = self.client.delete_volume(volume['id'])
             self.assertEqual(202, resp.status)
-            #Checking if the deleted Volume still exists
+            # Checking if the deleted Volume still exists
             self.client.wait_for_resource_deletion(volume['id'])
         except KeyError:
             return
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index d52349e..0e475cf 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -36,6 +36,9 @@
     def setUpClass(cls):
         super(VolumesTestJSON, cls).setUpClass()
         cls.client = cls.volumes_extensions_client
+        if not cls.config.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
         # Create 3 Volumes
         cls.volume_list = []
         cls.volume_id_list = []
@@ -94,10 +97,10 @@
     @attr(type='gate')
     def test_volume_list_with_details(self):
         # Should return the list of Volumes with details
-        #Fetch all Volumes
+        # Fetch all Volumes
         resp, fetched_list = self.client.list_volumes_with_detail()
         self.assertEqual(200, resp.status)
-        #Now check if all the Volumes created in setup are in fetched list
+        # Now check if all the Volumes created in setup are in fetched list
         missing_volumes = [
             v for v in self.volume_list if v not in fetched_list
         ]
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index de214fc..c91e95b 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -28,11 +28,14 @@
     def setUpClass(cls):
         super(VolumesNegativeTest, cls).setUpClass()
         cls.client = cls.volumes_extensions_client
+        if not cls.config.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_volume_get_nonexistant_volume_id(self):
         # Negative: Should not be able to get details of nonexistant volume
-        #Creating a nonexistant volume id
+        # Creating a nonexistent volume id
         volume_id_list = list()
         resp, body = self.client.list_volumes()
         for i in range(len(body)):
@@ -45,7 +48,7 @@
         self.assertRaises(exceptions.NotFound, self.client.get_volume,
                           non_exist_id)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_volume_delete_nonexistant_volume_id(self):
         # Negative: Should not be able to delete nonexistant Volume
         # Creating nonexistant volume id
@@ -61,7 +64,7 @@
         self.assertRaises(exceptions.NotFound, self.client.delete_volume,
                           non_exist_id)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_create_volume_with_invalid_size(self):
         # Negative: Should not be able to create volume with invalid size
         # in request
@@ -70,7 +73,7 @@
         self.assertRaises(exceptions.BadRequest, self.client.create_volume,
                           size='#$%', display_name=v_name, metadata=metadata)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_create_volume_with_out_passing_size(self):
         # Negative: Should not be able to create volume without passing size
         # in request
@@ -79,7 +82,7 @@
         self.assertRaises(exceptions.BadRequest, self.client.create_volume,
                           size='', display_name=v_name, metadata=metadata)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_create_volume_with_size_zero(self):
         # Negative: Should not be able to create volume with size zero
         v_name = rand_name('Volume-')
@@ -87,25 +90,25 @@
         self.assertRaises(exceptions.BadRequest, self.client.create_volume,
                           size='0', display_name=v_name, metadata=metadata)
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_get_invalid_volume_id(self):
         # Negative: Should not be able to get volume with invalid id
         self.assertRaises(exceptions.NotFound,
                           self.client.get_volume, '#$%%&^&^')
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_get_volume_without_passing_volume_id(self):
         # Negative: Should not be able to get volume when empty ID is passed
         self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_delete_invalid_volume_id(self):
         # Negative: Should not be able to delete volume when invalid ID is
         # passed
         self.assertRaises(exceptions.NotFound,
                           self.client.delete_volume, '!@#$%^&*()')
 
-    @attr(type='gate')
+    @attr(type=['negative', 'gate'])
     def test_delete_volume_without_passing_volume_id(self):
         # Negative: Should not be able to delete volume when empty ID is passed
         self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/identity/__init__.py b/tempest/api/identity/__init__.py
index 718aa15..0ed47f5 100644
--- a/tempest/api/identity/__init__.py
+++ b/tempest/api/identity/__init__.py
@@ -15,7 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index 08b86ca..cc112cc 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -73,7 +73,7 @@
         # Role should be created, verified, and deleted
         role_name = rand_name('role-test-')
         resp, body = self.client.create_role(role_name)
-        self.assertTrue('status' in resp)
+        self.assertIn('status', resp)
         self.assertTrue(resp['status'].startswith('2'))
         self.assertEqual(role_name, body['name'])
 
@@ -82,7 +82,7 @@
         self.assertTrue(any(found))
 
         resp, body = self.client.delete_role(found[0]['id'])
-        self.assertTrue('status' in resp)
+        self.assertIn('status', resp)
         self.assertTrue(resp['status'].startswith('2'))
 
         resp, body = self.client.list_roles()
@@ -100,7 +100,7 @@
         role_name = rand_name('role-dup-')
         resp, body = self.client.create_role(role_name)
         role1_id = body.get('id')
-        self.assertTrue('status' in resp)
+        self.assertIn('status', resp)
         self.assertTrue(resp['status'].startswith('2'))
         self.addCleanup(self.client.delete_role, role1_id)
         self.assertRaises(exceptions.Duplicate, self.client.create_role,
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index 644853a..2be0c29 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -29,33 +29,33 @@
     def test_create_get_delete_service(self):
         # GET Service
         try:
-            #Creating a Service
+            # Creating a Service
             name = rand_name('service-')
             type = rand_name('type--')
             description = rand_name('description-')
             resp, service_data = self.client.create_service(
                 name, type, description=description)
             self.assertTrue(resp['status'].startswith('2'))
-            #Verifying response body of create service
-            self.assertTrue('id' in service_data)
+            # Verifying response body of create service
+            self.assertIn('id', service_data)
             self.assertFalse(service_data['id'] is None)
-            self.assertTrue('name' in service_data)
+            self.assertIn('name', service_data)
             self.assertEqual(name, service_data['name'])
-            self.assertTrue('type' in service_data)
+            self.assertIn('type', service_data)
             self.assertEqual(type, service_data['type'])
-            self.assertTrue('description' in service_data)
+            self.assertIn('description', service_data)
             self.assertEqual(description, service_data['description'])
-            #Get service
+            # Get service
             resp, fetched_service = self.client.get_service(service_data['id'])
             self.assertTrue(resp['status'].startswith('2'))
-            #verifying the existence of service created
-            self.assertTrue('id' in fetched_service)
+            # verifying the existence of service created
+            self.assertIn('id', fetched_service)
             self.assertEquals(fetched_service['id'], service_data['id'])
-            self.assertTrue('name' in fetched_service)
+            self.assertIn('name', fetched_service)
             self.assertEqual(fetched_service['name'], service_data['name'])
-            self.assertTrue('type' in fetched_service)
+            self.assertIn('type', fetched_service)
             self.assertEqual(fetched_service['type'], service_data['type'])
-            self.assertTrue('description' in fetched_service)
+            self.assertIn('description', fetched_service)
             self.assertEqual(fetched_service['description'],
                              service_data['description'])
         finally:
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 0bba250..6f90b04 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -240,12 +240,12 @@
         self.assertEquals('200', resp['status'])
         user_ids.append(user2['id'])
         self.data.users.append(user2)
-        #List of users for the respective tenant ID
+        # List of users for the respective tenant ID
         resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
-        self.assertTrue(resp['status'] in ('200', '203'))
+        self.assertIn(resp['status'], ('200', '203'))
         for i in body:
             fetched_user_ids.append(i['id'])
-        #verifying the user Id in the list
+        # verifying the user Id in the list
         missing_users =\
             [user for user in user_ids if user not in fetched_user_ids]
         self.assertEqual(0, len(missing_users),
@@ -260,7 +260,7 @@
         user = self.get_user_by_name(self.data.test_user)
         tenant = self.get_tenant_by_name(self.data.test_tenant)
         role = self.get_role_by_name(self.data.test_role)
-        #Assigning roles to two users
+        # Assigning roles to two users
         user_ids = list()
         fetched_user_ids = list()
         user_ids.append(user['id'])
@@ -277,12 +277,12 @@
                                                   second_user['id'],
                                                   role['id'])
         self.assertEquals('200', resp['status'])
-        #List of users with roles for the respective tenant ID
+        # List of users with roles for the respective tenant ID
         resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
         self.assertEquals('200', resp['status'])
         for i in body:
             fetched_user_ids.append(i['id'])
-        #verifying the user Id in the list
+        # verifying the user Id in the list
         missing_users = [missing_user for missing_user in user_ids
                          if missing_user not in fetched_user_ids]
         self.assertEqual(0, len(missing_users),
@@ -293,13 +293,13 @@
     def test_list_users_with_invalid_tenant(self):
         # Should not be able to return a list of all
         # users for a nonexistant tenant
-        #Assign invalid tenant ids
+        # Assign invalid tenant ids
         invalid_id = list()
         invalid_id.append(rand_name('999'))
         invalid_id.append('alpha')
         invalid_id.append(rand_name("dddd@#%%^$"))
         invalid_id.append('!@#()$%^&*?<>{}[]')
-        #List the users with invalid tenant id
+        # List the users with invalid tenant id
         for invalid in invalid_id:
             self.assertRaises(exceptions.NotFound,
                               self.client.list_users_for_tenant, invalid)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 3d40eb3..9136934 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -33,7 +33,7 @@
 
     @attr(type='smoke')
     def test_list_domains(self):
-        #Test to list domains
+        # Test to list domains
         domain_ids = list()
         fetched_ids = list()
         for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index f01cc64..9f7b24b 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -37,7 +37,7 @@
                                                description=s_description)
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
-        #Create endpoints so as to use for LIST and GET test cases
+        # Create endpoints to use for the LIST and GET test cases
         cls.setup_endpoints = list()
         for i in range(2):
             region = rand_name('region')
@@ -58,7 +58,7 @@
     def test_list_endpoints(self):
         # Get a list of endpoints
         resp, fetched_endpoints = self.client.list_endpoints()
-        #Asserting LIST Endpoint
+        # Asserting LIST Endpoint
         self.assertEqual(resp['status'], '200')
         missing_endpoints =\
             [e for e in self.setup_endpoints if e not in fetched_endpoints]
@@ -78,11 +78,11 @@
                 self.client.create_endpoint(self.service_id, interface, url,
                                             region=region, enabled=True)
             create_flag = True
-            #Asserting Create Endpoint response body
+            # Asserting Create Endpoint response body
             self.assertEqual(resp['status'], '201')
             self.assertEqual(region, endpoint['region'])
             self.assertEqual(url, endpoint['url'])
-            #Checking if created endpoint is present in the list of endpoints
+            # Checking if created endpoint is present in the list of endpoints
             resp, fetched_endpoints = self.client.list_endpoints()
             for e in fetched_endpoints:
                 if endpoint['id'] == e['id']:
@@ -93,12 +93,12 @@
         finally:
             if create_flag:
                 matched = False
-                #Deleting the endpoint created in this method
+                # Deleting the endpoint created in this method
                 resp_header, resp_body =\
                     self.client.delete_endpoint(endpoint['id'])
                 self.assertEqual(resp_header['status'], '204')
                 self.assertEqual(resp_body, '')
-                #Checking whether endpoint is deleted successfully
+                # Checking whether endpoint is deleted successfully
                 resp, fetched_endpoints = self.client.list_endpoints()
                 for e in fetched_endpoints:
                     if endpoint['id'] == e['id']:
@@ -108,8 +108,8 @@
 
     @attr(type='smoke')
     def test_update_endpoint(self):
-        #Creating an endpoint so as to check update endpoint
-        #with new values
+        # Creating an endpoint so that we can verify updating it
+        # with new values
         region1 = rand_name('region')
         url1 = rand_name('url')
         interface1 = 'public'
@@ -117,7 +117,7 @@
             self.client.create_endpoint(self.service_id, interface1,
                                         url1, region=region1,
                                         enabled=True)
-        #Creating service so as update endpoint with new service ID
+        # Creating a service so as to update endpoint with new service ID
         s_name = rand_name('service-')
         s_type = rand_name('type--')
         s_description = rand_name('description-')
@@ -125,7 +125,7 @@
             self.identity_client.create_service(s_name, s_type,
                                                 description=s_description)
         self.service_ids.append(self.service2['id'])
-        #Updating endpoint with new values
+        # Updating endpoint with new values
         region2 = rand_name('region')
         url2 = rand_name('url')
         interface2 = 'internal'
@@ -135,7 +135,7 @@
                                         interface=interface2, url=url2,
                                         region=region2, enabled=False)
         self.assertEqual(resp['status'], '200')
-        #Asserting if the attributes of endpoint are updated
+        # Assert that the attributes of the endpoint were updated
         self.assertEqual(self.service2['id'], endpoint['service_id'])
         self.assertEqual(interface2, endpoint['interface'])
         self.assertEqual(url2, endpoint['url'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 799b081..737a0e0 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -29,7 +29,7 @@
 
     @attr(type='smoke')
     def test_list_policies(self):
-        #Test to list policies
+        # Test to list policies
         policy_ids = list()
         fetched_ids = list()
         for _ in range(3):
@@ -50,7 +50,7 @@
 
     @attr(type='smoke')
     def test_create_update_delete_policy(self):
-        #Test to update policy
+        # Test to create, update and delete a policy
         blob = rand_name('BlobName-')
         policy_type = rand_name('PolicyType-')
         resp, policy = self.policy_client.create_policy(blob, policy_type)
@@ -63,12 +63,12 @@
         self.assertEqual(policy_type, policy['type'])
         resp, fetched_policy = self.policy_client.get_policy(policy['id'])
         self.assertEqual(resp['status'], '200')
-        #Update policy
+        # Update policy
         update_type = rand_name('UpdatedPolicyType-')
         resp, data = self.policy_client.update_policy(
             policy['id'], type=update_type)
-        self.assertTrue('type' in data)
-        #Assertion for updated value with fetched value
+        self.assertIn('type', data)
+        # Compare the updated value with the fetched value
         resp, fetched_policy = self.policy_client.get_policy(policy['id'])
         self.assertIn('id', fetched_policy)
         self.assertIn('blob', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index b35b93a..bfa0d84 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -1,4 +1,4 @@
-#vim: tabstop=4 shiftwidth=4 softtabstop=4
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2013 OpenStack Foundation
 # All Rights Reserved.
@@ -33,7 +33,7 @@
         resp, body = self.client.create_service(
             name, type, description=description)
         self.assertEqual('200', resp['status'])
-        #Deleting the service created in this method
+        # Deleting the service created in this method
         self.addCleanup(self.client.delete_service, body['id'])
 
         s_id = body['id']
@@ -46,7 +46,7 @@
         self.assertEqual('200', resp['status'])
         self.assertNotEqual(resp1_desc, resp2_desc)
 
-        #Get service
+        # Get service
         resp, body = self.client.get_service(s_id)
         resp3_desc = body['description']
 
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 04e993d..bf7a554 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -26,7 +26,7 @@
     @attr(type='gate')
     def test_user_update(self):
         # Test case to check if updating of user attributes is successful.
-        #Creating first user
+        # Creating first user
         u_name = rand_name('user-')
         u_desc = u_name + 'description'
         u_email = u_name + '@testmail.tm'
@@ -36,12 +36,12 @@
             email=u_email, enabled=False)
         # Delete the User at the end of this method
         self.addCleanup(self.v3_client.delete_user, user['id'])
-        #Creating second project for updation
+        # Creating a second project for the update
         resp, project = self.v3_client.create_project(
             rand_name('project-'), description=rand_name('project-desc-'))
         # Delete the Project at the end of this method
         self.addCleanup(self.v3_client.delete_project, project['id'])
-        #Updating user details with new values
+        # Updating user details with new values
         u_name2 = rand_name('user2-')
         u_email2 = u_name2 + '@testmail.tm'
         u_description2 = u_name2 + ' description'
@@ -49,7 +49,7 @@
             user['id'], name=u_name2, description=u_description2,
             project_id=project['id'],
             email=u_email2, enabled=False)
-        #Assert response body of update user.
+        # Assert response body of update user.
         self.assertEqual(200, resp.status)
         self.assertEqual(u_name2, update_user['name'])
         self.assertEqual(u_description2, update_user['description'])
@@ -57,9 +57,9 @@
                          update_user['project_id'])
         self.assertEqual(u_email2, update_user['email'])
         self.assertEqual('false', str(update_user['enabled']).lower())
-        #GET by id after updation
+        # GET by id after the update
         resp, new_user_get = self.v3_client.get_user(user['id'])
-        #Assert response body of GET after updation
+        # Assert response body of GET after the update
         self.assertEqual(u_name2, new_user_get['name'])
         self.assertEqual(u_description2, new_user_get['description'])
         self.assertEqual(project['id'],
@@ -69,14 +69,14 @@
 
     @attr(type='gate')
     def test_list_user_projects(self):
-        #List the projects that a user has access upon
+        # List the projects that a user has access to
         assigned_project_ids = list()
         fetched_project_ids = list()
         _, u_project = self.v3_client.create_project(
             rand_name('project-'), description=rand_name('project-desc-'))
         # Delete the Project at the end of this method
         self.addCleanup(self.v3_client.delete_project, u_project['id'])
-        #Create a user.
+        # Create a user.
         u_name = rand_name('user-')
         u_desc = u_name + 'description'
         u_email = u_name + '@testmail.tm'
@@ -100,7 +100,7 @@
             _, project = self.v3_client.get_project(project_body['id'])
             # Delete the Project at the end of this method
             self.addCleanup(self.v3_client.delete_project, project_body['id'])
-            #Assigning roles to user on project
+            # Assigning roles to user on project
             self.v3_client.assign_user_role(project['id'],
                                             user['id'],
                                             role['id'])
@@ -109,7 +109,7 @@
         self.assertEqual(200, resp.status)
         for i in body:
             fetched_project_ids.append(i['id'])
-        #verifying the project ids in list
+        # verifying the project ids in list
         missing_projects =\
             [p for p in assigned_project_ids
              if p not in fetched_project_ids]
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index e62d84b..086c50e 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -15,9 +15,9 @@
 #    under the License.
 
 from tempest import clients
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 import tempest.test
 
 LOG = logging.getLogger(__name__)
@@ -28,8 +28,20 @@
 
     @classmethod
     def setUpClass(cls):
-        cls.os = clients.Manager()
+        cls.isolated_creds = []
         cls.created_images = []
+        cls._interface = 'json'
+        if not cls.config.service_available.glance:
+            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
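+        # Use isolated credentials when tenant isolation is enabled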
+        if cls.config.compute.allow_tenant_isolation:
+            creds = cls._get_isolated_creds()
+            username, tenant_name, password = creds
+            cls.os = clients.Manager(username=username,
+                                     password=password,
+                                     tenant_name=tenant_name)
+        else:
+            cls.os = clients.Manager()
 
     @classmethod
     def tearDownClass(cls):
@@ -41,6 +53,7 @@
 
         for image_id in cls.created_images:
                 cls.client.wait_for_resource_deletion(image_id)
+        cls._clear_isolated_creds()
 
     @classmethod
     def create_image(cls, **kwargs):
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 640daa5..327df0f 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -45,7 +45,7 @@
                                        disk_format='raw',
                                        is_public=True,
                                        properties=properties)
-        self.assertTrue('id' in body)
+        self.assertIn('id', body)
         image_id = body.get('id')
         self.assertEqual('New Name', body.get('name'))
         self.assertTrue(body.get('is_public'))
@@ -56,7 +56,7 @@
         # Now try uploading an image file
         image_file = StringIO.StringIO(('*' * 1024))
         resp, body = self.client.update_image(image_id, data=image_file)
-        self.assertTrue('size' in body)
+        self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
     @attr(type='gate')
@@ -69,7 +69,7 @@
                                                 '/someimage.iso',
                                        properties={'key1': 'value1',
                                                    'key2': 'value2'})
-        self.assertTrue('id' in body)
+        self.assertIn('id', body)
         self.assertEqual('New Remote Image', body.get('name'))
         self.assertTrue(body.get('is_public'))
         self.assertEqual('active', body.get('status'))
@@ -83,7 +83,7 @@
                                        container_format='bare',
                                        disk_format='raw', is_public=True,
                                        copy_from=self.config.images.http_image)
-        self.assertTrue('id' in body)
+        self.assertIn('id', body)
         image_id = body.get('id')
         self.assertEqual('New Http Image', body.get('name'))
         self.assertTrue(body.get('is_public'))
@@ -101,7 +101,7 @@
                                        is_public=True,
                                        min_ram=40,
                                        properties=properties)
-        self.assertTrue('id' in body)
+        self.assertIn('id', body)
         self.assertEqual('New_image_with_min_ram', body.get('name'))
         self.assertTrue(body.get('is_public'))
         self.assertEqual('queued', body.get('status'))
@@ -184,7 +184,7 @@
         self.assertEqual(resp['status'], '200')
         image_list = map(lambda x: x['id'], images_list)
         for image_id in self.created_images:
-            self.assertTrue(image_id in image_list)
+            self.assertIn(image_id, image_list)
 
     @attr(type='gate')
     def test_index_disk_format(self):
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 34db6e3..ad7be39 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -48,13 +48,13 @@
                                        container_format='bare',
                                        disk_format='raw',
                                        visibility='public')
-        self.assertTrue('id' in body)
+        self.assertIn('id', body)
         image_id = body.get('id')
-        self.assertTrue('name' in body)
+        self.assertIn('name', body)
         self.assertEqual('New Name', body.get('name'))
-        self.assertTrue('visibility' in body)
+        self.assertIn('visibility', body)
+        self.assertEqual('public', body.get('visibility'))
-        self.assertTrue('status' in body)
+        self.assertIn('status', body)
         self.assertEqual('queued', body.get('status'))
 
         # Now try uploading an image file
@@ -62,7 +62,7 @@
         resp, body = self.client.store_image(image_id, image_file)
         self.assertEqual(resp.status, 204)
         resp, body = self.client.get_image_metadata(image_id)
-        self.assertTrue('size' in body)
+        self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
 
@@ -104,4 +104,9 @@
         self.assertEqual(resp['status'], '200')
         image_list = map(lambda x: x['id'], images_list)
         for image in self.created_images:
-            self.assertTrue(image in image_list)
+            self.assertIn(image, image_list)
+
+    @attr(type=['negative', 'gate'])
+    def test_get_image_meta_by_null_id(self):
+        self.assertRaises(exceptions.NotFound,
+                          self.client.get_image_metadata, '')
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 3b7f9dd..d3fa763 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -38,20 +38,28 @@
 
         tenant_network_mask_bits with the mask bits to be used to partition the
         block defined by tenant-network_cidr
+
+    Finally, it is assumed that the following option is defined in the
+    [service_available] section of etc/tempest.conf
+
+        neutron as True
     """
 
     @classmethod
     def setUpClass(cls):
         os = clients.Manager()
         cls.network_cfg = os.config.network
-        if not cls.network_cfg.neutron_available:
+        if not cls.config.service_available.neutron:
             raise cls.skipException("Neutron support is required")
         cls.client = os.network_client
         cls.networks = []
         cls.subnets = []
+        cls.ports = []
 
     @classmethod
     def tearDownClass(cls):
+        for port in cls.ports:
+            cls.client.delete_port(port['id'])
         for subnet in cls.subnets:
             cls.client.delete_subnet(subnet['id'])
         for network in cls.networks:
@@ -93,3 +101,11 @@
         subnet = body['subnet']
         cls.subnets.append(subnet)
         return subnet
+
+    @classmethod
+    def create_port(cls, network):
+        """Wrapper utility that returns a test port."""
+        resp, body = cls.client.create_port(network['id'])
+        port = body['port']
+        cls.ports.append(port)
+        return port
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 4481853..00a8ef7 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -35,6 +35,13 @@
         create a subnet for a tenant
         list tenant's subnets
         show a tenant subnet details
+        port create
+        port delete
+        port list
+        port show
+        port update
+        network update
+        subnet update
 
     v2.0 of the Neutron API is assumed. It is also assumed that the following
     options are defined in the [network] section of etc/tempest.conf:
@@ -53,21 +60,28 @@
         cls.name = cls.network['name']
         cls.subnet = cls.create_subnet(cls.network)
         cls.cidr = cls.subnet['cidr']
+        cls.port = cls.create_port(cls.network)
 
     @attr(type='gate')
-    def test_create_delete_network_subnet(self):
+    def test_create_update_delete_network_subnet(self):
         # Creates a network
         name = rand_name('network-')
         resp, body = self.client.create_network(name)
         self.assertEqual('201', resp['status'])
         network = body['network']
-        self.assertTrue(network['id'] is not None)
+        net_id = network['id']
+        # Verification of network update
+        new_name = "New_network"
+        resp, body = self.client.update_network(net_id, new_name)
+        self.assertEqual('200', resp['status'])
+        updated_net = body['network']
+        self.assertEqual(updated_net['name'], new_name)
         # Find a cidr that is not in use yet and create a subnet with it
         cidr = netaddr.IPNetwork(self.network_cfg.tenant_network_cidr)
         mask_bits = self.network_cfg.tenant_network_mask_bits
         for subnet_cidr in cidr.subnet(mask_bits):
             try:
-                resp, body = self.client.create_subnet(network['id'],
+                resp, body = self.client.create_subnet(net_id,
                                                        str(subnet_cidr))
                 break
             except exceptions.BadRequest as e:
@@ -76,11 +90,17 @@
                     raise
         self.assertEqual('201', resp['status'])
         subnet = body['subnet']
-        self.assertTrue(subnet['id'] is not None)
-        #Deletes subnet and network
-        resp, body = self.client.delete_subnet(subnet['id'])
+        subnet_id = subnet['id']
+        # Verification of subnet update
+        new_subnet = "New_subnet"
+        resp, body = self.client.update_subnet(subnet_id, new_subnet)
+        self.assertEqual('200', resp['status'])
+        updated_subnet = body['subnet']
+        self.assertEqual(updated_subnet['name'], new_subnet)
+        # Deletes subnet and network
+        resp, body = self.client.delete_subnet(subnet_id)
         self.assertEqual('204', resp['status'])
-        resp, body = self.client.delete_network(network['id'])
+        resp, body = self.client.delete_network(net_id)
         self.assertEqual('204', resp['status'])
 
     @attr(type='gate')
@@ -97,8 +117,12 @@
         # Verify the network exists in the list of all networks
         resp, body = self.client.list_networks()
         networks = body['networks']
-        found = any(n for n in networks if n['id'] == self.network['id'])
-        self.assertTrue(found)
+        found = None
+        for n in networks:
+            if n['id'] == self.network['id']:
+                found = n['id']
+        msg = "Network list doesn't contain created network"
+        self.assertIsNotNone(found, msg)
 
     @attr(type='gate')
     def test_show_subnet(self):
@@ -114,5 +138,57 @@
         # Verify the subnet exists in the list of all subnets
         resp, body = self.client.list_subnets()
         subnets = body['subnets']
-        found = any(n for n in subnets if n['id'] == self.subnet['id'])
-        self.assertTrue(found)
+        found = None
+        for n in subnets:
+            if n['id'] == self.subnet['id']:
+                found = n['id']
+        msg = "Subnet list doesn't contain created subnet"
+        self.assertIsNotNone(found, msg)
+
+    @attr(type='gate')
+    def test_create_update_delete_port(self):
+        # Verify successful creation, update and deletion of a port
+        resp, body = self.client.create_port(self.network['id'])
+        self.assertEqual('201', resp['status'])
+        port = body['port']
+        # Verification of port update
+        new_port = "New_Port"
+        resp, body = self.client.update_port(port['id'], new_port)
+        self.assertEqual('200', resp['status'])
+        updated_port = body['port']
+        self.assertEqual(updated_port['name'], new_port)
+        # Verification of port delete
+        resp, body = self.client.delete_port(port['id'])
+        self.assertEqual('204', resp['status'])
+
+    @attr(type='gate')
+    def test_show_ports(self):
+        # Verify the details of port
+        resp, body = self.client.show_port(self.port['id'])
+        self.assertEqual('200', resp['status'])
+        port = body['port']
+        self.assertEqual(self.port['id'], port['id'])
+
+    @attr(type='gate')
+    def test_list_ports(self):
+        # Verify the port exists in the list of all ports
+        resp, body = self.client.list_ports()
+        self.assertEqual('200', resp['status'])
+        ports_list = body['ports']
+        found = None
+        for n in ports_list:
+            if n['id'] == self.port['id']:
+                found = n['id']
+        self.assertIsNotNone(found, "Port list doesn't contain created port")
+
+    @attr(type=['negative', 'gate'])
+    def test_show_non_existent_network(self):
+        non_exist_id = rand_name('network')
+        self.assertRaises(exceptions.NotFound, self.client.show_network,
+                          non_exist_id)
+
+    @attr(type=['negative', 'gate'])
+    def test_show_non_existent_subnet(self):
+        non_exist_id = rand_name('subnet')
+        self.assertRaises(exceptions.NotFound, self.client.show_subnet,
+                          non_exist_id)
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/test_quotas.py
new file mode 100644
index 0000000..ba70f34
--- /dev/null
+++ b/tempest/api/network/test_quotas.py
@@ -0,0 +1,91 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from tempest.api.network import base
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class QuotasTest(base.BaseNetworkTest):
+
+    """
+    Tests the following operations in the Neutron API using the REST client for
+    Neutron:
+
+        list quotas for tenants who have non-default quota values
+        show quotas for a specified tenant
+        update quotas for a specified tenant
+        reset quotas to default values for a specified tenant
+
+    v2.0 of the API is assumed. It is also assumed that the following
+    option is defined in the [service_available] section of etc/tempest.conf:
+
+        neutron as True
+
+    Finally, it is assumed that the per-tenant quota extension API is
+    configured in /etc/neutron/neutron.conf as follows:
+
+        quota_driver = neutron.db.quota_db.DbQuotaDriver
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(QuotasTest, cls).setUpClass()
+        admin_manager = clients.AdminManager()
+        cls.admin_client = admin_manager.network_client
+        cls.identity_admin_client = admin_manager.identity_client
+
+    @attr(type='gate')
+    def test_quotas(self):
+        # Add a tenant to conduct the test
+        test_tenant = rand_name('test_tenant_')
+        test_description = rand_name('desc_')
+        _, tenant = self.identity_admin_client.create_tenant(
+            name=test_tenant,
+            description=test_description)
+        tenant_id = tenant['id']
+        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+        # Change quotas for tenant
+        new_quotas = {'network': 0, 'security_group': 0}
+        resp, quota_set = self.admin_client.update_quotas(tenant_id,
+                                                          **new_quotas)
+        self.assertEqual('200', resp['status'])
+        self.addCleanup(self.admin_client.reset_quotas, tenant_id)
+        self.assertEqual(0, quota_set['network'])
+        self.assertEqual(0, quota_set['security_group'])
+        # Confirm our tenant is listed among tenants with non-default quotas
+        resp, non_default_quotas = self.admin_client.list_quotas()
+        self.assertEqual('200', resp['status'])
+        found = False
+        for qs in non_default_quotas:
+            if qs['tenant_id'] == tenant_id:
+                found = True
+        self.assertTrue(found)
+        # Confirm via the API that quotas were changed as requested
+        resp, quota_set = self.admin_client.show_quotas(tenant_id)
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(0, quota_set['network'])
+        self.assertEqual(0, quota_set['security_group'])
+        # Reset quotas to default and confirm
+        resp, body = self.admin_client.reset_quotas(tenant_id)
+        self.assertEqual('204', resp['status'])
+        resp, non_default_quotas = self.admin_client.list_quotas()
+        self.assertEqual('200', resp['status'])
+        for q in non_default_quotas:
+            self.assertNotEqual(tenant_id, q['tenant_id'])
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index bf013ec..5a1fb5a 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -26,6 +26,9 @@
 
     @classmethod
     def setUpClass(cls):
+        if not cls.config.service_available.swift:
+            skip_msg = ("%s skipped as swift is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
         cls.os = clients.Manager()
         cls.object_client = cls.os.object_client
         cls.container_client = cls.os.container_client
@@ -42,12 +45,6 @@
 
         cls.data = DataGenerator(cls.identity_admin_client)
 
-        try:
-            cls.account_client.list_account_containers()
-        except exceptions.EndpointNotFound:
-            skip_msg = "No OpenStack Object Storage API endpoint"
-            raise cls.skipException(skip_msg)
-
     @classmethod
     def delete_containers(cls, containers, container_client=None,
                           object_client=None):
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 029f2d5..52b37c1 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -42,7 +42,7 @@
 
         self.assertIsNotNone(container_list)
         container_names = [c['name'] for c in container_list]
-        self.assertTrue(self.container_name in container_names)
+        self.assertIn(self.container_name, container_names)
 
     @attr(type='smoke')
     def test_list_account_metadata(self):
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 5cb6341..8b9fc8c 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -37,7 +37,7 @@
         container_name = rand_name(name='TestContainer')
         resp, body = self.container_client.create_container(container_name)
         self.containers.append(container_name)
-        self.assertTrue(resp['status'] in ('202', '201'))
+        self.assertIn(resp['status'], ('202', '201'))
 
     @attr(type='smoke')
     def test_delete_container(self):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index ea8637c..5de4df0 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -67,9 +67,9 @@
                        (cont_client[1].base_url, str(cont[1]))}
             resp, body = \
                 cont_client[0].put(str(cont[0]), body=None, headers=headers)
-            self.assertTrue(resp['status'] in ('202', '201'),
-                            'Error installing X-Container-Sync-To '
-                            'for the container "%s"' % (cont[0]))
+            self.assertIn(resp['status'], ('202', '201'),
+                          'Error installing X-Container-Sync-To '
+                          'for the container "%s"' % (cont[0]))
             # create object in container
             object_name = rand_name(name='TestSyncObject')
             data = object_name[::-1]  # arbitrary_string()
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index aaa2c64..b546cec 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -45,7 +45,7 @@
     @testtools.skip('Until Bug #1069849 is resolved.')
     @attr(type='gate')
     def test_get_object_after_expiry_time(self):
-        #TODO(harika-vakadi): similar test case has to be created for
+        # TODO(harika-vakadi): similar test case has to be created for
         # "X-Delete-At", after this test case works.
 
         # create object
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index ffa534a..d06d942 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -12,11 +12,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 import time
 
 from tempest import clients
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 import tempest.test
 
 
@@ -31,7 +31,7 @@
 
         os = clients.OrchestrationManager()
         cls.orchestration_cfg = os.config.orchestration
-        if not cls.orchestration_cfg.heat_available:
+        if not os.config.service_available.heat:
             raise cls.skipException("Heat support is required")
         cls.build_timeout = cls.orchestration_cfg.build_timeout
         cls.build_interval = cls.orchestration_cfg.build_interval
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
index 4f22158..7897b70 100644
--- a/tempest/api/orchestration/stacks/test_instance_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
@@ -13,13 +13,13 @@
 #    under the License.
 
 import json
-from tempest.common import log as logging
 import testtools
 
 from tempest.api.orchestration import base
 from tempest.common.utils.data_utils import rand_name
 from tempest.common.utils.linux.remote_client import RemoteClient
 import tempest.config
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 
 
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 15979ed..f1f1f7e 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -12,10 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
-
 from tempest.api.orchestration import base
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 
 
diff --git a/tempest/api/utils.py b/tempest/api/utils.py
index 0738201..69ab7fb 100644
--- a/tempest/api/utils.py
+++ b/tempest/api/utils.py
@@ -17,7 +17,7 @@
 
 """Common utilities used in testing."""
 
-from testtools import TestCase
+from tempest.test import BaseTestCase
 
 
 class skip_unless_attr(object):
@@ -32,7 +32,7 @@
             """Wrapped skipper function."""
             testobj = args[0]
             if not getattr(testobj, self.attr, False):
-                raise TestCase.skipException(self.message)
+                raise BaseTestCase.skipException(self.message)
             func(*args, **kw)
         _skipper.__name__ = func.__name__
         _skipper.__doc__ = func.__doc__
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 086b981..b64a324 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -13,8 +13,8 @@
 #    under the License.
 
 from tempest.api.volume import base
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.services.volume.json.admin import volume_types_client
 from tempest.services.volume.json import volumes_client
 from tempest.test import attr
@@ -88,12 +88,12 @@
 
     @classmethod
     def tearDownClass(cls):
-        ## volumes deletion
+        # volumes deletion
         for volume_id in cls.volume_id_list:
             cls.volume_client.delete_volume(volume_id)
             cls.volume_client.wait_for_resource_deletion(volume_id)
 
-        ## volume types deletion
+        # volume types deletion
         for volume_type_id in cls.volume_type_id_list:
             cls.type_client.delete_volume_type(volume_type_id)
 
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 3c4b5d8..27caaad 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -64,14 +64,14 @@
                 vol_type_name,
                 extra_specs=extra_specs)
             self.assertEqual(200, resp.status)
-            self.assertTrue('id' in body)
-            self.assertTrue('name' in body)
+            self.assertIn('id', body)
+            self.assertIn('name', body)
             resp, volume = self.volumes_client.create_volume(
                 size=1, display_name=vol_name,
                 volume_type=vol_type_name)
             self.assertEqual(200, resp.status)
-            self.assertTrue('id' in volume)
-            self.assertTrue('display_name' in volume)
+            self.assertIn('id', volume)
+            self.assertIn('display_name', volume)
             self.assertEqual(volume['display_name'], vol_name,
                              "The created volume name is not equal "
                              "to the requested name")
@@ -113,8 +113,8 @@
                 name,
                 extra_specs=extra_specs)
             self.assertEqual(200, resp.status)
-            self.assertTrue('id' in body)
-            self.assertTrue('name' in body)
+            self.assertIn('id', body)
+            self.assertIn('name', body)
             self.assertEqual(body['name'], name,
                              "The created volume_type name is not equal "
                              "to the requested name")
@@ -137,8 +137,8 @@
                 name,
                 extra_specs=extra_specs)
             self.assertEqual(200, resp.status)
-            self.assertTrue('id' in body)
-            self.assertTrue('name' in body)
+            self.assertIn('id', body)
+            self.assertIn('name', body)
             self.assertEqual(body['name'], name,
                              "The created volume_type name is not equal "
                              "to the requested name")
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index fc510cb..379baa2 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -18,9 +18,7 @@
 import time
 
 from tempest import clients
-from tempest.common import log as logging
-from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
+from tempest.openstack.common import log as logging
 import tempest.test
 
 LOG = logging.getLogger(__name__)
@@ -34,6 +32,10 @@
     def setUpClass(cls):
         cls.isolated_creds = []
 
+        if not cls.config.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
         if cls.config.compute.allow_tenant_isolation:
             creds = cls._get_isolated_creds()
             username, tenant_name, password = creds
@@ -55,72 +57,17 @@
         cls.snapshots = []
         cls.volumes = []
 
-        skip_msg = ("%s skipped as Cinder endpoint is not available" %
-                    cls.__name__)
-        try:
-            cls.volumes_client.keystone_auth(cls.os.username,
-                                             cls.os.password,
-                                             cls.os.auth_url,
-                                             cls.volumes_client.service,
-                                             cls.os.tenant_name)
-        except exceptions.EndpointNotFound:
-            cls.clear_isolated_creds()
-            raise cls.skipException(skip_msg)
-
-    @classmethod
-    def _get_identity_admin_client(cls):
-        """
-        Returns an instance of the Identity Admin API client
-        """
-        os = clients.ComputeAdminManager()
-        return os.identity_client
-
-    @classmethod
-    def _get_isolated_creds(cls):
-        """
-        Creates a new set of user/tenant/password credentials for a
-        **regular** user of the Volume API so that a test case can
-        operate in an isolated tenant container.
-        """
-        admin_client = cls._get_identity_admin_client()
-        rand_name_root = rand_name(cls.__name__)
-        if cls.isolated_creds:
-            # Main user already created. Create the alt one...
-            rand_name_root += '-alt'
-        username = rand_name_root + "-user"
-        email = rand_name_root + "@example.com"
-        tenant_name = rand_name_root + "-tenant"
-        tenant_desc = tenant_name + "-desc"
-        password = "pass"
-
-        resp, tenant = admin_client.create_tenant(name=tenant_name,
-                                                  description=tenant_desc)
-        resp, user = admin_client.create_user(username,
-                                              password,
-                                              tenant['id'],
-                                              email)
-        # Store the complete creds (including UUID ids...) for later
-        # but return just the username, tenant_name, password tuple
-        # that the various clients will use.
-        cls.isolated_creds.append((user, tenant))
-
-        return username, tenant_name, password
-
-    @classmethod
-    def clear_isolated_creds(cls):
-        if not cls.isolated_creds:
-            return
-        admin_client = cls._get_identity_admin_client()
-
-        for user, tenant in cls.isolated_creds:
-            admin_client.delete_user(user['id'])
-            admin_client.delete_tenant(tenant['id'])
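+        # Authenticate the volume client against keystone before running tests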
+        cls.volumes_client.keystone_auth(cls.os.username,
+                                         cls.os.password,
+                                         cls.os.auth_url,
+                                         cls.volumes_client.service,
+                                         cls.os.tenant_name)
 
     @classmethod
     def tearDownClass(cls):
         cls.clear_snapshots()
         cls.clear_volumes()
-        cls.clear_isolated_creds()
+        cls._clear_isolated_creds()
 
     @classmethod
     def create_snapshot(cls, volume_id=1, **kwargs):
@@ -133,7 +80,7 @@
                                                       'available')
         return snapshot
 
-    #NOTE(afazekas): these create_* and clean_* could be defined
+    # NOTE(afazekas): these create_* and clean_* could be defined
     # only in a single location in the source, and could be more general.
 
     @classmethod
@@ -201,6 +148,13 @@
             msg = ("Missing Volume Admin API credentials "
                    "in configuration.")
             raise cls.skipException(msg)
-
-        cls.os_adm = clients.AdminManager(interface=cls._interface)
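+        # Use freshly created admin credentials when tenant isolation is on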
+        if cls.config.compute.allow_tenant_isolation:
+            creds = cls._get_isolated_creds(admin=True)
+            admin_username, admin_tenant_name, admin_password = creds
+            cls.os_adm = clients.Manager(username=admin_username,
+                                         password=admin_password,
+                                         tenant_name=admin_tenant_name,
+                                         interface=cls._interface)
+        else:
+            cls.os_adm = clients.AdminManager(interface=cls._interface)
         cls.client = cls.os_adm.volume_types_client
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 56a3006..5861497 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -82,7 +82,7 @@
         try:
             resp, volume = self.client.get_volume(self.volume['id'])
             self.assertEqual(200, resp.status)
-            self.assertTrue('attachments' in volume)
+            self.assertIn('attachments', volume)
             attachment = volume['attachments'][0]
             self.assertEqual(mountpoint, attachment['device'])
             self.assertEqual(self.server['id'], attachment['server_id'])
@@ -106,3 +106,4 @@
         self.addCleanup(self.image_client.delete_image, image_id)
         self.assertEqual(202, resp.status)
         self.image_client.wait_for_image_status(image_id, 'active')
+        self.client.wait_for_volume_status(self.volume['id'], 'available')
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index eda7153..39f61f3 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -34,14 +34,14 @@
             volume = {}
             v_name = rand_name('Volume')
             metadata = {'Type': 'Test'}
-            #Create a volume
+            # Create a volume
             resp, volume = self.client.create_volume(size=1,
                                                      display_name=v_name,
                                                      metadata=metadata,
                                                      **kwargs)
             self.assertEqual(200, resp.status)
-            self.assertTrue('id' in volume)
-            self.assertTrue('display_name' in volume)
+            self.assertIn('id', volume)
+            self.assertIn('display_name', volume)
             self.assertEqual(volume['display_name'], v_name,
                              "The created volume name is not equal "
                              "to the requested name")
@@ -83,10 +83,10 @@
                                                      display_name=v_name,
                                                      metadata={})
             self.assertEqual(200, resp.status)
-            self.assertTrue('id' in volume)
-            self.assertTrue('display_name' in volume)
+            self.assertIn('id', volume)
+            self.assertIn('display_name', volume)
             self.client.wait_for_volume_status(volume['id'], 'available')
-            #GET Volume
+            # GET Volume
             resp, fetched_volume = self.client.get_volume(volume['id'])
             self.assertEqual(200, resp.status)
             self.assertEqual(fetched_volume['metadata'], {})
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index eea37e0..e2b15a4 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -32,7 +32,7 @@
     @attr(type='gate')
     def test_volume_get_nonexistant_volume_id(self):
         # Should not be able to get a nonexistant volume
-        #Creating a nonexistant volume id
+        # Creating a nonexistent volume id
         volume_id_list = []
         resp, volumes = self.client.list_volumes()
         for i in range(len(volumes)):
@@ -41,7 +41,7 @@
             non_exist_id = rand_name('999')
             if non_exist_id not in volume_id_list:
                 break
-        #Trying to Get a non existant volume
+        # Trying to get a nonexistent volume
         self.assertRaises(exceptions.NotFound, self.client.get_volume,
                           non_exist_id)
 
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 602209a..0328b44 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -13,8 +13,8 @@
 #    under the License.
 
 from tempest.api.volume import base
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 
 LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
index 3eae492..f86adf3 100644
--- a/tempest/cli/README.rst
+++ b/tempest/cli/README.rst
@@ -12,7 +12,7 @@
 Why are these tests in tempest?
 -------------------------------
 These tests exist here because it is extremely difficult to build a
-functional enough environment in the python-*client unit tests to
+functional enough environment in the python-\*client unit tests to
 provide this kind of testing. Because we already put up a cloud in the
 gate with devstack + tempest it was decided it was better to have
 these as a side tree in tempest instead of another QA effort which
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 0e1d6db..00e025d 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -22,7 +22,7 @@
 from oslo.config import cfg
 
 import tempest.cli.output_parser
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 import tempest.test
 
 
@@ -82,6 +82,11 @@
         return self.cmd_with_auth(
             'cinder', action, flags, params, admin, fail_ok)
 
+    def neutron(self, action, flags='', params='', admin=True, fail_ok=False):
+        """Executes neutron command for the given action."""
+        return self.cmd_with_auth(
+            'neutron', action, flags, params, admin, fail_ok)
+
     def cmd_with_auth(self, cmd, action, flags='', params='',
                       admin=True, fail_ok=False):
         """Executes given command with auth attributes appended."""
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index 3ee3098..bfd7f9e 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -17,11 +17,10 @@
 
 """Collection of utilities for parsing CLI clients output."""
 
-
-from tempest.common import log as logging
-
 import re
 
+from tempest.openstack.common import log as logging
+
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index 561fd00..e60e238 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -21,8 +21,7 @@
 import testtools
 
 import tempest.cli
-from tempest.common import log as logging
-
+from tempest.openstack.common import log as logging
 
 CONF = cfg.CONF
 
@@ -69,6 +68,8 @@
     def test_admin_credentials(self):
         self.nova('credentials')
 
+    @testtools.skipIf(CONF.service_available.neutron,
+                      "Neutron does not provide this feature")
     def test_admin_dns_domains(self):
         self.nova('dns-domains')
 
diff --git a/tempest/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
index 802a206..1848827 100644
--- a/tempest/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -18,7 +18,7 @@
 import subprocess
 
 import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 
 
 LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index fa77e8a..3d58451 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -19,7 +19,7 @@
 import subprocess
 
 import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 
 
 LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 3bc8b3e..4002081 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -19,7 +19,7 @@
 import subprocess
 
 import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 
 
 LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
new file mode 100644
index 0000000..7b8340d
--- /dev/null
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -0,0 +1,118 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import re
+import subprocess
+
+from oslo.config import cfg
+
+import tempest.cli
+from tempest.openstack.common import log as logging
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class SimpleReadOnlyNeutronClientTest(tempest.cli.ClientTestBase):
+    """Basic, read-only tests for Neutron CLI client.
+
+    Checks return values and output of read-only commands.
+    These tests do not presume any content, nor do they create
+    their own. They only verify the structure of output if present.
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        if (not CONF.service_available.neutron):
+            msg = "Skiping all Neutron cli tests because it is not available"
+            raise cls.skipException(msg)
+        super(SimpleReadOnlyNeutronClientTest, cls).setUpClass()
+
+    def test_neutron_fake_action(self):
+        self.assertRaises(subprocess.CalledProcessError,
+                          self.neutron,
+                          'this-does-not-exist')
+
+    def test_neutron_net_list(self):
+        self.neutron('net-list')
+
+    def test_neutron_ext_list(self):
+        ext = self.parser.listing(self.neutron('ext-list'))
+        self.assertTableStruct(ext, ['alias', 'name'])
+
+    def test_neutron_dhcp_agent_list_hosting_net(self):
+        self.neutron('dhcp-agent-list-hosting-net',
+                     params=CONF.compute.fixed_network_name)
+
+    def test_neutron_agent_list(self):
+        agents = self.parser.listing(self.neutron('agent-list'))
+        field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
+        self.assertTableStruct(agents, field_names)
+
+    def test_neutron_floatingip_list(self):
+        self.neutron('floatingip-list')
+
+    def test_neutron_net_external_list(self):
+        self.neutron('net-external-list')
+
+    def test_neutron_port_list(self):
+        self.neutron('port-list')
+
+    def test_neutron_quota_list(self):
+        self.neutron('quota-list')
+
+    def test_neutron_router_list(self):
+        self.neutron('router-list')
+
+    def test_neutron_security_group_list(self):
+        security_grp = self.parser.listing(self.neutron('security-group-list'))
+        self.assertTableStruct(security_grp, ['id', 'name', 'description'])
+
+    def test_neutron_security_group_rule_list(self):
+        self.neutron('security-group-rule-list')
+
+    def test_neutron_subnet_list(self):
+        self.neutron('subnet-list')
+
+    def test_neutron_help(self):
+        help_text = self.neutron('help')
+        lines = help_text.split('\n')
+        self.assertTrue(lines[0].startswith('usage: neutron'))
+
+        commands = []
+        cmds_start = lines.index('Commands for API v2.0:')
+        command_pattern = re.compile('^ {2}([a-z0-9\-\_]+)')
+        for line in lines[cmds_start:]:
+            match = command_pattern.match(line)
+            if match:
+                commands.append(match.group(1))
+        commands = set(commands)
+        wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
+                               'router-show', 'agent-update', 'help'))
+        self.assertFalse(wanted_commands - commands)
+
+    # Optional arguments:
+
+    def test_neutron_version(self):
+        self.neutron('', flags='--version')
+
+    def test_neutron_debug_net_list(self):
+        self.neutron('net-list', flags='--debug')
+
+    def test_neutron_quiet_net_list(self):
+        self.neutron('net-list', flags='--quiet')
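
The read-only tests above lean on two helpers from the CLI base class:
``self.parser.listing()`` turns the prettytable output of a command into a
list of dicts, and ``assertTableStruct()`` checks that every row exposes the
expected columns. A minimal sketch of that idea, assuming typical
``| id | name |`` style output (illustration only, not the actual
``tempest.cli`` implementation)::

    def listing(output):
        """Parse '| id | name |' style CLI output into a list of dicts."""
        lines = [l for l in output.splitlines() if l.startswith('|')]
        if not lines:
            return []
        headers = [h.strip() for h in lines[0].strip('|').split('|')]
        return [dict(zip(headers,
                         [v.strip() for v in line.strip('|').split('|')]))
                for line in lines[1:]]

    def assert_table_struct(items, field_names):
        """Every parsed row must contain all of the expected columns."""
        for item in items:
            missing = [f for f in field_names if f not in item]
            assert not missing, "missing columns: %s" % missing
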
diff --git a/tempest/clients.py b/tempest/clients.py
index 5efce98..195cb89 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -15,9 +15,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 from tempest import config
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.services import botoclients
 from tempest.services.compute.json.aggregates_client import \
     AggregatesClientJSON
@@ -274,8 +274,9 @@
 
         if None in (self.username, self.password, self.tenant_name):
             msg = ("Missing required credentials. "
-                   "username: %(username)s, password: %(password)s, "
-                   "tenant_name: %(tenant_name)s") % locals()
+                   "username: %(u)s, password: %(p)s, "
+                   "tenant_name: %(t)s" %
+                   {'u': username, 'p': password, 't': tenant_name})
             raise exceptions.InvalidConfiguration(msg)
 
         self.auth_url = self.config.identity.uri
@@ -295,7 +296,8 @@
         try:
             self.servers_client = SERVERS_CLIENTS[interface](*client_args)
             self.limits_client = LIMITS_CLIENTS[interface](*client_args)
-            self.images_client = IMAGES_CLIENTS[interface](*client_args)
+            if self.config.service_available.glance:
+                self.images_client = IMAGES_CLIENTS[interface](*client_args)
             self.keypairs_client = KEYPAIRS_CLIENTS[interface](*client_args)
             self.quotas_client = QUOTAS_CLIENTS[interface](*client_args)
             self.flavors_client = FLAVORS_CLIENTS[interface](*client_args)
@@ -340,8 +342,9 @@
         self.network_client = NetworkClient(*client_args)
         self.hosts_client = HostsClientJSON(*client_args)
         self.account_client = AccountClient(*client_args)
-        self.image_client = ImageClientJSON(*client_args)
-        self.image_client_v2 = ImageClientV2JSON(*client_args)
+        if self.config.service_available.glance:
+            self.image_client = ImageClientJSON(*client_args)
+            self.image_client_v2 = ImageClientV2JSON(*client_args)
         self.container_client = ContainerClient(*client_args)
         self.object_client = ObjectClient(*client_args)
         self.orchestration_client = OrchestrationClient(*client_args)
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index cd33a22..831874d 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -34,9 +34,8 @@
 
 import OpenSSL
 
-from tempest.common import log as logging
 from tempest import exceptions as exc
-
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 USER_AGENT = 'tempest'
@@ -125,11 +124,12 @@
                 conn.request(method, conn_url, **kwargs)
             resp = conn.getresponse()
         except socket.gaierror as e:
-            message = "Error finding address for %(url)s: %(e)s" % locals()
+            message = ("Error finding address for %(url)s: %(e)s" %
+                       {'url': url, 'e': e})
             raise exc.EndpointNotFound(message)
         except (socket.error, socket.timeout) as e:
-            endpoint = self.endpoint
-            message = "Error communicating with %(endpoint)s %(e)s" % locals()
+            message = ("Error communicating with %(endpoint)s %(e)s" %
+                       {'endpoint': self.endpoint, 'e': e})
             raise exc.TimeoutException(message)
 
         body_iter = ResponseBodyIterator(resp)
diff --git a/tempest/common/log.py b/tempest/common/log.py
deleted file mode 100644
index 2159bfe..0000000
--- a/tempest/common/log.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 NEC Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import ConfigParser
-import inspect
-import logging
-import logging.config
-import os
-import re
-
-from oslo.config import cfg
-
-
-_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
-_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-_loggers = {}
-
-
-def getLogger(name='unknown'):
-    if len(_loggers) == 0:
-        loaded = _load_log_config()
-        getLogger.adapter = TestsAdapter if loaded else None
-
-    if name not in _loggers:
-        logger = logging.getLogger(name)
-        if getLogger.adapter:
-            _loggers[name] = getLogger.adapter(logger, name)
-        else:
-            _loggers[name] = logger
-
-    return _loggers[name]
-
-
-def _load_log_config():
-    conf_dir = os.environ.get('TEMPEST_LOG_CONFIG_DIR', None)
-    conf_file = os.environ.get('TEMPEST_LOG_CONFIG', None)
-    if not conf_dir or not conf_file:
-        return False
-
-    log_config = os.path.join(conf_dir, conf_file)
-    try:
-        logging.config.fileConfig(log_config)
-    except ConfigParser.Error as exc:
-        raise cfg.ConfigFileParseError(log_config, str(exc))
-    return True
-
-
-class TestsAdapter(logging.LoggerAdapter):
-
-    def __init__(self, logger, project_name):
-        self.logger = logger
-        self.project = project_name
-        self.regexp = re.compile(r"test_\w+\.py")
-
-    def __getattr__(self, key):
-        return getattr(self.logger, key)
-
-    def _get_test_name(self):
-        frames = inspect.stack()
-        for frame in frames:
-            binary_name = frame[1]
-            if self.regexp.search(binary_name) and 'self' in frame[0].f_locals:
-                return frame[0].f_locals.get('self').id()
-            elif frame[3] == '_run_cleanups':
-                #NOTE(myamazaki): method calling addCleanup
-                return frame[0].f_locals.get('self').case.id()
-            elif frame[3] in ['setUpClass', 'tearDownClass']:
-                #NOTE(myamazaki): setUpClass or tearDownClass
-                return "%s.%s.%s" % (frame[0].f_locals['cls'].__module__,
-                                     frame[0].f_locals['cls'].__name__,
-                                     frame[3])
-        return None
-
-    def process(self, msg, kwargs):
-        if 'extra' not in kwargs:
-            kwargs['extra'] = {}
-        extra = kwargs['extra']
-
-        test_name = self._get_test_name()
-        if test_name:
-            extra.update({'testname': test_name})
-        extra['extra'] = extra.copy()
-
-        return msg, kwargs
-
-
-class TestsFormatter(logging.Formatter):
-    def __init__(self, fmt=None, datefmt=None):
-        super(TestsFormatter, self).__init__()
-        self.default_format = _DEFAULT_LOG_FORMAT
-        self.testname_format =\
-            "%(asctime)s %(levelname)8s [%(testname)s] %(message)s"
-        self.datefmt = _DEFAULT_LOG_DATE_FORMAT
-
-    def format(self, record):
-        extra = record.__dict__.get('extra', None)
-        if extra and 'testname' in extra:
-            self._fmt = self.testname_format
-        else:
-            self._fmt = self.default_format
-        return logging.Formatter.format(self, record)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index e94455d..09b87b2 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -24,8 +24,8 @@
 import re
 import time
 
-from tempest.common import log as logging
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.services.compute.xml.common import xml_to_json
 
 # redrive rate limited calls at most twice
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index fd5d3d0..de2bf43 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -1,3 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
 import re
 import time
 
diff --git a/tempest/config.py b/tempest/config.py
index 2e56628..19170ae 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -23,10 +23,9 @@
 
 from oslo.config import cfg
 
-from tempest.common import log as logging
 from tempest.common.utils.misc import singleton
+from tempest.openstack.common import log as logging
 
-LOG = logging.getLogger(__name__)
 
 identity_group = cfg.OptGroup(name='identity',
                               title="Keystone Configuration Options")
@@ -160,6 +159,10 @@
     cfg.StrOpt('ssh_user',
                default='root',
                help="User name used to authenticate to an instance."),
+    cfg.IntOpt('ping_timeout',
+               default=60,
+               help="Timeout in seconds to wait for ping to "
+                    "succeed."),
     cfg.IntOpt('ssh_timeout',
                default=300,
                help="Timeout in seconds to wait for authentication to "
@@ -288,7 +291,7 @@
                default="10.100.0.0/16",
                help="The cidr block to allocate tenant networks from"),
     cfg.IntOpt('tenant_network_mask_bits',
-               default=29,
+               default=28,
                help="The mask bits for tenant networks"),
     cfg.BoolOpt('tenant_networks_reachable',
                 default=False,
@@ -302,9 +305,6 @@
                default="",
                help="Id of the public router that provides external "
                     "connectivity"),
-    cfg.BoolOpt('neutron_available',
-                default=False,
-                help="Whether or not neutron is expected to be available"),
 ]
 
 
@@ -394,9 +394,6 @@
     cfg.IntOpt('build_timeout',
                default=300,
                help="Timeout in seconds to wait for a stack to build."),
-    cfg.BoolOpt('heat_available',
-                default=False,
-                help="Whether or not Heat is expected to be available"),
     cfg.StrOpt('instance_type',
                default='m1.micro',
                help="Instance type for tests. Needs to be big enough for a "
@@ -416,6 +413,26 @@
     for opt in OrchestrationGroup:
         conf.register_opt(opt, group='orchestration')
 
+
+dashboard_group = cfg.OptGroup(name="dashboard",
+                               title="Dashboard options")
+
+DashboardGroup = [
+    cfg.StrOpt('dashboard_url',
+               default='http://localhost/',
+               help="Where the dashboard can be found"),
+    cfg.StrOpt('login_url',
+               default='http://localhost/auth/login/',
+               help="Login page for the dashboard"),
+]
+
+
+def register_dashboard_opts(conf):
+    conf.register_group(dashboard_group)
+    for opt in DashboardGroup:
+        conf.register_opt(opt, group='dashboard')
+
+
 boto_group = cfg.OptGroup(name='boto',
                           title='EC2/S3 options')
 BotoConfig = [
@@ -538,6 +555,40 @@
         conf.register_opt(opt, group='scenario')
 
 
+service_available_group = cfg.OptGroup(name="service_available",
+                                       title="Available OpenStack Services")
+
+ServiceAvailableGroup = [
+    cfg.BoolOpt('cinder',
+                default=True,
+                help="Whether or not cinder is expected to be available"),
+    cfg.BoolOpt('neutron',
+                default=False,
+                help="Whether or not neutron is expected to be available"),
+    cfg.BoolOpt('glance',
+                default=True,
+                help="Whether or not glance is expected to be available"),
+    cfg.BoolOpt('swift',
+                default=True,
+                help="Whether or not swift is expected to be available"),
+    cfg.BoolOpt('nova',
+                default=True,
+                help="Whether or not nova is expected to be available"),
+    cfg.BoolOpt('heat',
+                default=False,
+                help="Whether or not Heat is expected to be available"),
+    cfg.BoolOpt('horizon',
+                default=True,
+                help="Whether or not Horizon is expected to be available"),
+]
+
+
+def register_service_available_opts(conf):
+    conf.register_group(service_available_group)
+    for opt in ServiceAvailableGroup:
+        conf.register_opt(opt, group='service_available')
+
+
 @singleton
 class TempestConfig:
     """Provides OpenStack configuration information."""
@@ -551,7 +602,6 @@
     def __init__(self):
         """Initialize a configuration from a conf directory and conf file."""
         config_files = []
-
         failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
 
         # Environment variables override defaults...
@@ -566,15 +616,16 @@
                 'TEMPEST_CONFIG' in os.environ):
             path = failsafe_path
 
-        LOG.info("Using tempest config file %s" % path)
-
         if not os.path.exists(path):
-            msg = "Config file %(path)s not found" % locals()
+            msg = "Config file %s not found" % path
             print(RuntimeError(msg), file=sys.stderr)
         else:
             config_files.append(path)
 
         cfg.CONF([], project='tempest', default_config_files=config_files)
+        logging.setup('tempest')
+        LOG = logging.getLogger('tempest')
+        LOG.info("Using tempest config file %s" % path)
 
         register_compute_opts(cfg.CONF)
         register_identity_opts(cfg.CONF)
@@ -584,10 +635,12 @@
         register_volume_opts(cfg.CONF)
         register_object_storage_opts(cfg.CONF)
         register_orchestration_opts(cfg.CONF)
+        register_dashboard_opts(cfg.CONF)
         register_boto_opts(cfg.CONF)
         register_compute_admin_opts(cfg.CONF)
         register_stress_opts(cfg.CONF)
         register_scenario_opts(cfg.CONF)
+        register_service_available_opts(cfg.CONF)
         self.compute = cfg.CONF.compute
         self.whitebox = cfg.CONF.whitebox
         self.identity = cfg.CONF.identity
@@ -596,10 +649,12 @@
         self.volume = cfg.CONF.volume
         self.object_storage = cfg.CONF['object-storage']
         self.orchestration = cfg.CONF.orchestration
+        self.dashboard = cfg.CONF.dashboard
         self.boto = cfg.CONF.boto
         self.compute_admin = cfg.CONF['compute-admin']
         self.stress = cfg.CONF.stress
         self.scenario = cfg.CONF.scenario
+        self.service_available = cfg.CONF.service_available
         if not self.compute_admin.username:
             self.compute_admin.username = self.identity.admin_username
             self.compute_admin.password = self.identity.admin_password
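
The new ``dashboard`` and ``service_available`` sections follow the usual
oslo.config pattern: declare an ``OptGroup`` plus a list of options, register
them on a ``ConfigOpts`` object, then read the values back as attributes. A
standalone sketch of that pattern (the group and option shown here are
illustrative, not the full Tempest configuration)::

    from oslo.config import cfg

    example_group = cfg.OptGroup(name='service_available',
                                 title='Available OpenStack Services')
    example_opts = [
        cfg.BoolOpt('neutron', default=False,
                    help='Whether or not neutron is expected to be '
                         'available'),
    ]

    conf = cfg.ConfigOpts()
    conf.register_group(example_group)
    for opt in example_opts:
        conf.register_opt(opt, group='service_available')

    conf([])  # parse defaults / config files
    print(conf.service_available.neutron)  # False unless overridden
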
diff --git a/tempest/manager.py b/tempest/manager.py
index 4a447f3..54a0dec 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -15,41 +15,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 import tempest.config
 from tempest import exceptions
-# Tempest REST Fuzz testing client libs
-from tempest.services.compute.json import extensions_client
-from tempest.services.compute.json import flavors_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import hypervisor_client
-from tempest.services.compute.json import images_client
-from tempest.services.compute.json import keypairs_client
-from tempest.services.compute.json import limits_client
-from tempest.services.compute.json import quotas_client
-from tempest.services.compute.json import security_groups_client
-from tempest.services.compute.json import servers_client
-from tempest.services.compute.json import volumes_extensions_client
-from tempest.services.network.json import network_client
-from tempest.services.volume.json import snapshots_client
-from tempest.services.volume.json import volumes_client
-
-NetworkClient = network_client.NetworkClient
-ImagesClient = images_client.ImagesClientJSON
-FlavorsClient = flavors_client.FlavorsClientJSON
-ServersClient = servers_client.ServersClientJSON
-LimitsClient = limits_client.LimitsClientJSON
-ExtensionsClient = extensions_client.ExtensionsClientJSON
-FloatingIPsClient = floating_ips_client.FloatingIPsClientJSON
-SecurityGroupsClient = security_groups_client.SecurityGroupsClientJSON
-KeyPairsClient = keypairs_client.KeyPairsClientJSON
-VolumesExtensionsClient = volumes_extensions_client.VolumesExtensionsClientJSON
-VolumesClient = volumes_client.VolumesClientJSON
-SnapshotsClient = snapshots_client.SnapshotsClientJSON
-QuotasClient = quotas_client.QuotasClientJSON
-HypervisorClient = hypervisor_client.HypervisorClientJSON
-
-LOG = logging.getLogger(__name__)
 
 
 class Manager(object):
@@ -65,100 +32,11 @@
         self.config = tempest.config.TempestConfig()
         self.client_attr_names = []
 
-
-class FuzzClientManager(Manager):
-
-    """
-    Manager class that indicates the client provided by the manager
-    is a fuzz-testing client that Tempest contains. These fuzz-testing
-    clients are used to be able to throw random or invalid data at
-    an endpoint and check for appropriate error messages returned
-    from the endpoint.
-    """
-    pass
-
-
-class ComputeFuzzClientManager(FuzzClientManager):
-
-    """
-    Manager that uses the Tempest REST client that can send
-    random or invalid data at the OpenStack Compute API
-    """
-
-    def __init__(self, username=None, password=None, tenant_name=None):
-        """
-        We allow overriding of the credentials used within the various
-        client classes managed by the Manager object. Left as None, the
-        standard username/password/tenant_name is used.
-
-        :param username: Override of the username
-        :param password: Override of the password
-        :param tenant_name: Override of the tenant name
-        """
-        super(ComputeFuzzClientManager, self).__init__()
-
-        # If no creds are provided, we fall back on the defaults
-        # in the config file for the Compute API.
-        username = username or self.config.identity.username
-        password = password or self.config.identity.password
-        tenant_name = tenant_name or self.config.identity.tenant_name
-
+    # We do this everywhere, so keep it in the base class.
+    def _validate_credentials(self, username, password, tenant_name):
         if None in (username, password, tenant_name):
             msg = ("Missing required credentials. "
-                   "username: %(username)s, password: %(password)s, "
-                   "tenant_name: %(tenant_name)s") % locals()
+                   "username: %(u)s, password: %(p)s, "
+                   "tenant_name: %(t)s" %
+                   {'u': username, 'p': password, 't': tenant_name})
             raise exceptions.InvalidConfiguration(msg)
-
-        auth_url = self.config.identity.uri
-
-        # Ensure /tokens is in the URL for Keystone...
-        if 'tokens' not in auth_url:
-            auth_url = auth_url.rstrip('/') + '/tokens'
-
-        client_args = (self.config, username, password, auth_url,
-                       tenant_name)
-
-        self.servers_client = ServersClient(*client_args)
-        self.flavors_client = FlavorsClient(*client_args)
-        self.images_client = ImagesClient(*client_args)
-        self.limits_client = LimitsClient(*client_args)
-        self.extensions_client = ExtensionsClient(*client_args)
-        self.keypairs_client = KeyPairsClient(*client_args)
-        self.security_groups_client = SecurityGroupsClient(*client_args)
-        self.floating_ips_client = FloatingIPsClient(*client_args)
-        self.volumes_extensions_client = VolumesExtensionsClient(*client_args)
-        self.volumes_client = VolumesClient(*client_args)
-        self.snapshots_client = SnapshotsClient(*client_args)
-        self.quotas_client = QuotasClient(*client_args)
-        self.network_client = NetworkClient(*client_args)
-        self.hypervisor_client = HypervisorClient(*client_args)
-
-
-class ComputeFuzzClientAltManager(Manager):
-
-    """
-    Manager object that uses the alt_XXX credentials for its
-    managed client objects
-    """
-
-    def __init__(self):
-        conf = tempest.config.TempestConfig()
-        super(ComputeFuzzClientAltManager, self).__init__(
-            conf.identity.alt_username,
-            conf.identity.alt_password,
-            conf.identity.alt_tenant_name)
-
-
-class ComputeFuzzClientAdminManager(Manager):
-
-    """
-    Manager object that uses the alt_XXX credentials for its
-    managed client objects
-    """
-
-    def __init__(self):
-        conf = tempest.config.TempestConfig()
-        super(ComputeFuzzClientAdminManager, self).__init__(
-            conf.compute_admin.username,
-            conf.compute_admin.password,
-            conf.compute_admin.tenant_name)
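
With the fuzz-testing managers removed, ``Manager`` only carries the shared
credential check. A hedged sketch of how a concrete manager is expected to use
it; the subclass and attribute resolution below are assumed for illustration,
not taken from this change::

    class ExampleManager(Manager):
        # Hypothetical subclass: resolve credentials from arguments or the
        # identity config, then let the base class reject missing values.
        def __init__(self, username=None, password=None, tenant_name=None):
            super(ExampleManager, self).__init__()
            username = username or self.config.identity.username
            password = password or self.config.identity.password
            tenant_name = tenant_name or self.config.identity.tenant_name
            # Raises exceptions.InvalidConfiguration if anything is None.
            self._validate_credentials(username, password, tenant_name)
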
diff --git a/tempest/openstack/common/__init__.py b/tempest/openstack/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/openstack/common/__init__.py
diff --git a/tempest/openstack/common/excutils.py b/tempest/openstack/common/excutils.py
new file mode 100644
index 0000000..81aad14
--- /dev/null
+++ b/tempest/openstack/common/excutils.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2012, Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Exception related utilities.
+"""
+
+import logging
+import sys
+import time
+import traceback
+
+from tempest.openstack.common.gettextutils import _  # noqa
+
+
+class save_and_reraise_exception(object):
+    """Save current exception, run some code and then re-raise.
+
+    In some cases the exception context can be cleared, resulting in None
+    being attempted to be re-raised after an exception handler is run. This
+    can happen when eventlet switches greenthreads or when running an
+    exception handler, code raises and catches an exception. In both
+    cases the exception context will be cleared.
+
+    To work around this, we save the exception state, run handler code, and
+    then re-raise the original exception. If another exception occurs, the
+    saved exception is logged and the new exception is re-raised.
+
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception.  For example:
+
+    except Exception:
+        with save_and_reraise_exception() as ctxt:
+            decide_if_need_reraise()
+            if not should_be_reraised:
+                ctxt.reraise = False
+    """
+    def __init__(self):
+        self.reraise = True
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            logging.error(_('Original exception being dropped: %s'),
+                          traceback.format_exception(self.type_,
+                                                     self.value,
+                                                     self.tb))
+            return False
+        if self.reraise:
+            raise self.type_, self.value, self.tb
+
+
+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                if exc.message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        exc.message != last_exc_message):
+                    logging.exception(
+                        _('Unexpected exception occurred %d time(s)... '
+                          'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = exc.message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
+    return inner_func
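
A common use of ``save_and_reraise_exception`` in test code is wrapping
best-effort cleanup so that a secondary failure gets logged instead of
silently masking the original error. A small usage sketch; the client calls
are made up for illustration::

    from tempest.openstack.common import excutils

    def boot_or_clean_up(client, server_id):
        try:
            client.boot(server_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                # If this cleanup call raises as well, the original boot
                # failure is logged and the cleanup exception propagates;
                # otherwise the boot failure is re-raised untouched.
                client.delete_server(server_id)
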
diff --git a/tempest/openstack/common/fileutils.py b/tempest/openstack/common/fileutils.py
new file mode 100644
index 0000000..d2e3d3e
--- /dev/null
+++ b/tempest/openstack/common/fileutils.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import contextlib
+import errno
+import os
+
+from tempest.openstack.common import excutils
+from tempest.openstack.common.gettextutils import _  # noqa
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
+
+
+def ensure_tree(path):
+    """Create a directory (and any ancestor directories required)
+
+    :param path: Directory to create
+    """
+    try:
+        os.makedirs(path)
+    except OSError as exc:
+        if exc.errno == errno.EEXIST:
+            if not os.path.isdir(path):
+                raise
+        else:
+            raise
+
+
+def read_cached_file(filename, force_reload=False):
+    """Read from a file if it has been modified.
+
+    :param force_reload: Whether to reload the file.
+    :returns: A tuple of (reloaded, data), where reloaded indicates
+              whether the file was re-read from disk.
+    """
+    global _FILE_CACHE
+
+    if force_reload and filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+    reloaded = False
+    mtime = os.path.getmtime(filename)
+    cache_info = _FILE_CACHE.setdefault(filename, {})
+
+    if not cache_info or mtime > cache_info.get('mtime', 0):
+        LOG.debug(_("Reloading cached file %s") % filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        reloaded = True
+    return (reloaded, cache_info['data'])
+
+
+def delete_if_exists(path):
+    """Delete a file, but ignore file not found error.
+
+    :param path: File to delete
+    """
+
+    try:
+        os.unlink(path)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return
+        else:
+            raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path):
+    """Protect code that wants to operate on PATH atomically.
+    Any exception will cause PATH to be removed.
+
+    :param path: File to work with
+    """
+    try:
+        yield
+    except Exception:
+        with excutils.save_and_reraise_exception():
+            delete_if_exists(path)
+
+
+def file_open(*args, **kwargs):
+    """Open file
+
+    see built-in file() documentation for more details
+
+    Note: The reason this is kept in a separate module is to easily
+    be able to provide a stub module that doesn't alter system
+    state at all (for unit tests)
+    """
+    return file(*args, **kwargs)
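
``remove_path_on_error`` combines ``save_and_reraise_exception`` with
``delete_if_exists`` so a half-written file does not survive a failed
operation. A short usage sketch (the helper below is hypothetical)::

    from tempest.openstack.common import fileutils

    def write_report(path, data):
        # If anything inside the block raises, the partially written file
        # is removed and the original exception is re-raised.
        with fileutils.remove_path_on_error(path):
            with open(path, 'w') as f:
                f.write(data)
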
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
new file mode 100644
index 0000000..8594937
--- /dev/null
+++ b/tempest/openstack/common/gettextutils.py
@@ -0,0 +1,259 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+# All Rights Reserved.
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+gettext for openstack-common modules.
+
+Usual usage in an openstack.common module:
+
+    from tempest.openstack.common.gettextutils import _
+"""
+
+import copy
+import gettext
+import logging.handlers
+import os
+import re
+import UserString
+
+import six
+
+_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
+_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
+
+
+def _(msg):
+    return _t.ugettext(msg)
+
+
+def install(domain):
+    """Install a _() function using the given translation domain.
+
+    Given a translation domain, install a _() function using gettext's
+    install() function.
+
+    The main difference from gettext.install() is that we allow
+    overriding the default localedir (e.g. /usr/share/locale) using
+    a translation-domain-specific environment variable (e.g.
+    NOVA_LOCALEDIR).
+    """
+    gettext.install(domain,
+                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
+                    unicode=True)
+
+
+"""
+Lazy gettext functionality.
+
+The following is an attempt to introduce a deferred way
+to do translations on messages in OpenStack. We attempt to
+override the standard _() function and % (format string) operation
+to build Message objects that can later be translated when we have
+more information. Also included is an example LogHandler that
+translates Messages to an associated locale, effectively allowing
+many logs, each with their own locale.
+"""
+
+
+def get_lazy_gettext(domain):
+    """Assemble and return a lazy gettext function for a given domain.
+
+    Factory method for a project/module to get a lazy gettext function
+    for its own translation domain (i.e. nova, glance, cinder, etc.)
+    """
+
+    def _lazy_gettext(msg):
+        """Create and return a Message object.
+
+        Message encapsulates a string so that we can translate it later when
+        needed.
+        """
+        return Message(msg, domain)
+
+    return _lazy_gettext
+
+
+class Message(UserString.UserString, object):
+    """Class used to encapsulate translatable messages."""
+    def __init__(self, msg, domain):
+        # _msg is the gettext msgid and should never change
+        self._msg = msg
+        self._left_extra_msg = ''
+        self._right_extra_msg = ''
+        self.params = None
+        self.locale = None
+        self.domain = domain
+
+    @property
+    def data(self):
+        # NOTE(mrodden): this should always resolve to a unicode string
+        # that best represents the state of the message currently
+
+        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
+        if self.locale:
+            lang = gettext.translation(self.domain,
+                                       localedir=localedir,
+                                       languages=[self.locale],
+                                       fallback=True)
+        else:
+            # use system locale for translations
+            lang = gettext.translation(self.domain,
+                                       localedir=localedir,
+                                       fallback=True)
+
+        full_msg = (self._left_extra_msg +
+                    lang.ugettext(self._msg) +
+                    self._right_extra_msg)
+
+        if self.params is not None:
+            full_msg = full_msg % self.params
+
+        return six.text_type(full_msg)
+
+    def _save_dictionary_parameter(self, dict_param):
+        full_msg = self.data
+        # look for %(blah) fields in string;
+        # ignore %% and deal with the
+        # case where % is first character on the line
+        keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
+
+        # if we don't find any %(blah) blocks but have a %s
+        if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
+            # apparently the full dictionary is the parameter
+            params = copy.deepcopy(dict_param)
+        else:
+            params = {}
+            for key in keys:
+                try:
+                    params[key] = copy.deepcopy(dict_param[key])
+                except TypeError:
+                    # cast uncopyable thing to unicode string
+                    params[key] = unicode(dict_param[key])
+
+        return params
+
+    def _save_parameters(self, other):
+        # we check for None later to see if
+        # we actually have parameters to inject,
+        # so encapsulate if our parameter is actually None
+        if other is None:
+            self.params = (other, )
+        elif isinstance(other, dict):
+            self.params = self._save_dictionary_parameter(other)
+        else:
+            # fallback to casting to unicode,
+            # this will handle the problematic python code-like
+            # objects that cannot be deep-copied
+            try:
+                self.params = copy.deepcopy(other)
+            except TypeError:
+                self.params = unicode(other)
+
+        return self
+
+    # overrides to be more string-like
+    def __unicode__(self):
+        return self.data
+
+    def __str__(self):
+        return self.data.encode('utf-8')
+
+    def __getstate__(self):
+        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
+                   'domain', 'params', 'locale']
+        new_dict = self.__dict__.fromkeys(to_copy)
+        for attr in to_copy:
+            new_dict[attr] = copy.deepcopy(self.__dict__[attr])
+
+        return new_dict
+
+    def __setstate__(self, state):
+        for (k, v) in state.items():
+            setattr(self, k, v)
+
+    # operator overloads
+    def __add__(self, other):
+        copied = copy.deepcopy(self)
+        copied._right_extra_msg += other.__str__()
+        return copied
+
+    def __radd__(self, other):
+        copied = copy.deepcopy(self)
+        copied._left_extra_msg += other.__str__()
+        return copied
+
+    def __mod__(self, other):
+        # do a format string to catch and raise
+        # any possible KeyErrors from missing parameters
+        self.data % other
+        copied = copy.deepcopy(self)
+        return copied._save_parameters(other)
+
+    def __mul__(self, other):
+        return self.data * other
+
+    def __rmul__(self, other):
+        return other * self.data
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    def __getslice__(self, start, end):
+        return self.data.__getslice__(start, end)
+
+    def __getattribute__(self, name):
+        # NOTE(mrodden): handle lossy operations that we can't deal with yet
+        # These override the UserString implementation, since UserString
+        # uses our __class__ attribute to try and build a new message
+        # after running the inner data string through the operation.
+        # At that point, we have lost the gettext message id and can just
+        # safely resolve to a string instead.
+        ops = ['capitalize', 'center', 'decode', 'encode',
+               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+        if name in ops:
+            return getattr(self.data, name)
+        else:
+            return UserString.UserString.__getattribute__(self, name)
+
+
+class LocaleHandler(logging.Handler):
+    """Handler that can have a locale associated to translate Messages.
+
+    A quick example of how to utilize the Message class above.
+    LocaleHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating the internal Message.
+    """
+
+    def __init__(self, locale, target):
+        """Initialize a LocaleHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        logging.Handler.__init__(self)
+        self.locale = locale
+        self.target = target
+
+    def emit(self, record):
+        if isinstance(record.msg, Message):
+            # set the locale and resolve to a string
+            record.msg.locale = self.locale
+
+        self.target.emit(record)
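
The lazy half of this module defers translation: ``_()`` returns ``Message``
objects, ``%`` stores the parameters, and the string is only rendered (and
translated) when it is finally converted. A minimal sketch of that flow,
ignoring locale selection::

    from tempest.openstack.common import gettextutils

    _ = gettextutils.get_lazy_gettext('tempest')

    msg = _('Server %(id)s failed to boot') % {'id': 'abc123'}
    # msg is still a Message; rendering it resolves the translation and
    # applies the stored parameters.
    print(unicode(msg))   # 'Server abc123 failed to boot'
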
diff --git a/tempest/openstack/common/importutils.py b/tempest/openstack/common/importutils.py
new file mode 100644
index 0000000..7a303f9
--- /dev/null
+++ b/tempest/openstack/common/importutils.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+    """Returns a class from a string including module and class."""
+    mod_str, _sep, class_str = import_str.rpartition('.')
+    try:
+        __import__(mod_str)
+        return getattr(sys.modules[mod_str], class_str)
+    except (ValueError, AttributeError):
+        raise ImportError('Class %s cannot be found (%s)' %
+                          (class_str,
+                           traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+    """Import a class and return an instance of it."""
+    return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
+    to find the class in a default namespace, then failing back to
+    a full path if not found in the default namespace.
+    """
+    import_value = "%s.%s" % (name_space, import_str)
+    try:
+        return import_class(import_value)(*args, **kwargs)
+    except ImportError:
+        return import_class(import_str)(*args, **kwargs)
+
+
+def import_module(import_str):
+    """Import a module."""
+    __import__(import_str)
+    return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+    """Try to import a module and if it fails return default."""
+    try:
+        return import_module(import_str)
+    except ImportError:
+        return default
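
These helpers are thin wrappers around ``__import__``; the convenience is
going from a dotted string to a class, an instance, or an optional module. A
quick usage sketch::

    from tempest.openstack.common import importutils

    # Load a class by dotted path and instantiate it.
    OrderedDict = importutils.import_class('collections.OrderedDict')
    d = OrderedDict(a=1)

    # Optional-dependency pattern: returns None if the module is absent.
    etree = importutils.try_import('lxml.etree')
    if etree is None:
        pass  # fall back to another parser
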
diff --git a/tempest/openstack/common/jsonutils.py b/tempest/openstack/common/jsonutils.py
new file mode 100644
index 0000000..bd43e59
--- /dev/null
+++ b/tempest/openstack/common/jsonutils.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+'''
+JSON related utilities.
+
+This module provides a few things:
+
+    1) A handy function for getting an object down to something that can be
+    JSON serialized.  See to_primitive().
+
+    2) Wrappers around loads() and dumps().  The dumps() wrapper will
+    automatically use to_primitive() for you if needed.
+
+    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
+    is available.
+'''
+
+
+import datetime
+import functools
+import inspect
+import itertools
+import json
+import types
+import xmlrpclib
+
+import netaddr
+import six
+
+from tempest.openstack.common import timeutils
+
+
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+                     inspect.isfunction, inspect.isgeneratorfunction,
+                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
+                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+                     inspect.isabstract]
+
+_simple_types = (types.NoneType, int, basestring, bool, float, long)
+
+
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+                 level=0, max_depth=3):
+    """Convert a complex object into primitives.
+
+    Handy for JSON serialization. We can optionally handle instances,
+    but since this is a recursive function, we could have cyclical
+    data structures.
+
+    To handle cyclical data structures we could track the actual objects
+    visited in a set, but not all objects are hashable. Instead we just
+    track the depth of the object inspections and don't go too deep.
+
+    Therefore, convert_instances=True is lossy ... be aware.
+
+    """
+    # handle obvious types first - order of basic types determined by running
+    # full tests on nova project, resulting in the following counts:
+    # 572754 <type 'NoneType'>
+    # 460353 <type 'int'>
+    # 379632 <type 'unicode'>
+    # 274610 <type 'str'>
+    # 199918 <type 'dict'>
+    # 114200 <type 'datetime.datetime'>
+    #  51817 <type 'bool'>
+    #  26164 <type 'list'>
+    #   6491 <type 'float'>
+    #    283 <type 'tuple'>
+    #     19 <type 'long'>
+    if isinstance(value, _simple_types):
+        return value
+
+    if isinstance(value, datetime.datetime):
+        if convert_datetime:
+            return timeutils.strtime(value)
+        else:
+            return value
+
+    # value of itertools.count doesn't get caught by nasty_type_tests
+    # and results in infinite loop when list(value) is called.
+    if type(value) == itertools.count:
+        return six.text_type(value)
+
+    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
+    #              tests that raise an exception in a mocked method that
+    #              has a @wrap_exception with a notifier will fail. If
+    #              we up the dependency to 0.5.4 (when it is released) we
+    #              can remove this workaround.
+    if getattr(value, '__module__', None) == 'mox':
+        return 'mock'
+
+    if level > max_depth:
+        return '?'
+
+    # The try block may not be necessary after the class check above,
+    # but just in case ...
+    try:
+        recursive = functools.partial(to_primitive,
+                                      convert_instances=convert_instances,
+                                      convert_datetime=convert_datetime,
+                                      level=level,
+                                      max_depth=max_depth)
+        if isinstance(value, dict):
+            return dict((k, recursive(v)) for k, v in value.iteritems())
+        elif isinstance(value, (list, tuple)):
+            return [recursive(lv) for lv in value]
+
+        # It's not clear why xmlrpclib created their own DateTime type, but
+        # for our purposes, make it a datetime type which is explicitly
+        # handled
+        if isinstance(value, xmlrpclib.DateTime):
+            value = datetime.datetime(*tuple(value.timetuple())[:6])
+
+        if convert_datetime and isinstance(value, datetime.datetime):
+            return timeutils.strtime(value)
+        elif hasattr(value, 'iteritems'):
+            return recursive(dict(value.iteritems()), level=level + 1)
+        elif hasattr(value, '__iter__'):
+            return recursive(list(value))
+        elif convert_instances and hasattr(value, '__dict__'):
+            # Likely an instance of something. Watch for cycles.
+            # Ignore class member vars.
+            return recursive(value.__dict__, level=level + 1)
+        elif isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
+        else:
+            if any(test(value) for test in _nasty_type_tests):
+                return six.text_type(value)
+            return value
+    except TypeError:
+        # Class objects are tricky since they may define something like
+        # __iter__ defined but it isn't callable as list().
+        return six.text_type(value)
+
+
+def dumps(value, default=to_primitive, **kwargs):
+    return json.dumps(value, default=default, **kwargs)
+
+
+def loads(s):
+    return json.loads(s)
+
+
+def load(s):
+    return json.load(s)
+
+
+try:
+    import anyjson
+except ImportError:
+    pass
+else:
+    anyjson._modules.append((__name__, 'dumps', TypeError,
+                                       'loads', ValueError, 'load'))
+    anyjson.force_implementation(__name__)
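
The gain over plain ``json.dumps()`` is that ``to_primitive()`` flattens
datetimes, iterables and (optionally) object instances before serialization.
A short sketch, assuming the companion ``timeutils`` module from the same
oslo sync is present::

    import datetime

    from tempest.openstack.common import jsonutils

    payload = {'created_at': datetime.datetime(2013, 7, 1, 12, 0, 0),
               'tags': set(['a', 'b'])}
    # dumps() routes unknown types through to_primitive(), so the datetime
    # becomes a string and the set becomes a list.
    print(jsonutils.dumps(payload))
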
diff --git a/tempest/openstack/common/local.py b/tempest/openstack/common/local.py
new file mode 100644
index 0000000..f1bfc82
--- /dev/null
+++ b/tempest/openstack/common/local.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Greenthread local storage of variables using weak references"""
+
+import weakref
+
+from eventlet import corolocal
+
+
+class WeakLocal(corolocal.local):
+    def __getattribute__(self, attr):
+        rval = corolocal.local.__getattribute__(self, attr)
+        if rval:
+            # NOTE(mikal): this bit is confusing. What is stored is a weak
+            # reference, not the value itself. We therefore need to lookup
+            # the weak reference and return the inner value here.
+            rval = rval()
+        return rval
+
+    def __setattr__(self, attr, value):
+        value = weakref.ref(value)
+        return corolocal.local.__setattr__(self, attr, value)
+
+
+# NOTE(mikal): the name "store" should be deprecated in the future
+store = WeakLocal()
+
+# A "weak" store uses weak references and allows an object to fall out of scope
+# when it falls out of scope in the code that uses the thread local storage. A
+# "strong" store will hold a reference to the object so that it never falls out
+# of scope.
+weak_store = WeakLocal()
+strong_store = corolocal.local()
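
The practical difference between the two stores is object lifetime: the weak
store lets a stored object be garbage collected as soon as the caller drops
its own reference. A small sketch (eventlet must be installed for
``corolocal``)::

    from tempest.openstack.common import local

    class Context(object):
        pass

    ctx = Context()
    local.weak_store.context = ctx          # held via a weak reference
    print(local.weak_store.context is ctx)  # True while ctx is alive

    del ctx
    # With the last strong reference gone, the weak store resolves to None.
    print(local.weak_store.context)
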
diff --git a/tempest/openstack/common/lockutils.py b/tempest/openstack/common/lockutils.py
new file mode 100644
index 0000000..3ff1a7a
--- /dev/null
+++ b/tempest/openstack/common/lockutils.py
@@ -0,0 +1,276 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import contextlib
+import errno
+import functools
+import os
+import time
+import weakref
+
+from eventlet import semaphore
+from oslo.config import cfg
+
+from tempest.openstack.common import fileutils
+from tempest.openstack.common.gettextutils import _  # noqa
+from tempest.openstack.common import local
+from tempest.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+    cfg.BoolOpt('disable_process_locking', default=False,
+                help='Whether to disable inter-process locks'),
+    cfg.StrOpt('lock_path',
+               help=('Directory to use for lock files.'))
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+def set_defaults(lock_path):
+    cfg.set_defaults(util_opts, lock_path=lock_path)
+
+
+class _InterProcessLock(object):
+    """Lock implementation which allows multiple locks, working around
+    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+    not require any cleanup. Since the lock is always held on a file
+    descriptor rather than outside of the process, the lock gets dropped
+    automatically if the process crashes, even if __exit__ is not executed.
+
+    There are no guarantees regarding usage by multiple green threads in a
+    single process here. This lock works only between processes. Exclusive
+    access between local threads should be achieved using the semaphores
+    in the @synchronized decorator.
+
+    Note these locks are released when the descriptor is closed, so it's not
+    safe to close the file descriptor while another green thread holds the
+    lock. Just opening and closing the lock file can break synchronisation,
+    so lock files must be accessed only using this abstraction.
+    """
+
+    def __init__(self, name):
+        self.lockfile = None
+        self.fname = name
+
+    def __enter__(self):
+        self.lockfile = open(self.fname, 'w')
+
+        while True:
+            try:
+                # Using non-blocking locks since green threads are not
+                # patched to deal with blocking locking calls.
+                # Also upon reading the MSDN docs for locking(), it seems
+                # to have a laughable 10 attempts "blocking" mechanism.
+                self.trylock()
+                return self
+            except IOError as e:
+                if e.errno in (errno.EACCES, errno.EAGAIN):
+                    # external locks synchronise things like iptables
+                    # updates - give it some time to prevent busy spinning
+                    time.sleep(0.01)
+                else:
+                    raise
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            self.unlock()
+            self.lockfile.close()
+        except IOError:
+            LOG.exception(_("Could not release the acquired lock `%s`"),
+                          self.fname)
+
+    def trylock(self):
+        raise NotImplementedError()
+
+    def unlock(self):
+        raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+    def trylock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
+
+    def unlock(self):
+        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+    def trylock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+    def unlock(self):
+        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+    import msvcrt
+    InterProcessLock = _WindowsLock
+else:
+    import fcntl
+    InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
+
+
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Context based lock
+
+    This function yields a `semaphore.Semaphore` instance unless external is
+    True, in which case, it'll yield an InterProcessLock instance.
+
+    :param lock_file_prefix: The lock_file_prefix argument is used to provide
+    lock files on disk with a meaningful prefix.
+
+    :param external: The external keyword argument denotes whether this lock
+    should work across multiple processes. This means that if two different
+    workers both run a method decorated with @synchronized('mylock',
+    external=True), only one of them will execute at a time.
+
+    :param lock_path: The lock_path keyword argument is used to specify a
+    special location for external lock files to live. If nothing is set, then
+    CONF.lock_path is used as a default.
+    """
+    # NOTE(soren): If we ever go natively threaded, this will be racy.
+    #              See http://stackoverflow.com/questions/5390569/dyn
+    #              amically-allocating-and-destroying-mutexes
+    sem = _semaphores.get(name, semaphore.Semaphore())
+    if name not in _semaphores:
+        # this check is not racy - we're already holding ref locally
+        # so GC won't remove the item and there was no IO switch
+        # (only valid in greenthreads)
+        _semaphores[name] = sem
+
+    with sem:
+        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+
+        # NOTE(mikal): I know this looks odd
+        if not hasattr(local.strong_store, 'locks_held'):
+            local.strong_store.locks_held = []
+        local.strong_store.locks_held.append(name)
+
+        try:
+            if external and not CONF.disable_process_locking:
+                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+                          {'lock': name})
+
+                # We need a copy of lock_path because it is non-local
+                local_lock_path = lock_path or CONF.lock_path
+                if not local_lock_path:
+                    raise cfg.RequiredOptError('lock_path')
+
+                if not os.path.exists(local_lock_path):
+                    fileutils.ensure_tree(local_lock_path)
+                    LOG.info(_('Created lock path: %s'), local_lock_path)
+
+                def add_prefix(name, prefix):
+                    if not prefix:
+                        return name
+                    sep = '' if prefix.endswith('-') else '-'
+                    return '%s%s%s' % (prefix, sep, name)
+
+                # NOTE(mikal): the lock name cannot contain directory
+                # separators
+                lock_file_name = add_prefix(name.replace(os.sep, '_'),
+                                            lock_file_prefix)
+
+                lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+                try:
+                    lock = InterProcessLock(lock_file_path)
+                    with lock as lock:
+                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+                                  {'lock': name, 'path': lock_file_path})
+                        yield lock
+                finally:
+                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+                              {'lock': name, 'path': lock_file_path})
+            else:
+                yield sem
+
+        finally:
+            local.strong_store.locks_held.remove(name)
+
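+# NOTE: an illustrative sketch of the context manager above; 'iptables' is
+# an arbitrary lock name and update_firewall() is a placeholder for the
+# work that needs to be serialized:
+#
+#     with lock('iptables', lock_file_prefix='tempest-', external=True):
+#         update_firewall()
+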
+
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Synchronization decorator.
+
+    Decorating a method like so::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+           ...
+
+    ensures that only one thread will execute the foo method at a time.
+
+    Different methods can share the same lock::
+
+        @synchronized('mylock')
+        def foo(self, *args):
+           ...
+
+        @synchronized('mylock')
+        def bar(self, *args):
+           ...
+
+    This way only one of either foo or bar can be executing at a time.
+    """
+
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            with lock(name, lock_file_prefix, external, lock_path):
+                LOG.debug(_('Got semaphore / lock "%(function)s"'),
+                          {'function': f.__name__})
+                result = f(*args, **kwargs)
+
+            LOG.debug(_('Semaphore / lock released "%(function)s"'),
+                      {'function': f.__name__})
+            return result
+        return inner
+    return wrap
+
+
+def synchronized_with_prefix(lock_file_prefix):
+    """Partial object generator for the synchronization decorator.
+
+    Redefine @synchronized in each project like so::
+
+        (in nova/utils.py)
+        from nova.openstack.common import lockutils
+
+        synchronized = lockutils.synchronized_with_prefix('nova-')
+
+
+        (in nova/foo.py)
+        from nova import utils
+
+        @utils.synchronized('mylock')
+        def bar(self, *args):
+           ...
+
+    The lock_file_prefix argument is used to provide lock files on disk with a
+    meaningful prefix.
+    """
+
+    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
diff --git a/tempest/openstack/common/log.py b/tempest/openstack/common/log.py
new file mode 100644
index 0000000..4133c30
--- /dev/null
+++ b/tempest/openstack/common/log.py
@@ -0,0 +1,559 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Openstack logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods.  If the context object
+is not specified, default formatting is used. Additionally, an instance uuid
+may be passed as part of the log message, which is intended to make it easier
+for admins to find messages related to a specific instance.
+
+It also allows setting of formatting information through conf.
+
+"""
+
+import inspect
+import itertools
+import logging
+import logging.config
+import logging.handlers
+import os
+import sys
+import traceback
+
+from oslo.config import cfg
+from six import moves
+
+from tempest.openstack.common.gettextutils import _  # noqa
+from tempest.openstack.common import importutils
+from tempest.openstack.common import jsonutils
+from tempest.openstack.common import local
+
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+    cfg.BoolOpt('debug',
+                short='d',
+                default=False,
+                help='Print debugging output (set logging level to '
+                     'DEBUG instead of default WARNING level).'),
+    cfg.BoolOpt('verbose',
+                short='v',
+                default=False,
+                help='Print more verbose output (set logging level to '
+                     'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+    cfg.StrOpt('log-config',
+               metavar='PATH',
+               help='If this option is specified, the logging configuration '
+                    'file specified is used and overrides any other logging '
+                    'options specified. Please see the Python logging module '
+                    'documentation for details on logging configuration '
+                    'files.'),
+    cfg.StrOpt('log-format',
+               default=None,
+               metavar='FORMAT',
+               help='DEPRECATED. '
+                    'A logging.Formatter log message format string which may '
+                    'use any of the available logging.LogRecord attributes. '
+                    'This option is deprecated.  Please use '
+                    'logging_context_format_string and '
+                    'logging_default_format_string instead.'),
+    cfg.StrOpt('log-date-format',
+               default=_DEFAULT_LOG_DATE_FORMAT,
+               metavar='DATE_FORMAT',
+               help='Format string for %%(asctime)s in log records. '
+                    'Default: %(default)s'),
+    cfg.StrOpt('log-file',
+               metavar='PATH',
+               deprecated_name='logfile',
+               help='(Optional) Name of log file to output to. '
+                    'If no default is set, logging will go to stdout.'),
+    cfg.StrOpt('log-dir',
+               deprecated_name='logdir',
+               help='(Optional) The base directory used for relative '
+                    '--log-file paths'),
+    cfg.BoolOpt('use-syslog',
+                default=False,
+                help='Use syslog for logging.'),
+    cfg.StrOpt('syslog-log-facility',
+               default='LOG_USER',
+               help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+    cfg.BoolOpt('use_stderr',
+                default=True,
+                help='Log output to standard error')
+]
+
+log_opts = [
+    cfg.StrOpt('logging_context_format_string',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+                       '%(instance)s%(message)s',
+               help='format string to use for log messages with context'),
+    cfg.StrOpt('logging_default_format_string',
+               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+                       '%(name)s [-] %(instance)s%(message)s',
+               help='format string to use for log messages without context'),
+    cfg.StrOpt('logging_debug_format_suffix',
+               default='%(funcName)s %(pathname)s:%(lineno)d',
+               help='data to append to log format when level is DEBUG'),
+    cfg.StrOpt('logging_exception_prefix',
+               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
+               '%(instance)s',
+               help='prefix each line of exception output with this format'),
+    cfg.ListOpt('default_log_levels',
+                default=[
+                    'amqplib=WARN',
+                    'sqlalchemy=WARN',
+                    'boto=WARN',
+                    'suds=INFO',
+                    'keystone=INFO',
+                    'eventlet.wsgi.server=WARN'
+                ],
+                help='list of logger=LEVEL pairs'),
+    cfg.BoolOpt('publish_errors',
+                default=False,
+                help='publish error events'),
+    cfg.BoolOpt('fatal_deprecations',
+                default=False,
+                help='make deprecations fatal'),
+
+    # NOTE(mikal): there are two options here because sometimes we are handed
+    # a full instance (and could include more information), and other times we
+    # are just handed a UUID for the instance.
+    cfg.StrOpt('instance_format',
+               default='[instance: %(uuid)s] ',
+               help='If an instance is passed with the log message, format '
+                    'it like this'),
+    cfg.StrOpt('instance_uuid_format',
+               default='[instance: %(uuid)s] ',
+               help='If an instance UUID is passed with the log message, '
+                    'format it like this'),
+]
+
+CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
+CONF.register_opts(generic_log_opts)
+CONF.register_opts(log_opts)
+
+# our new audit level
+# NOTE(jkoelker) Since we synthesized an audit level, make the logging
+#                module aware of it so it acts like other levels.
+logging.AUDIT = logging.INFO + 1
+logging.addLevelName(logging.AUDIT, 'AUDIT')
+
+
+try:
+    NullHandler = logging.NullHandler
+except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
+    class NullHandler(logging.Handler):
+        def handle(self, record):
+            pass
+
+        def emit(self, record):
+            pass
+
+        def createLock(self):
+            self.lock = None
+
+
+def _dictify_context(context):
+    if context is None:
+        return None
+    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
+        context = context.to_dict()
+    return context
+
+
+def _get_binary_name():
+    return os.path.basename(inspect.stack()[-1][1])
+
+
+def _get_log_file_path(binary=None):
+    logfile = CONF.log_file
+    logdir = CONF.log_dir
+
+    if logfile and not logdir:
+        return logfile
+
+    if logfile and logdir:
+        return os.path.join(logdir, logfile)
+
+    if logdir:
+        binary = binary or _get_binary_name()
+        return '%s.log' % (os.path.join(logdir, binary),)
+
+
+class BaseLoggerAdapter(logging.LoggerAdapter):
+
+    def audit(self, msg, *args, **kwargs):
+        self.log(logging.AUDIT, msg, *args, **kwargs)
+
+
+class LazyAdapter(BaseLoggerAdapter):
+    def __init__(self, name='unknown', version='unknown'):
+        self._logger = None
+        self.extra = {}
+        self.name = name
+        self.version = version
+
+    @property
+    def logger(self):
+        if not self._logger:
+            self._logger = getLogger(self.name, self.version)
+        return self._logger
+
+
+class ContextAdapter(BaseLoggerAdapter):
+    warn = logging.LoggerAdapter.warning
+
+    def __init__(self, logger, project_name, version_string):
+        self.logger = logger
+        self.project = project_name
+        self.version = version_string
+
+    @property
+    def handlers(self):
+        return self.logger.handlers
+
+    def deprecated(self, msg, *args, **kwargs):
+        stdmsg = _("Deprecated: %s") % msg
+        if CONF.fatal_deprecations:
+            self.critical(stdmsg, *args, **kwargs)
+            raise DeprecatedConfig(msg=stdmsg)
+        else:
+            self.warn(stdmsg, *args, **kwargs)
+
+    def process(self, msg, kwargs):
+        if 'extra' not in kwargs:
+            kwargs['extra'] = {}
+        extra = kwargs['extra']
+
+        context = kwargs.pop('context', None)
+        if not context:
+            context = getattr(local.store, 'context', None)
+        if context:
+            extra.update(_dictify_context(context))
+
+        instance = kwargs.pop('instance', None)
+        instance_extra = ''
+        if instance:
+            instance_extra = CONF.instance_format % instance
+        else:
+            instance_uuid = kwargs.pop('instance_uuid', None)
+            if instance_uuid:
+                instance_extra = (CONF.instance_uuid_format
+                                  % {'uuid': instance_uuid})
+        extra.update({'instance': instance_extra})
+
+        extra.update({"project": self.project})
+        extra.update({"version": self.version})
+        extra['extra'] = extra.copy()
+        return msg, kwargs
+
+
+class JSONFormatter(logging.Formatter):
+    def __init__(self, fmt=None, datefmt=None):
+        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
+        #                since logging.config.fileConfig passes it.
+        self.datefmt = datefmt
+
+    def formatException(self, ei, strip_newlines=True):
+        lines = traceback.format_exception(*ei)
+        if strip_newlines:
+            lines = [itertools.ifilter(
+                lambda x: x,
+                line.rstrip().splitlines()) for line in lines]
+            lines = list(itertools.chain(*lines))
+        return lines
+
+    def format(self, record):
+        message = {'message': record.getMessage(),
+                   'asctime': self.formatTime(record, self.datefmt),
+                   'name': record.name,
+                   'msg': record.msg,
+                   'args': record.args,
+                   'levelname': record.levelname,
+                   'levelno': record.levelno,
+                   'pathname': record.pathname,
+                   'filename': record.filename,
+                   'module': record.module,
+                   'lineno': record.lineno,
+                   'funcname': record.funcName,
+                   'created': record.created,
+                   'msecs': record.msecs,
+                   'relative_created': record.relativeCreated,
+                   'thread': record.thread,
+                   'thread_name': record.threadName,
+                   'process_name': record.processName,
+                   'process': record.process,
+                   'traceback': None}
+
+        if hasattr(record, 'extra'):
+            message['extra'] = record.extra
+
+        if record.exc_info:
+            message['traceback'] = self.formatException(record.exc_info)
+
+        return jsonutils.dumps(message)
+
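+# NOTE: an illustrative sketch of attaching the JSON formatter to a handler
+# via the stdlib logging API; the logger name is a placeholder:
+#
+#     handler = logging.StreamHandler()
+#     handler.setFormatter(JSONFormatter())
+#     logging.getLogger('tempest').addHandler(handler)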
+
+def _create_logging_excepthook(product_name):
+    def logging_excepthook(type, value, tb):
+        extra = {}
+        if CONF.verbose:
+            extra['exc_info'] = (type, value, tb)
+        getLogger(product_name).critical(str(value), **extra)
+    return logging_excepthook
+
+
+class LogConfigError(Exception):
+
+    message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+    def __init__(self, log_config, err_msg):
+        self.log_config = log_config
+        self.err_msg = err_msg
+
+    def __str__(self):
+        return self.message % dict(log_config=self.log_config,
+                                   err_msg=self.err_msg)
+
+
+def _load_log_config(log_config):
+    try:
+        logging.config.fileConfig(log_config)
+    except moves.configparser.Error as exc:
+        raise LogConfigError(log_config, str(exc))
+
+
+def setup(product_name):
+    """Setup logging."""
+    if CONF.log_config:
+        _load_log_config(CONF.log_config)
+    else:
+        _setup_logging_from_conf()
+    sys.excepthook = _create_logging_excepthook(product_name)
+
+
+def set_defaults(logging_context_format_string):
+    cfg.set_defaults(log_opts,
+                     logging_context_format_string=
+                     logging_context_format_string)
+
+
+def _find_facility_from_conf():
+    facility_names = logging.handlers.SysLogHandler.facility_names
+    facility = getattr(logging.handlers.SysLogHandler,
+                       CONF.syslog_log_facility,
+                       None)
+
+    if facility is None and CONF.syslog_log_facility in facility_names:
+        facility = facility_names.get(CONF.syslog_log_facility)
+
+    if facility is None:
+        valid_facilities = facility_names.keys()
+        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
+                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
+                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
+                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
+                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
+        valid_facilities.extend(consts)
+        raise TypeError(_('syslog facility must be one of: %s') %
+                        ', '.join("'%s'" % fac
+                                  for fac in valid_facilities))
+
+    return facility
+
+
+def _setup_logging_from_conf():
+    log_root = getLogger(None).logger
+    for handler in log_root.handlers:
+        log_root.removeHandler(handler)
+
+    if CONF.use_syslog:
+        facility = _find_facility_from_conf()
+        syslog = logging.handlers.SysLogHandler(address='/dev/log',
+                                                facility=facility)
+        log_root.addHandler(syslog)
+
+    logpath = _get_log_file_path()
+    if logpath:
+        filelog = logging.handlers.WatchedFileHandler(logpath)
+        log_root.addHandler(filelog)
+
+    if CONF.use_stderr:
+        streamlog = ColorHandler()
+        log_root.addHandler(streamlog)
+
+    elif not CONF.log_file:
+        # pass sys.stdout as a positional argument
+        # python2.6 calls the argument strm, in 2.7 it's stream
+        streamlog = logging.StreamHandler(sys.stdout)
+        log_root.addHandler(streamlog)
+
+    if CONF.publish_errors:
+        handler = importutils.import_object(
+            "tempest.openstack.common.log_handler.PublishErrorsHandler",
+            logging.ERROR)
+        log_root.addHandler(handler)
+
+    datefmt = CONF.log_date_format
+    for handler in log_root.handlers:
+        # NOTE(alaski): CONF.log_format overrides everything currently.  This
+        # should be deprecated in favor of context aware formatting.
+        if CONF.log_format:
+            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
+                                                   datefmt=datefmt))
+            log_root.info('Deprecated: log_format is now deprecated and will '
+                          'be removed in the next release')
+        else:
+            handler.setFormatter(ContextFormatter(datefmt=datefmt))
+
+    if CONF.debug:
+        log_root.setLevel(logging.DEBUG)
+    elif CONF.verbose:
+        log_root.setLevel(logging.INFO)
+    else:
+        log_root.setLevel(logging.WARNING)
+
+    for pair in CONF.default_log_levels:
+        mod, _sep, level_name = pair.partition('=')
+        level = logging.getLevelName(level_name)
+        logger = logging.getLogger(mod)
+        logger.setLevel(level)
+
+_loggers = {}
+
+
+def getLogger(name='unknown', version='unknown'):
+    if name not in _loggers:
+        _loggers[name] = ContextAdapter(logging.getLogger(name),
+                                        name,
+                                        version)
+    return _loggers[name]
+
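+# NOTE: an illustrative sketch of the context-aware adapter returned above;
+# request_context and server_uuid are placeholders supplied by the caller:
+#
+#     LOG = getLogger(__name__)
+#     LOG.info(_('instance booted'), context=request_context,
+#              instance_uuid=server_uuid)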
+
+def getLazyLogger(name='unknown', version='unknown'):
+    """Returns lazy logger.
+
+    Creates a pass-through logger that does not create the real logger
+    until it is really needed and delegates all calls to the real logger
+    once it is created.
+    """
+    return LazyAdapter(name, version)
+
+
+class WritableLogger(object):
+    """A thin wrapper that responds to `write` and logs."""
+
+    def __init__(self, logger, level=logging.INFO):
+        self.logger = logger
+        self.level = level
+
+    def write(self, msg):
+        self.logger.log(self.level, msg)
+
+
+class ContextFormatter(logging.Formatter):
+    """A context.RequestContext aware formatter configured through flags.
+
+    The flags used to set format strings are: logging_context_format_string
+    and logging_default_format_string.  You can also specify
+    logging_debug_format_suffix to append extra formatting if the log level is
+    debug.
+
+    For information about what variables are available for the formatter see:
+    http://docs.python.org/library/logging.html#formatter
+
+    """
+
+    def format(self, record):
+        """Uses contextstring if request_id is set, otherwise default."""
+        # NOTE(sdague): default the fancier formatting params
+        # to an empty string so we don't throw an exception if
+        # they get used
+        for key in ('instance', 'color'):
+            if key not in record.__dict__:
+                record.__dict__[key] = ''
+
+        if record.__dict__.get('request_id', None):
+            self._fmt = CONF.logging_context_format_string
+        else:
+            self._fmt = CONF.logging_default_format_string
+
+        if (record.levelno == logging.DEBUG and
+                CONF.logging_debug_format_suffix):
+            self._fmt += " " + CONF.logging_debug_format_suffix
+
+        # Cache this on the record, Logger will respect our formatted copy
+        if record.exc_info:
+            record.exc_text = self.formatException(record.exc_info, record)
+        return logging.Formatter.format(self, record)
+
+    def formatException(self, exc_info, record=None):
+        """Format exception output with CONF.logging_exception_prefix."""
+        if not record:
+            return logging.Formatter.formatException(self, exc_info)
+
+        stringbuffer = moves.StringIO()
+        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+                                  None, stringbuffer)
+        lines = stringbuffer.getvalue().split('\n')
+        stringbuffer.close()
+
+        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
+            record.asctime = self.formatTime(record, self.datefmt)
+
+        formatted_lines = []
+        for line in lines:
+            pl = CONF.logging_exception_prefix % record.__dict__
+            fl = '%s%s' % (pl, line)
+            formatted_lines.append(fl)
+        return '\n'.join(formatted_lines)
+
+
+class ColorHandler(logging.StreamHandler):
+    LEVEL_COLORS = {
+        logging.DEBUG: '\033[00;32m',  # GREEN
+        logging.INFO: '\033[00;36m',  # CYAN
+        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
+        logging.WARN: '\033[01;33m',  # BOLD YELLOW
+        logging.ERROR: '\033[01;31m',  # BOLD RED
+        logging.CRITICAL: '\033[01;31m',  # BOLD RED
+    }
+
+    def format(self, record):
+        record.color = self.LEVEL_COLORS[record.levelno]
+        return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+    message = _("Fatal call to deprecated config: %(msg)s")
+
+    def __init__(self, msg):
+        super(Exception, self).__init__(self.message % dict(msg=msg))
diff --git a/tempest/openstack/common/timeutils.py b/tempest/openstack/common/timeutils.py
new file mode 100644
index 0000000..bd60489
--- /dev/null
+++ b/tempest/openstack/common/timeutils.py
@@ -0,0 +1,188 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Time related utilities and helper functions.
+"""
+
+import calendar
+import datetime
+
+import iso8601
+import six
+
+
+# ISO 8601 extended time format with microseconds
+_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
+_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
+PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
+
+
+def isotime(at=None, subsecond=False):
+    """Stringify time in ISO 8601 format."""
+    if not at:
+        at = utcnow()
+    st = at.strftime(_ISO8601_TIME_FORMAT
+                     if not subsecond
+                     else _ISO8601_TIME_FORMAT_SUBSECOND)
+    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+    st += ('Z' if tz == 'UTC' else tz)
+    return st
+
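+# NOTE: an illustrative round trip between isotime() and parse_isotime():
+#
+#     >>> isotime(datetime.datetime(2013, 7, 1, 12, 0, 0))
+#     '2013-07-01T12:00:00Z'
+#     >>> parse_isotime('2013-07-01T12:00:00Z').year
+#     2013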
+
+def parse_isotime(timestr):
+    """Parse time from ISO 8601 format."""
+    try:
+        return iso8601.parse_date(timestr)
+    except iso8601.ParseError as e:
+        raise ValueError(e.message)
+    except TypeError as e:
+        raise ValueError(e.message)
+
+
+def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
+    """Returns formatted utcnow."""
+    if not at:
+        at = utcnow()
+    return at.strftime(fmt)
+
+
+def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
+    """Turn a formatted time back into a datetime."""
+    return datetime.datetime.strptime(timestr, fmt)
+
+
+def normalize_time(timestamp):
+    """Normalize time in arbitrary timezone to UTC naive object."""
+    offset = timestamp.utcoffset()
+    if offset is None:
+        return timestamp
+    return timestamp.replace(tzinfo=None) - offset
+
+
+def is_older_than(before, seconds):
+    """Return True if before is older than seconds."""
+    if isinstance(before, six.string_types):
+        before = parse_strtime(before).replace(tzinfo=None)
+    return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
+def is_newer_than(after, seconds):
+    """Return True if after is newer than seconds."""
+    if isinstance(after, six.string_types):
+        after = parse_strtime(after).replace(tzinfo=None)
+    return after - utcnow() > datetime.timedelta(seconds=seconds)
+
+
+def utcnow_ts():
+    """Timestamp version of our utcnow function."""
+    return calendar.timegm(utcnow().timetuple())
+
+
+def utcnow():
+    """Overridable version of utils.utcnow."""
+    if utcnow.override_time:
+        try:
+            return utcnow.override_time.pop(0)
+        except AttributeError:
+            return utcnow.override_time
+    return datetime.datetime.utcnow()
+
+
+def iso8601_from_timestamp(timestamp):
+    """Returns a iso8601 formated date from timestamp."""
+    return isotime(datetime.datetime.utcfromtimestamp(timestamp))
+
+
+utcnow.override_time = None
+
+
+def set_time_override(override_time=datetime.datetime.utcnow()):
+    """Overrides utils.utcnow.
+
+    Make it return a constant time or a list thereof, one at a time.
+    """
+    utcnow.override_time = override_time
+
+
+def advance_time_delta(timedelta):
+    """Advance overridden time using a datetime.timedelta."""
+    assert utcnow.override_time is not None
+    try:
+        for i, dt in enumerate(utcnow.override_time):
+            utcnow.override_time[i] = dt + timedelta
+    except TypeError:
+        utcnow.override_time += timedelta
+
+
+def advance_time_seconds(seconds):
+    """Advance overridden time by seconds."""
+    advance_time_delta(datetime.timedelta(0, seconds))
+
+
+def clear_time_override():
+    """Remove the overridden time."""
+    utcnow.override_time = None
+
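+# NOTE: an illustrative sketch of the override hooks as a test might use
+# them; the chosen date is arbitrary:
+#
+#     set_time_override(datetime.datetime(2013, 1, 1))
+#     advance_time_seconds(30)
+#     assert utcnow() == datetime.datetime(2013, 1, 1, 0, 0, 30)
+#     clear_time_override()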
+
+def marshall_now(now=None):
+    """Make an rpc-safe datetime with microseconds.
+
+    Note: tzinfo is stripped, but not required for relative times.
+    """
+    if not now:
+        now = utcnow()
+    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
+                minute=now.minute, second=now.second,
+                microsecond=now.microsecond)
+
+
+def unmarshall_time(tyme):
+    """Unmarshall a datetime dict."""
+    return datetime.datetime(day=tyme['day'],
+                             month=tyme['month'],
+                             year=tyme['year'],
+                             hour=tyme['hour'],
+                             minute=tyme['minute'],
+                             second=tyme['second'],
+                             microsecond=tyme['microsecond'])
+
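+# NOTE: an illustrative round trip through marshall_now()/unmarshall_time():
+#
+#     now = utcnow()
+#     assert unmarshall_time(marshall_now(now)) == now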
+
+def delta_seconds(before, after):
+    """Return the difference between two timing objects.
+
+    Compute the difference in seconds between two date, time, or
+    datetime objects (as a float, to microsecond resolution).
+    """
+    delta = after - before
+    try:
+        return delta.total_seconds()
+    except AttributeError:
+        return ((delta.days * 24 * 3600) + delta.seconds +
+                float(delta.microseconds) / (10 ** 6))
+
+
+def is_soon(dt, window):
+    """Determines if time is going to happen in the next window seconds.
+
+    :params dt: the time
+    :params window: minimum seconds to remain to consider the time not soon
+
+    :return: True if expiration is within the given duration
+    """
+    soon = (utcnow() + datetime.timedelta(seconds=window))
+    return normalize_time(dt) <= soon
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index fe6fbf5..e785299 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -29,11 +29,10 @@
 
 
 from tempest.api.network import common as net_common
-from tempest.common import log as logging
 from tempest.common import ssh
 from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
 import tempest.manager
+from tempest.openstack.common import log as logging
 import tempest.test
 
 
@@ -76,11 +75,7 @@
         if not tenant_name:
             tenant_name = self.config.identity.tenant_name
 
-        if None in (username, password, tenant_name):
-            msg = ("Missing required credentials for compute client. "
-                   "username: %(username)s, password: %(password)s, "
-                   "tenant_name: %(tenant_name)s") % locals()
-            raise exceptions.InvalidConfiguration(msg)
+        self._validate_credentials(username, password, tenant_name)
 
         auth_url = self.config.identity.uri
         dscv = self.config.identity.disable_ssl_certificate_validation
@@ -131,11 +126,7 @@
         if not tenant_name:
             tenant_name = self.config.identity.admin_tenant_name
 
-        if None in (username, password, tenant_name):
-            msg = ("Missing required credentials for identity client. "
-                   "username: %(username)s, password: %(password)s, "
-                   "tenant_name: %(tenant_name)s") % locals()
-            raise exceptions.InvalidConfiguration(msg)
+        self._validate_credentials(username, password, tenant_name)
 
         auth_url = self.config.identity.uri
         dscv = self.config.identity.disable_ssl_certificate_validation
@@ -157,11 +148,7 @@
         password = self.config.identity.admin_password
         tenant_name = self.config.identity.admin_tenant_name
 
-        if None in (username, password, tenant_name):
-            msg = ("Missing required credentials for network client. "
-                   "username: %(username)s, password: %(password)s, "
-                   "tenant_name: %(tenant_name)s") % locals()
-            raise exceptions.InvalidConfiguration(msg)
+        self._validate_credentials(username, password, tenant_name)
 
         auth_url = self.config.identity.uri
         dscv = self.config.identity.disable_ssl_certificate_validation
@@ -236,7 +223,7 @@
 
     @classmethod
     def check_preconditions(cls):
-        if (cls.config.network.neutron_available):
+        if (cls.config.service_available.neutron):
             cls.enabled = True
             #verify that neutron_available is telling the truth
             try:
@@ -438,24 +425,24 @@
             if proc.returncode == 0:
                 return True
 
-        # TODO(mnewby) Allow configuration of execution and sleep duration.
-        return tempest.test.call_until_true(ping, 20, 1)
+        return tempest.test.call_until_true(
+            ping, self.config.compute.ping_timeout, 1)
 
     def _is_reachable_via_ssh(self, ip_address, username, private_key,
-                              timeout=120):
+                              timeout):
         ssh_client = ssh.Client(ip_address, username,
                                 pkey=private_key,
                                 timeout=timeout)
         return ssh_client.test_connection_auth()
 
-    def _check_vm_connectivity(self, ip_address, username, private_key,
-                               timeout=120):
+    def _check_vm_connectivity(self, ip_address, username, private_key):
         self.assertTrue(self._ping_ip_address(ip_address),
                         "Timed out waiting for %s to become "
                         "reachable" % ip_address)
-        self.assertTrue(self._is_reachable_via_ssh(ip_address,
-                                                   username,
-                                                   private_key,
-                                                   timeout=timeout),
-                        'Auth failure in connecting to %s@%s via ssh' %
-                        (username, ip_address))
+        self.assertTrue(self._is_reachable_via_ssh(
+            ip_address,
+            username,
+            private_key,
+            timeout=self.config.compute.ssh_timeout),
+            'Auth failure in connecting to %s@%s via ssh' %
+            (username, ip_address))
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
new file mode 100644
index 0000000..9a45572
--- /dev/null
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import urllib
+import urllib2
+
+from lxml import html
+
+from tempest.scenario import manager
+
+
+class TestDashboardBasicOps(manager.OfficialClientTest):
+
+    """
+    This is a basic scenario test:
+    * checks that the login page is available
+    * logs in as a regular user
+    * checks that the user home page loads without error
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestDashboardBasicOps, cls).setUpClass()
+
+        if not cls.config.service_available.horizon:
+            raise cls.skipException("Horizon support is required")
+
+    def check_login_page(self):
+        response = urllib2.urlopen(self.config.dashboard.dashboard_url)
+        self.assertIn("<h3>Log In</h3>", response.read())
+
+    def user_login(self):
+        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
+        response = self.opener.open(self.config.dashboard.dashboard_url).read()
+
+        # Grab the CSRF token and default region
+        csrf_token = html.fromstring(response).xpath(
+            '//input[@name="csrfmiddlewaretoken"]/@value')[0]
+        region = html.fromstring(response).xpath(
+            '//input[@name="region"]/@value')[0]
+
+        # Prepare login form request
+        req = urllib2.Request(self.config.dashboard.login_url)
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+        req.add_header('Referer', self.config.dashboard.dashboard_url)
+        params = {'username': self.config.identity.username,
+                  'password': self.config.identity.password,
+                  'region': region,
+                  'csrfmiddlewaretoken': csrf_token}
+        self.opener.open(req, urllib.urlencode(params))
+
+    def check_home_page(self):
+        response = self.opener.open(self.config.dashboard.dashboard_url)
+        self.assertIn('Overview', response.read())
+
+    def test_basic_scenario(self):
+        self.check_login_page()
+        self.user_login()
+        self.check_home_page()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 1f75e2f..39b1e10 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -15,8 +15,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 
 
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2097f50..13b31ec 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -15,10 +15,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
-
 from tempest.common.utils.data_utils import rand_name
 from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 
 
@@ -104,7 +103,7 @@
     def nova_list(self):
         servers = self.compute_client.servers.list()
         LOG.debug("server_list:%s" % servers)
-        self.assertTrue(self.server in servers)
+        self.assertIn(self.server, servers)
 
     def nova_show(self):
         got_server = self.compute_client.servers.get(self.server)
@@ -124,7 +123,7 @@
 
     def cinder_list(self):
         volumes = self.volume_client.volumes.list()
-        self.assertTrue(self.volume in volumes)
+        self.assertIn(self.volume, volumes)
 
     def cinder_show(self):
         volume = self.volume_client.volumes.get(self.volume.id)
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 6202e91..8ee740e 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -15,8 +15,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 
 LOG = logging.getLogger(__name__)
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d318dd9..0ec3a1d 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -15,8 +15,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 
 LOG = logging.getLogger(__name__)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 76fac82..6e305c1 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,10 +15,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import log as logging
-
 from tempest.common.utils.data_utils import rand_name
 from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.openstack.common import log as logging
 from tempest.scenario import manager
 
 
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
new file mode 100644
index 0000000..4434604
--- /dev/null
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -0,0 +1,272 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+
+from cinderclient import exceptions as cinder_exceptions
+import testtools
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+import tempest.test
+
+LOG = logging.getLogger(__name__)
+
+
+class TestStampPattern(manager.OfficialClientTest):
+    """
+    This test is for snapshotting an instance/volume and attaching the volume
+    created from snapshot to the instance booted from snapshot.
+    The following is the scenario outline:
+    1. Boot an instance "instance1"
+    2. Create a volume "volume1"
+    3. Attach volume1 to instance1
+    4. Create a filesystem on volume1
+    5. Mount volume1
+    6. Create a file containing a timestamp on volume1
+    7. Unmount volume1
+    8. Detach volume1 from instance1
+    9. Get a snapshot "snapshot_from_volume" of volume1
+    10. Get a snapshot "snapshot_from_instance" of instance1
+    11. Boot an instance "instance2" from snapshot_from_instance
+    12. Create a volume "volume2" from snapshot_from_volume
+    13. Attach volume2 to instance2
+    14. Check the existence of the file created in step 6 in volume2
+    """
+
+    def _wait_for_server_status(self, server, status):
+        self.status_timeout(self.compute_client.servers,
+                            server.id,
+                            status)
+
+    def _wait_for_image_status(self, image_id, status):
+        self.status_timeout(self.image_client.images, image_id, status)
+
+    def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
+        self.status_timeout(self.volume_client.volume_snapshots,
+                            volume_snapshot.id, status)
+
+    def _boot_image(self, image_id):
+        name = rand_name('scenario-server-')
+        client = self.compute_client
+        flavor_id = self.config.compute.flavor_ref
+        LOG.debug("name:%s, image:%s" % (name, image_id))
+        server = client.servers.create(name=name,
+                                       image=image_id,
+                                       flavor=flavor_id,
+                                       key_name=self.keypair.name)
+        self.addCleanup(self.compute_client.servers.delete, server)
+        self.assertEqual(name, server.name)
+        self._wait_for_server_status(server, 'ACTIVE')
+        server = client.servers.get(server)  # getting network information
+        LOG.debug("server:%s" % server)
+        return server
+
+    def _add_keypair(self):
+        name = rand_name('scenario-keypair-')
+        self.keypair = self.compute_client.keypairs.create(name=name)
+        self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
+        self.assertEqual(name, self.keypair.name)
+
+    def _create_floating_ip(self):
+        floating_ip = self.compute_client.floating_ips.create()
+        self.addCleanup(floating_ip.delete)
+        return floating_ip
+
+    def _add_floating_ip(self, server, floating_ip):
+        server.add_floating_ip(floating_ip)
+
+    def _create_security_group_rule(self):
+        sgs = self.compute_client.security_groups.list()
+        for sg in sgs:
+            if sg.name == 'default':
+                secgroup = sg
+
+        ruleset = {
+            # ssh
+            'ip_protocol': 'tcp',
+            'from_port': 22,
+            'to_port': 22,
+            'cidr': '0.0.0.0/0',
+            'group_id': None
+        }
+        sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
+                                                                  **ruleset)
+        self.addCleanup(self.compute_client.security_group_rules.delete,
+                        sg_rule.id)
+
+    def _remote_client_to_server(self, server_or_ip):
+        if isinstance(server_or_ip, basestring):
+            ip = server_or_ip
+        else:
+            network_name_for_ssh = self.config.compute.network_for_ssh
+            ip = server_or_ip.networks[network_name_for_ssh][0]
+        username = self.config.scenario.ssh_user
+        linux_client = RemoteClient(ip,
+                                    username,
+                                    pkey=self.keypair.private_key)
+        return linux_client
+
+    def _ssh_to_server(self, server_or_ip):
+        linux_client = self._remote_client_to_server(server_or_ip)
+        return linux_client.ssh_client
+
+    def _create_image(self, server):
+        snapshot_name = rand_name('scenario-snapshot-')
+        create_image_client = self.compute_client.servers.create_image
+        image_id = create_image_client(server, snapshot_name)
+        self.addCleanup(self.image_client.images.delete, image_id)
+        self._wait_for_server_status(server, 'ACTIVE')
+        self._wait_for_image_status(image_id, 'active')
+        snapshot_image = self.image_client.images.get(image_id)
+        self.assertEquals(snapshot_name, snapshot_image.name)
+        return image_id
+
+    def _create_volume_snapshot(self, volume):
+        snapshot_name = rand_name('scenario-snapshot-')
+        volume_snapshots = self.volume_client.volume_snapshots
+        snapshot = volume_snapshots.create(
+            volume.id, display_name=snapshot_name)
+
+        def cleaner():
+            volume_snapshots.delete(snapshot)
+            try:
+                while volume_snapshots.get(snapshot.id):
+                    time.sleep(1)
+            except cinder_exceptions.NotFound:
+                pass
+        self.addCleanup(cleaner)
+        self._wait_for_volume_status(volume, 'available')
+        self._wait_for_volume_snapshot_status(snapshot, 'available')
+        self.assertEquals(snapshot_name, snapshot.display_name)
+        return snapshot
+
+    def _wait_for_volume_status(self, volume, status):
+        self.status_timeout(
+            self.volume_client.volumes, volume.id, status)
+
+    def _create_volume(self, snapshot_id=None):
+        name = rand_name('scenario-volume-')
+        LOG.debug("volume display-name:%s" % name)
+        volume = self.volume_client.volumes.create(size=1,
+                                                   display_name=name,
+                                                   snapshot_id=snapshot_id)
+        LOG.debug("volume created:%s" % volume.display_name)
+
+        def cleaner():
+            self._wait_for_volume_status(volume, 'available')
+            self.volume_client.volumes.delete(volume)
+        self.addCleanup(cleaner)
+        self._wait_for_volume_status(volume, 'available')
+        self.assertEqual(name, volume.display_name)
+        return volume
+
+    def _attach_volume(self, server, volume):
+        attach_volume_client = self.compute_client.volumes.create_server_volume
+        attached_volume = attach_volume_client(server.id,
+                                               volume.id,
+                                               '/dev/vdb')
+        self.assertEqual(volume.id, attached_volume.id)
+        self._wait_for_volume_status(attached_volume, 'in-use')
+
+    def _detach_volume(self, server, volume):
+        detach_volume_client = self.compute_client.volumes.delete_server_volume
+        detach_volume_client(server.id, volume.id)
+        self._wait_for_volume_status(volume, 'available')
+
+    def _wait_for_volume_available_on_the_system(self, server_or_ip):
+        ssh = self._remote_client_to_server(server_or_ip)
+        conf = self.config
+
+        def _func():
+            part = ssh.get_partitions()
+            LOG.debug("Partitions:%s" % part)
+            return 'vdb' in part
+
+        if not tempest.test.call_until_true(_func,
+                                            conf.compute.build_timeout,
+                                            conf.compute.build_interval):
+            raise exceptions.TimeoutException
+
+    def _create_timestamp(self, server_or_ip):
+        ssh_client = self._ssh_to_server(server_or_ip)
+        ssh_client.exec_command('sudo /usr/sbin/mkfs.ext4 /dev/vdb')
+        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
+        ssh_client.exec_command('sudo sh -c "date > /mnt/timestamp;sync"')
+        self.timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
+        ssh_client.exec_command('sudo umount /mnt')
+
+    def _check_timestamp(self, server_or_ip):
+        ssh_client = self._ssh_to_server(server_or_ip)
+        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
+        got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
+        self.assertEqual(self.timestamp, got_timestamp)
+
+    @testtools.skip("Until Bug #1205344 is fixed")
+    def test_stamp_pattern(self):
+        # prepare for booting an instance
+        self._add_keypair()
+        self._create_security_group_rule()
+
+        # boot an instance and create a timestamp file in it
+        volume = self._create_volume()
+        server = self._boot_image(self.config.compute.image_ref)
+
+        # create and add floating IP to server1
+        if self.config.compute.use_floatingip_for_ssh:
+            floating_ip_for_server = self._create_floating_ip()
+            self._add_floating_ip(server, floating_ip_for_server)
+            ip_for_server = floating_ip_for_server.ip
+        else:
+            ip_for_server = server
+
+        self._attach_volume(server, volume)
+        self._wait_for_volume_available_on_the_system(ip_for_server)
+        self._create_timestamp(ip_for_server)
+        self._detach_volume(server, volume)
+
+        # snapshot the volume
+        volume_snapshot = self._create_volume_snapshot(volume)
+
+        # snapshot the instance
+        snapshot_image_id = self._create_image(server)
+
+        # create a second volume (volume2) from the volume snapshot
+        volume_from_snapshot = self._create_volume(
+            snapshot_id=volume_snapshot.id)
+
+        # boot a second instance (instance2) from the image snapshot
+        server_from_snapshot = self._boot_image(snapshot_image_id)
+
+        # create and add floating IP to server_from_snapshot
+        if self.config.compute.use_floatingip_for_ssh:
+            floating_ip_for_snapshot = self._create_floating_ip()
+            self._add_floating_ip(server_from_snapshot,
+                                  floating_ip_for_snapshot)
+            ip_for_snapshot = floating_ip_for_snapshot.ip
+        else:
+            ip_for_snapshot = server_from_snapshot
+
+        # attach volume2 to instance2
+        self._attach_volume(server_from_snapshot, volume_from_snapshot)
+        self._wait_for_volume_available_on_the_system(ip_for_snapshot)
+
+        # check the existence of the timestamp file in volume2
+        self._check_timestamp(ip_for_snapshot)
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
index 628151a..32ec109 100644
--- a/tempest/services/botoclients.py
+++ b/tempest/services/botoclients.py
@@ -132,6 +132,7 @@
     ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
                            'delete_key_pair', 'import_key_pair',
                            'get_all_key_pairs',
+                           'get_all_tags',
                            'create_image', 'get_image',
                            'register_image', 'deregister_image',
                            'get_all_images', 'get_image_attribute',
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index d054f72..4db7596 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2013 NEC Corporation
+# Copyright 2013 IBM Corp.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,6 +17,7 @@
 #    under the License.
 
 import json
+import urllib
 
 from tempest.common.rest_client import RestClient
 
@@ -27,7 +29,33 @@
                                                  auth_url, tenant_name)
         self.service = self.config.compute.catalog_type
 
-    def list_services(self):
-        resp, body = self.get("os-services")
+    def list_services(self, params=None):
+        url = 'os-services'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
         body = json.loads(body)
         return resp, body['services']
+
+    def enable_service(self, host_name, binary):
+        """
+        Enable service on a host
+        host_name: Name of host
+        binary: Service binary
+        """
+        post_body = json.dumps({'binary': binary, 'host': host_name})
+        resp, body = self.put('os-services/enable', post_body, self.headers)
+        body = json.loads(body)
+        return resp, body['service']
+
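+    # NOTE: an illustrative request body sent by enable_service(); the host
+    # and binary values are placeholders:
+    #
+    #     {"host": "compute-node-1", "binary": "nova-compute"}
+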
+    def disable_service(self, host_name, binary):
+        """
+        Disable service on a host
+        host_name: Name of host
+        binary: Service binary
+        """
+        post_body = json.dumps({'binary': binary, 'host': host_name})
+        resp, body = self.put('os-services/disable', post_body, self.headers)
+        body = json.loads(body)
+        return resp, body['service']
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index f2cca72..12e7034 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -21,9 +21,9 @@
 
 from lxml import etree
 
-from tempest.common import log as logging
 from tempest.common.rest_client import RestClientXML
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.services.compute.xml.common import Document
 from tempest.services.compute.xml.common import Element
 from tempest.services.compute.xml.common import Text
@@ -437,6 +437,12 @@
     def revert_resize(self, server_id, **kwargs):
         return self.action(server_id, 'revertResize', None, **kwargs)
 
+    def stop(self, server_id, **kwargs):
+        return self.action(server_id, 'os-stop', None, **kwargs)
+
+    def start(self, server_id, **kwargs):
+        return self.action(server_id, 'os-start', None, **kwargs)
+
     def create_image(self, server_id, name):
         return self.action(server_id, 'createImage', None, name=name)
 
diff --git a/tempest/services/compute/xml/services_client.py b/tempest/services/compute/xml/services_client.py
index ce23403..ac304e2 100644
--- a/tempest/services/compute/xml/services_client.py
+++ b/tempest/services/compute/xml/services_client.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2013 NEC Corporation
+# Copyright 2013 IBM Corp.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,8 +16,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import urllib
+
 from lxml import etree
 from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
 from tempest.services.compute.xml.common import xml_to_json
 
 
@@ -27,8 +32,42 @@
                                                 auth_url, tenant_name)
         self.service = self.config.compute.catalog_type
 
-    def list_services(self):
-        resp, body = self.get("os-services", self.headers)
+    def list_services(self, params=None):
+        url = 'os-services'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url, self.headers)
         node = etree.fromstring(body)
         body = [xml_to_json(x) for x in node.getchildren()]
         return resp, body
+
+    def enable_service(self, host_name, binary):
+        """
+        Enable service on a host
+        host_name: Name of host
+        binary: Service binary
+        """
+        post_body = Element("service")
+        post_body.add_attr('binary', binary)
+        post_body.add_attr('host', host_name)
+
+        resp, body = self.put('os-services/enable', str(Document(post_body)),
+                              self.headers)
+        body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def disable_service(self, host_name, binary):
+        """
+        Disable service on a host
+        host_name: Name of host
+        binary: Service binary
+        """
+        post_body = Element("service")
+        post_body.add_attr('binary', binary)
+        post_body.add_attr('host', host_name)
+
+        resp, body = self.put('os-services/disable', str(Document(post_body)),
+                              self.headers)
+        body = xml_to_json(etree.fromstring(body))
+        return resp, body
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index a216b55..90e64e7 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -1,3 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
 import httplib2
 import json
 
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index dac77a2..bd48068 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -23,9 +23,9 @@
 import urllib
 
 from tempest.common import glance_http
-from tempest.common import log as logging
 from tempest.common.rest_client import RestClient
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index c4fe6b1..2c808a9 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -1,4 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
 import json
+
 from tempest.common.rest_client import RestClient
 
 
@@ -8,13 +23,11 @@
     Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
     V1 API has been removed from the code base.
 
-    Implements the following operations for each one of the basic Neutron
+    Implements create, delete, list and show for the basic Neutron
     abstractions (networks, sub-networks and ports):
 
-    create
-    delete
-    list
-    show
+    It also implements list, show, update and reset for OpenStack Networking
+    quotas.
     """
 
     def __init__(self, config, username, password, auth_url, tenant_name=None):
@@ -113,3 +126,64 @@
         resp, body = self.get(uri, self.headers)
         body = json.loads(body)
         return resp, body
+
+    def update_quotas(self, tenant_id, **kwargs):
+        put_body = {'quota': kwargs}
+        body = json.dumps(put_body)
+        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+        resp, body = self.put(uri, body, self.headers)
+        body = json.loads(body)
+        return resp, body['quota']
+
+    def show_quotas(self, tenant_id):
+        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+        resp, body = self.get(uri, self.headers)
+        body = json.loads(body)
+        return resp, body['quota']
+
+    def reset_quotas(self, tenant_id):
+        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+        resp, body = self.delete(uri, self.headers)
+        return resp, body
+
+    def list_quotas(self):
+        uri = '%s/quotas' % (self.uri_prefix)
+        resp, body = self.get(uri, self.headers)
+        body = json.loads(body)
+        return resp, body['quotas']
+
+    def update_subnet(self, subnet_id, new_name):
+        put_body = {
+            'subnet': {
+                'name': new_name,
+            }
+        }
+        body = json.dumps(put_body)
+        uri = '%s/subnets/%s' % (self.uri_prefix, subnet_id)
+        resp, body = self.put(uri, body=body, headers=self.headers)
+        body = json.loads(body)
+        return resp, body
+
+    def update_port(self, port_id, new_name):
+        put_body = {
+            'port': {
+                'name': new_name,
+            }
+        }
+        body = json.dumps(put_body)
+        uri = '%s/ports/%s' % (self.uri_prefix, port_id)
+        resp, body = self.put(uri, body=body, headers=self.headers)
+        body = json.loads(body)
+        return resp, body
+
+    def update_network(self, network_id, new_name):
+        put_body = {
+            "network": {
+                "name": new_name,
+            }
+        }
+        body = json.dumps(put_body)
+        uri = '%s/networks/%s' % (self.uri_prefix, network_id)
+        resp, body = self.put(uri, body=body, headers=self.headers)
+        body = json.loads(body)
+        return resp, body
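A short sketch of the new quota calls (illustrative only; the network_client handle, tenant_id and the 'port' quota key are assumptions):

    # Hypothetical example: raise the port quota for a tenant, read it back,
    # then reset the tenant to the default quotas.
    resp, quota = network_client.update_quotas(tenant_id, port=50)
    resp, quota = network_client.show_quotas(tenant_id)
    assert quota['port'] == 50
    resp, _ = network_client.reset_quotas(tenant_id)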
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index 17f6cba..034b452 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -16,9 +16,9 @@
 import time
 import urllib
 
-from tempest.common import log as logging
 from tempest.common.rest_client import RestClient
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index b35c43e..017ca95 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -17,9 +17,9 @@
 
 from lxml import etree
 
-from tempest.common import log as logging
 from tempest.common.rest_client import RestClientXML
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.services.compute.xml.common import Document
 from tempest.services.compute.xml.common import Element
 from tempest.services.compute.xml.common import xml_to_json
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 661763c..31642b0 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -30,7 +30,7 @@
 
 To test installation, do the following (from the tempest/stress directory):
 
-	./run_stress.py etc/sample-test.json -d 30
+	./run_stress.py etc/server-create-destroy-test.json -d 30
 
 This sample test tries to create a few VMs and kill a few VMs.
 
diff --git a/tempest/stress/actions/create_destroy_server.py b/tempest/stress/actions/server_create_destroy.py
similarity index 96%
rename from tempest/stress/actions/create_destroy_server.py
rename to tempest/stress/actions/server_create_destroy.py
index 68dc148..1a1e30b 100644
--- a/tempest/stress/actions/create_destroy_server.py
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -16,7 +16,7 @@
 import tempest.stress.stressaction as stressaction
 
 
-class CreateDestroyServerTest(stressaction.StressAction):
+class ServerCreateDestroyTest(stressaction.StressAction):
 
     def setUp(self, **kwargs):
         self.image = self.manager.config.compute.image_ref
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
new file mode 100644
index 0000000..a7b872f
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -0,0 +1,70 @@
+# (c) 2013 Deutsche Telekom AG
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common.utils.data_utils import rand_name
+import tempest.stress.stressaction as stressaction
+
+
+class VolumeAttachDeleteTest(stressaction.StressAction):
+
+    def setUp(self, **kwargs):
+        self.image = self.manager.config.compute.image_ref
+        self.flavor = self.manager.config.compute.flavor_ref
+
+    def run(self):
+        # Step 1: create volume
+        name = rand_name("volume")
+        self.logger.info("creating volume: %s" % name)
+        resp, volume = self.manager.volumes_client.create_volume(
+            size=1, display_name=name)
+        assert(resp.status == 200)
+        self.manager.volumes_client.wait_for_volume_status(volume['id'],
+                                                           'available')
+        self.logger.info("created volume: %s" % volume['id'])
+
+        # Step 2: create vm instance
+        vm_name = rand_name("instance")
+        self.logger.info("creating vm: %s" % vm_name)
+        resp, server = self.manager.servers_client.create_server(
+            vm_name, self.image, self.flavor)
+        server_id = server['id']
+        assert(resp.status == 202)
+        self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+        self.logger.info("created vm %s" % server_id)
+
+        # Step 3: attach volume to vm
+        self.logger.info("attach volume (%s) to vm %s" %
+                        (volume['id'], server_id))
+        resp, body = self.manager.servers_client.attach_volume(server_id,
+                                                               volume['id'],
+                                                               '/dev/vdc')
+        assert(resp.status == 200)
+        self.manager.volumes_client.wait_for_volume_status(volume['id'],
+                                                           'in-use')
+        self.logger.info("volume (%s) attached to vm %s" %
+                         (volume['id'], server_id))
+
+        # Step 4: delete vm
+        self.logger.info("deleting vm: %s" % vm_name)
+        resp, _ = self.manager.servers_client.delete_server(server_id)
+        assert(resp.status == 204)
+        self.manager.servers_client.wait_for_server_termination(server_id)
+        self.logger.info("deleted vm: %s" % server_id)
+
+        # Step 5: delete volume
+        self.logger.info("deleting volume: %s" % volume['id'])
+        resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
+        assert(resp.status == 202)
+        self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
+        self.logger.info("deleted volume: %s" % volume['id'])
diff --git a/tempest/stress/actions/volume_create_delete.py b/tempest/stress/actions/volume_create_delete.py
index 184f870..e29d9c4 100644
--- a/tempest/stress/actions/volume_create_delete.py
+++ b/tempest/stress/actions/volume_create_delete.py
@@ -14,20 +14,20 @@
 import tempest.stress.stressaction as stressaction
 
 
-class CreateDeleteTest(stressaction.StressAction):
+class VolumeCreateDeleteTest(stressaction.StressAction):
 
     def run(self):
         name = rand_name("volume")
         self.logger.info("creating %s" % name)
-        resp, volume = self.manager.volumes_client.\
-            create_volume(size=1, display_name=name)
+        volumes_client = self.manager.volumes_client
+        resp, volume = volumes_client.create_volume(size=1,
+                                                    display_name=name)
         assert(resp.status == 200)
         vol_id = volume['id']
-        status = 'available'
-        self.manager.volumes_client.wait_for_volume_status(vol_id, status)
+        volumes_client.wait_for_volume_status(vol_id, 'available')
         self.logger.info("created %s" % volume['id'])
         self.logger.info("deleting %s" % name)
-        resp, _ = self.manager.volumes_client.delete_volume(vol_id)
+        resp, _ = volumes_client.delete_volume(vol_id)
         assert(resp.status == 202)
-        self.manager.volumes_client.wait_for_resource_deletion(vol_id)
+        volumes_client.wait_for_resource_deletion(vol_id)
         self.logger.info("deleted %s" % vol_id)
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 785da7d..d9b95e0 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -12,15 +12,16 @@
 #    See the License for the specific language governing permissions and
 #    limitations under the License.
 
-import importlib
 import logging
 import multiprocessing
+import signal
 import time
 
 from tempest import clients
 from tempest.common import ssh
 from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
+from tempest.openstack.common import importutils
 from tempest.stress import cleanup
 
 admin_manager = clients.AdminManager()
@@ -45,6 +46,7 @@
 # add the handler to the root logger
 logger = logging.getLogger('tempest.stress')
 logger.addHandler(_console)
+processes = []
 
 
 def do_ssh(command, host):
@@ -93,15 +95,29 @@
     return None
 
 
-def get_action_object(path):
-    (module_part, _, obj_name) = path.rpartition('.')
-    return getattr(importlib.import_module(module_part), obj_name)
+def sigchld_handler(signal, frame):
+    """
+    Signal handler (only active if stop_on_error is True).
+    """
+    terminate_all_processes()
 
 
-def stress_openstack(tests, duration):
+def terminate_all_processes():
+    """
+    Goes through the process list and terminates all child processes.
+    """
+    for process in processes:
+        if process['process'].is_alive():
+            try:
+                process['process'].terminate()
+            except Exception:
+                pass
+        process['process'].join()
+
+
+def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
     """
     Workload driver. Executes an action function against a nova-cluster.
-
     """
     logfiles = admin_manager.config.stress.target_logfiles
     log_check_interval = int(admin_manager.config.stress.log_check_interval)
@@ -110,13 +126,12 @@
         computes = _get_compute_nodes(controller)
         for node in computes:
             do_ssh("rm -f %s" % logfiles, node)
-    processes = []
     for test in tests:
         if test.get('use_admin', False):
             manager = admin_manager
         else:
             manager = clients.Manager()
-        for _ in xrange(test.get('threads', 1)):
+        for p_number in xrange(test.get('threads', 1)):
             if test.get('use_isolated_tenants', False):
                 username = rand_name("stress_user")
                 tenant_name = rand_name("stress_tenant")
@@ -131,26 +146,56 @@
                                           password="pass",
                                           tenant_name=tenant_name)
 
-            test_obj = get_action_object(test['action'])
-            test_run = test_obj(manager, logger)
+            test_obj = importutils.import_class(test['action'])
+            test_run = test_obj(manager, logger, max_runs, stop_on_error)
 
             kwargs = test.get('kwargs', {})
             test_run.setUp(**dict(kwargs.iteritems()))
 
             logger.debug("calling Target Object %s" %
                          test_run.__class__.__name__)
-            p = multiprocessing.Process(target=test_run.execute,
-                                        args=())
 
-            processes.append(p)
+            mp_manager = multiprocessing.Manager()
+            shared_statistic = mp_manager.dict()
+            shared_statistic['runs'] = 0
+            shared_statistic['fails'] = 0
+
+            p = multiprocessing.Process(target=test_run.execute,
+                                        args=(shared_statistic,))
+
+            process = {'process': p,
+                       'p_number': p_number,
+                       'action': test['action'],
+                       'statistic': shared_statistic}
+
+            processes.append(process)
             p.start()
+    if stop_on_error:
+        # NOTE(mkoderer): only the parent should register the handler
+        signal.signal(signal.SIGCHLD, sigchld_handler)
     end_time = time.time() + duration
     had_errors = False
     while True:
-        remaining = end_time - time.time()
-        if remaining <= 0:
-            break
+        if max_runs is None:
+            remaining = end_time - time.time()
+            if remaining <= 0:
+                break
+        else:
+            remaining = log_check_interval
+            all_proc_term = True
+            for process in processes:
+                if process['process'].is_alive():
+                    all_proc_term = False
+                    break
+            if all_proc_term:
+                break
+
         time.sleep(min(remaining, log_check_interval))
+        if stop_on_error:
+            if any(process['statistic']['fails'] > 0
+                   for process in processes):
+                break
+
         if not logfiles:
             continue
         errors = _error_in_logs(logfiles, computes)
@@ -158,10 +203,30 @@
             had_errors = True
             break
 
-    for p in processes:
-        p.terminate()
-        p.join()
+    terminate_all_processes()
+
+    sum_fails = 0
+    sum_runs = 0
+
+    logger.info("Statistics (per process):")
+    for process in processes:
+        if process['statistic']['fails'] > 0:
+            had_errors = True
+        sum_runs += process['statistic']['runs']
+        sum_fails += process['statistic']['fails']
+        logger.info(" Process %d (%s): Run %d actions (%d failed)" %
+                    (process['p_number'],
+                     process['action'],
+                     process['statistic']['runs'],
+                     process['statistic']['fails']))
+    logger.info("Summary:")
+    logger.info("Run %d actions (%d failed)" %
+                (sum_runs, sum_fails))
 
     if not had_errors:
         logger.info("cleaning up")
         cleanup.cleanup(logger)
+    if had_errors:
+        return 1
+    else:
+        return 0
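The per-process run/fail counters above are shared with the parent through a multiprocessing.Manager dict. A self-contained sketch of that mechanism (not the driver itself; names are illustrative):

    import multiprocessing


    def worker(stats):
        # A child process updates the proxied dict; the parent reads the
        # values after join(), just as the driver reads 'runs' and 'fails'.
        for _ in range(3):
            stats['runs'] += 1


    if __name__ == '__main__':
        manager = multiprocessing.Manager()
        stats = manager.dict()
        stats['runs'] = 0
        stats['fails'] = 0
        p = multiprocessing.Process(target=worker, args=(stats,))
        p.start()
        p.join()
        print("runs=%d fails=%d" % (stats['runs'], stats['fails']))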
diff --git a/tempest/stress/etc/sample-test.json b/tempest/stress/etc/sample-test.json
deleted file mode 100644
index 494c823..0000000
--- a/tempest/stress/etc/sample-test.json
+++ /dev/null
@@ -1,7 +0,0 @@
-[{"action": "tempest.stress.actions.create_destroy_server.CreateDestroyServerTest",
-  "threads": 8,
-  "use_admin": false,
-  "use_isolated_tenants": false,
-  "kwargs": {}
-  }
-]
diff --git a/tempest/stress/etc/server-create-destroy-test.json b/tempest/stress/etc/server-create-destroy-test.json
new file mode 100644
index 0000000..17d5e1a
--- /dev/null
+++ b/tempest/stress/etc/server-create-destroy-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
+  "threads": 8,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {}
+  }
+]
diff --git a/tempest/stress/etc/stress-tox-job.json b/tempest/stress/etc/stress-tox-job.json
index 159794b..dffc469 100644
--- a/tempest/stress/etc/stress-tox-job.json
+++ b/tempest/stress/etc/stress-tox-job.json
@@ -1,13 +1,19 @@
-[{"action": "tempest.stress.actions.create_destroy_server.CreateDestroyServerTest",
+[{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
   "threads": 8,
   "use_admin": false,
   "use_isolated_tenants": false,
   "kwargs": {}
   },
-  {"action": "tempest.stress.actions.volume_create_delete.CreateDeleteTest",
+  {"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
   "threads": 4,
   "use_admin": false,
   "use_isolated_tenants": false,
   "kwargs": {}
+  },
+  {"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
+  "threads": 2,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {}
   }
 ]
diff --git a/tempest/stress/etc/volume-attach-delete-test.json b/tempest/stress/etc/volume-attach-delete-test.json
new file mode 100644
index 0000000..4553ff8
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-delete-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
+  "threads": 4,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {}
+  }
+]
diff --git a/tempest/stress/etc/volume-create-delete-test.json b/tempest/stress/etc/volume-create-delete-test.json
index 6325bdc..e8a58f7 100644
--- a/tempest/stress/etc/volume-create-delete-test.json
+++ b/tempest/stress/etc/volume-create-delete-test.json
@@ -1,4 +1,4 @@
-[{"action": "tempest.stress.actions.volume_create_delete.CreateDeleteTest",
+[{"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
   "threads": 4,
   "use_admin": false,
   "use_isolated_tenants": false,
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
index 06dee0f..32e3ae0 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/stress/run_stress.py
@@ -18,17 +18,26 @@
 
 import argparse
 import json
-
-from tempest.stress import driver
+import sys
 
 
 def main(ns):
+    # NOTE(mkoderer): moved import to make "-h" possible without OpenStack
+    from tempest.stress import driver
+    result = 0
     tests = json.load(open(ns.tests, 'r'))
     if ns.serial:
         for test in tests:
-            driver.stress_openstack([test], ns.duration)
+            step_result = driver.stress_openstack([test],
+                                                  ns.duration,
+                                                  ns.number,
+                                                  ns.stop)
+            # NOTE(mkoderer): we just save the last result code
+            if (step_result != 0):
+                result = step_result
     else:
-        driver.stress_openstack(tests, ns.duration)
+        result = driver.stress_openstack(tests, ns.duration, ns.number,
+                                         ns.stop)
+    return result
 
 
 parser = argparse.ArgumentParser(description='Run stress tests. ')
@@ -36,5 +45,11 @@
                     help="Duration of test in secs.")
 parser.add_argument('-s', '--serial', action='store_true',
                     help="Trigger running tests serially.")
+parser.add_argument('-S', '--stop', action='store_true',
+                    default=False, help="Stop on first error.")
+parser.add_argument('-n', '--number', type=int,
+                    help="How often an action is executed for each process.")
 parser.add_argument('tests', help="Name of the file with test description.")
-main(parser.parse_args())
+
+if __name__ == "__main__":
+    sys.exit(main(parser.parse_args()))
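With the new options the runner can bound the workload by iteration count rather than duration and abort on failure, for example (the description file is one of those added in this change):

    ./run_stress.py etc/volume-create-delete-test.json -n 20 -S

This runs each configured action 20 times per worker process and is intended to stop the whole run on the first failure.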
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index f45ef17..ab09adc 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -20,10 +20,11 @@
 
 class StressAction(object):
 
-    def __init__(self, manager, logger):
+    def __init__(self, manager, logger, max_runs=None, stop_on_error=False):
         self.manager = manager
         self.logger = logger
-        self.runs = 0
+        self.max_runs = max_runs
+        self.stop_on_error = stop_on_error
 
     def _shutdown_handler(self, signal, frame):
         self.tearDown()
@@ -45,7 +46,7 @@
         """
         self.logger.debug("tearDown")
 
-    def execute(self):
+    def execute(self, shared_statistic):
         """This is the main execution entry point called
         by the driver.   We register a signal handler to
         allow us to gracefull tearDown, and then exit.
@@ -53,9 +54,21 @@
         """
         signal.signal(signal.SIGHUP, self._shutdown_handler)
         signal.signal(signal.SIGTERM, self._shutdown_handler)
-        while True:
-            self.run()
-            self.runs = self.runs + 1
+
+        while self.max_runs is None or (shared_statistic['runs'] <
+                                        self.max_runs):
+            try:
+                self.run()
+            except Exception:
+                shared_statistic['fails'] += 1
+                self.logger.exception("Failure in run")
+            finally:
+                shared_statistic['runs'] += 1
+                if self.stop_on_error and (shared_statistic['fails'] > 0):
+                    self.logger.warn("Stop process due to "
+                                     "\"stop-on-error\" argument")
+                    self.tearDown()
+                    sys.exit(1)
 
     def run(self):
         """This method is where the stress test code runs."""
diff --git a/tempest/test.py b/tempest/test.py
index d7008a7..6c304c3 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -15,15 +15,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
 import time
 
+import fixtures
 import nose.plugins.attrib
 import testresources
 import testtools
 
-from tempest.common import log as logging
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
 from tempest import config
-from tempest import manager
+from tempest import exceptions
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
@@ -54,6 +58,42 @@
     return decorator
 
 
+# there is a mis-match between nose and testtools for older pythons.
+# testtools will set skipException to be either
+# unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
+# exception, depending on what it can find. Python <2.7 doesn't have
+# unittest.case.SkipTest; so if unittest2 is not installed it falls
+# back to the internal class.
+#
+# The current nose skip plugin will decide to raise either
+# unittest.case.SkipTest or its own internal exception; it does not
+# look for unittest2 or the internal unittest exception.  Thus we must
+# monkey-patch testtools.TestCase.skipException to be the exception
+# the nose skip plugin expects.
+#
+# However, with the switch to testr nose may not be available, so we
+# require you to opt-in to this fix with an environment variable.
+#
+# This is temporary until upstream nose starts looking for unittest2
+# as testtools does; we can then remove this and ensure unittest2 is
+# available for older pythons; then nose and testtools will agree
+# unittest2.case.SkipTest is the one-true skip test exception.
+#
+#   https://review.openstack.org/#/c/33056
+#   https://github.com/nose-devs/nose/pull/699
+if 'TEMPEST_PY26_NOSE_COMPAT' in os.environ:
+    try:
+        import unittest.case.SkipTest
+        # convince pep8 we're using the import...
+        if unittest.case.SkipTest:
+            pass
+        raise RuntimeError("You have unittest.case.SkipTest; "
+                           "no need to override")
+    except ImportError:
+        LOG.info("Overriding skipException to nose SkipTest")
+        testtools.TestCase.skipException = nose.plugins.skip.SkipTest
+
+
 class BaseTestCase(testtools.TestCase,
                    testtools.testcase.WithAttributes,
                    testresources.ResourcedTestCase):
@@ -65,6 +105,123 @@
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
 
+    def setUp(self):
+        super(BaseTestCase, self).setUp()
+        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
+        try:
+            test_timeout = int(test_timeout)
+        except ValueError:
+            test_timeout = 0
+        if test_timeout > 0:
+            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
+
+        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
+                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
+            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
+            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
+        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
+                os.environ.get('OS_STDERR_CAPTURE') == '1'):
+            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
+            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+
+    @classmethod
+    def _get_identity_admin_client(cls):
+        """
+        Returns an instance of the Identity Admin API client
+        """
+        os = clients.AdminManager(interface=cls._interface)
+        admin_client = os.identity_client
+        return admin_client
+
+    @classmethod
+    def _get_client_args(cls):
+
+        return (
+            cls.config,
+            cls.config.identity.admin_username,
+            cls.config.identity.admin_password,
+            cls.config.identity.uri
+        )
+
+    @classmethod
+    def _get_isolated_creds(cls, admin=False):
+        """
+        Creates a new set of user/tenant/password credentials for a
+        **regular** user of the Compute API so that a test case can
+        operate in an isolated tenant container.
+        """
+        admin_client = cls._get_identity_admin_client()
+        password = "pass"
+
+        while True:
+            try:
+                rand_name_root = rand_name(cls.__name__)
+                if cls.isolated_creds:
+                    # Main user already created. Create the alt or admin one...
+                    if admin:
+                        rand_name_root += '-admin'
+                    else:
+                        rand_name_root += '-alt'
+                tenant_name = rand_name_root + "-tenant"
+                tenant_desc = tenant_name + "-desc"
+
+                resp, tenant = admin_client.create_tenant(
+                    name=tenant_name, description=tenant_desc)
+                break
+            except exceptions.Duplicate:
+                if cls.config.compute.allow_tenant_reuse:
+                    tenant = admin_client.get_tenant_by_name(tenant_name)
+                    LOG.info('Re-using existing tenant %s', tenant)
+                    break
+
+        while True:
+            try:
+                rand_name_root = rand_name(cls.__name__)
+                if cls.isolated_creds:
+                    # Main user already created. Create the alt one...
+                    rand_name_root += '-alt'
+                username = rand_name_root + "-user"
+                email = rand_name_root + "@example.com"
+                resp, user = admin_client.create_user(username,
+                                                      password,
+                                                      tenant['id'],
+                                                      email)
+                break
+            except exceptions.Duplicate:
+                if cls.config.compute.allow_tenant_reuse:
+                    user = admin_client.get_user_by_username(tenant['id'],
+                                                             username)
+                    LOG.info('Re-using existing user %s', user)
+                    break
+        # Store the complete creds (including UUID ids...) for later
+        # but return just the username, tenant_name, password tuple
+        # that the various clients will use.
+        cls.isolated_creds.append((user, tenant))
+
+        # Assign admin role if this is for admin creds
+        if admin:
+            try:
+                _, roles = admin_client.list_roles()
+                role = next(r for r in roles if r['name'] == 'admin')
+            except StopIteration:
+                msg = "No admin role found"
+                raise exceptions.NotFound(msg)
+            admin_client.assign_user_role(tenant['id'], user['id'], role['id'])
+
+        return username, tenant_name, password
+
+    @classmethod
+    def _clear_isolated_creds(cls):
+        if not cls.isolated_creds:
+            return
+        admin_client = cls._get_identity_admin_client()
+
+        for user, tenant in cls.isolated_creds:
+            admin_client.delete_user(user['id'])
+            admin_client.delete_tenant(tenant['id'])
+
 
 def call_until_true(func, duration, sleep_for):
     """
@@ -109,19 +266,22 @@
         cls.resource_keys = {}
         cls.os_resources = []
 
-    def set_resource(self, key, thing):
+    @classmethod
+    def set_resource(cls, key, thing):
         LOG.debug("Adding %r to shared resources of %s" %
-                  (thing, self.__class__.__name__))
-        self.resource_keys[key] = thing
-        self.os_resources.append(thing)
+                  (thing, cls.__name__))
+        cls.resource_keys[key] = thing
+        cls.os_resources.append(thing)
 
-    def get_resource(self, key):
-        return self.resource_keys[key]
+    @classmethod
+    def get_resource(cls, key):
+        return cls.resource_keys[key]
 
-    def remove_resource(self, key):
-        thing = self.resource_keys[key]
-        self.os_resources.remove(thing)
-        del self.resource_keys[key]
+    @classmethod
+    def remove_resource(cls, key):
+        thing = cls.resource_keys[key]
+        cls.os_resources.remove(thing)
+        del cls.resource_keys[key]
 
     def status_timeout(self, things, thing_id, expected_status):
         """
@@ -151,13 +311,3 @@
                                conf.compute.build_interval):
             self.fail("Timed out waiting for thing %s to become %s"
                       % (thing_id, expected_status))
-
-
-class ComputeFuzzClientTest(TestCase):
-
-    """
-    Base test case class for OpenStack Compute API (Nova)
-    that uses the Tempest REST fuzz client libs for calling the API.
-    """
-
-    manager_class = manager.ComputeFuzzClientManager
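A hedged sketch of how a test class might consume the isolated-credential helpers added above (the class name and wiring are assumptions; the real call sites live in the per-service base test classes):

    class ExampleTest(BaseTestCase):

        _interface = 'json'
        isolated_creds = []

        @classmethod
        def setUpClass(cls):
            super(ExampleTest, cls).setUpClass()
            username, tenant_name, password = cls._get_isolated_creds()
            # ... build service clients from the returned credentials ...

        @classmethod
        def tearDownClass(cls):
            cls._clear_isolated_creds()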
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
index 41d31f3..b775817 100644
--- a/tempest/thirdparty/README.rst
+++ b/tempest/thirdparty/README.rst
@@ -1,9 +1,9 @@
 Tempest Guide to Third Party API tests
-========
+======================================
 
 
 What are these tests?
---------
+---------------------
 
 Third party tests are tests for non native OpenStack APIs that are
 part of OpenStack projects. If we ship an API, we're really required
@@ -14,14 +14,14 @@
 
 
 Why are these tests in tempest?
---------
+-------------------------------
 
 If we ship an API in an OpenStack component, there should be tests in
 tempest to exercise it in some way.
 
 
 Scope of these tests
---------
+--------------------
 
 Third party API testing should be limited to the functional testing of
 third party API compliance. Complex scenarios should be avoided, and
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 9ff628c..ba627e3 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -28,10 +28,10 @@
 import keystoneclient.exceptions
 
 import tempest.clients
-from tempest.common import log as logging
 from tempest.common.utils.file_utils import have_effective_read_access
 import tempest.config
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 import tempest.test
 from tempest.thirdparty.boto.utils.wait import re_search_wait
 from tempest.thirdparty.boto.utils.wait import state_wait
@@ -58,8 +58,9 @@
 
     A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
     boto_logger = logging.getLogger('boto')
-    level = boto_logger.level
-    boto_logger.setLevel(orig_logging.CRITICAL)  # suppress logging for these
+    level = boto_logger.logger.level
+    boto_logger.logger.setLevel(orig_logging.CRITICAL)  # suppress logging
+                                                        # for these
 
     def _cred_sub_check(connection_data):
         if not id_matcher.match(connection_data["aws_access_key_id"]):
@@ -99,7 +100,7 @@
     except keystoneclient.exceptions.Unauthorized:
         S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                                " faild to get them even by keystoneclient"
-    boto_logger.setLevel(level)
+    boto_logger.logger.setLevel(level)
     return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
             'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
             'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 89891d2..df2ff6a 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -19,10 +19,10 @@
 import testtools
 
 from tempest import clients
-from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
 from tempest.common.utils.linux.remote_client import RemoteClient
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 from tempest.thirdparty.boto.test import BotoTestCase
 from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
@@ -88,6 +88,53 @@
                                                            image["image_id"])
 
     @attr(type='smoke')
+    def test_run_idempotent_instances(self):
+        # EC2 run instances idempotently
+
+        def _run_instance(client_token):
+            reservation = self.ec2_client.run_instances(
+                image_id=self.images["ami"]["image_id"],
+                kernel_id=self.images["aki"]["image_id"],
+                ramdisk_id=self.images["ari"]["image_id"],
+                instance_type=self.instance_type,
+                client_token=client_token)
+            rcuk = self.addResourceCleanUp(self.destroy_reservation,
+                                           reservation)
+            return (reservation, rcuk)
+
+        def _terminate_reservation(reservation, rcuk):
+            for instance in reservation.instances:
+                instance.terminate()
+            self.cancelResourceCleanUp(rcuk)
+
+        reservation_1, rcuk_1 = _run_instance('token_1')
+        reservation_2, rcuk_2 = _run_instance('token_2')
+        reservation_1a, rcuk_1a = _run_instance('token_1')
+
+        self.assertIsNotNone(reservation_1)
+        self.assertIsNotNone(reservation_2)
+        self.assertIsNotNone(reservation_1a)
+
+        # same reservation for token_1
+        self.assertEqual(reservation_1.id, reservation_1a.id)
+
+        # Cancel cleanup -- since it's a duplicate, it's
+        # handled by rcuk1
+        self.cancelResourceCleanUp(rcuk_1a)
+
+        _terminate_reservation(reservation_1, rcuk_1)
+        _terminate_reservation(reservation_2, rcuk_2)
+
+        reservation_3, rcuk_3 = _run_instance('token_1')
+        self.assertIsNotNone(reservation_3)
+
+        # make sure we don't get the old reservation back
+        self.assertNotEqual(reservation_1.id, reservation_3.id)
+
+        # clean up
+        _terminate_reservation(reservation_3, rcuk_3)
+
+    @attr(type='smoke')
     def test_run_stop_terminate_instance(self):
         # EC2 run, stop and terminate instance
         image_ami = self.ec2_client.get_image(self.images["ami"]
@@ -113,6 +160,53 @@
         self.cancelResourceCleanUp(rcuk)
 
     @attr(type='smoke')
+    def test_run_stop_terminate_instance_with_tags(self):
+        # EC2 run, stop and terminate instance with tags
+        image_ami = self.ec2_client.get_image(self.images["ami"]
+                                              ["image_id"])
+        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
+                                    ramdisk_id=self.images["ari"]["image_id"],
+                                    instance_type=self.instance_type)
+        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
+
+        for instance in reservation.instances:
+            LOG.info("state: %s", instance.state)
+            if instance.state != "running":
+                self.assertInstanceStateWait(instance, "running")
+            instance.add_tag('key1', value='value1')
+
+        tags = self.ec2_client.get_all_tags()
+        self.assertEquals(tags[0].name, 'key1')
+        self.assertEquals(tags[0].value, 'value1')
+
+        tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
+        self.assertEquals(tags[0].name, 'key1')
+        self.assertEquals(tags[0].value, 'value1')
+
+        tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
+        self.assertEquals(tags[0].name, 'key1')
+        self.assertEquals(tags[0].value, 'value1')
+
+        tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
+        self.assertEquals(len(tags), 0)
+
+        for instance in reservation.instances:
+            instance.remove_tag('key1', value='value1')
+
+        tags = self.ec2_client.get_all_tags()
+        self.assertEquals(len(tags), 0)
+
+        for instance in reservation.instances:
+            instance.stop()
+            LOG.info("state: %s", instance.state)
+            if instance.state != "stopped":
+                self.assertInstanceStateWait(instance, "stopped")
+
+        for instance in reservation.instances:
+            instance.terminate()
+        self.cancelResourceCleanUp(rcuk)
+
+    @attr(type='smoke')
     @testtools.skip("Skipped until the Bug #1098891 is resolved")
     def test_run_terminate_instance(self):
         # EC2 run, terminate immediately
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
index c90c586..dbb3104 100644
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -16,7 +16,7 @@
 #    under the License.
 
 from tempest import clients
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 from tempest.test import attr
 from tempest.thirdparty.boto.test import BotoTestCase
 
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 0f836d0..e2ca15f 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -17,14 +17,11 @@
 
 import os
 
-import testtools
-
 from tempest import clients
 from tempest.common.utils.data_utils import rand_name
 from tempest.test import attr
 from tempest.thirdparty.boto.test import BotoTestCase
 from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
-from tempest.thirdparty.boto.utils.wait import state_wait
 
 
 class S3ImagesTest(BotoTestCase):
@@ -53,8 +50,6 @@
                                cls.bucket_name)
         s3_upload_dir(bucket, cls.materials_path)
 
-    #Note(afazekas): Without the normal status change test!
-    # otherwise I would skip it too
     @attr(type='smoke')
     def test_register_get_deregister_ami_image(self):
         # Register and deregister ami image
@@ -72,13 +67,8 @@
         retrieved_image = self.images_client.get_image(image["image_id"])
         self.assertTrue(retrieved_image.name == image["name"])
         self.assertTrue(retrieved_image.id == image["image_id"])
-        state = retrieved_image.state
-        if state != "available":
-            def _state():
-                retr = self.images_client.get_image(image["image_id"])
-                return retr.state
-            state = state_wait(_state, "available")
-        self.assertEqual("available", state)
+        if retrieved_image.state != "available":
+            self.assertImageStateWait(retrieved_image, "available")
         self.images_client.deregister_image(image["image_id"])
         self.assertNotIn(image["image_id"], str(
             self.images_client.get_all_images()))
@@ -107,7 +97,6 @@
             self.images_client.get_all_images()))
         self.cancelResourceCleanUp(image["cleanUp"])
 
-    @testtools.skip("Skipped until the Bug #1074908 and #1074904 is resolved")
     def test_register_get_deregister_ari_image(self):
         # Register and deregister ari image
         image = {"name": rand_name("ari-name-"),
diff --git a/tempest/thirdparty/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
index a309a12..f8fa61b 100644
--- a/tempest/thirdparty/boto/utils/s3.py
+++ b/tempest/thirdparty/boto/utils/s3.py
@@ -22,7 +22,7 @@
 import boto
 import boto.s3.key
 
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
index 6b3ef27..d8fca3b 100644
--- a/tempest/thirdparty/boto/utils/wait.py
+++ b/tempest/thirdparty/boto/utils/wait.py
@@ -21,8 +21,8 @@
 import boto.exception
 from testtools import TestCase
 
-from tempest.common import log as logging
 import tempest.config
+from tempest.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
index 3bd057c..b2632f1 100644
--- a/tempest/whitebox/manager.py
+++ b/tempest/whitebox/manager.py
@@ -22,11 +22,11 @@
 
 from sqlalchemy import create_engine, MetaData
 
-from tempest.common import log as logging
 from tempest.common.ssh import Client
 from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
-from tempest import test
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
 
 LOG = logging.getLogger(__name__)
 
@@ -47,7 +47,7 @@
     pass
 
 
-class ComputeWhiteboxTest(test.ComputeFuzzClientTest, WhiteboxTest):
+class ComputeWhiteboxTest(manager.OfficialClientTest):
 
     """
     Base smoke test case class for OpenStack Compute API (Nova)
@@ -64,15 +64,6 @@
         cls.nova_dir = cls.config.whitebox.source_dir
         cls.compute_bin_dir = cls.config.whitebox.bin_dir
         cls.compute_config_path = cls.config.whitebox.config_path
-        cls.servers_client = cls.manager.servers_client
-        cls.images_client = cls.manager.images_client
-        cls.flavors_client = cls.manager.flavors_client
-        cls.extensions_client = cls.manager.extensions_client
-        cls.floating_ips_client = cls.manager.floating_ips_client
-        cls.keypairs_client = cls.manager.keypairs_client
-        cls.security_groups_client = cls.manager.security_groups_client
-        cls.limits_client = cls.manager.limits_client
-        cls.volumes_client = cls.manager.volumes_client
         cls.build_interval = cls.config.compute.build_interval
         cls.build_timeout = cls.config.compute.build_timeout
         cls.ssh_user = cls.config.compute.ssh_user
@@ -80,38 +71,27 @@
         cls.image_ref_alt = cls.config.compute.image_ref_alt
         cls.flavor_ref = cls.config.compute.flavor_ref
         cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
-        cls.servers = []
 
+    #NOTE(afazekas): Mimics the helper method used in the api tests
     @classmethod
-    def tearDownClass(cls):
-        # NOTE(jaypipes): Tests often add things in a particular order
-        # so we destroy resources in the reverse order in which resources
-        # are added to the test class object
-        if not cls.os_resources:
-            return
-        thing = cls.os_resources.pop()
-        while True:
-            LOG.debug("Deleting %r from shared resources of %s" %
-                      (thing, cls.__name__))
-            # Resources in novaclient all have a delete() method
-            # which destroys the resource...
-            thing.delete()
-            if not cls.os_resources:
-                return
-            thing = cls.os_resources.pop()
+    def create_server(cls, **kwargs):
+        flavor_ref = cls.config.compute.flavor_ref
+        image_ref = cls.config.compute.image_ref
+        name = rand_name(cls.__name__ + "-instance")
+        if 'name' in kwargs:
+            name = kwargs.pop('name')
+        flavor = kwargs.get('flavor', flavor_ref)
+        image_id = kwargs.get('image_id', image_ref)
 
-    @classmethod
-    def create_server(cls, image_id=None):
-        """Wrapper utility that returns a test server."""
-        server_name = rand_name(cls.__name__ + "-instance")
-        flavor = cls.flavor_ref
-        if not image_id:
-            image_id = cls.image_ref
+        server = cls.compute_client.servers.create(
+            name, image_id, flavor, **kwargs)
 
-        resp, server = cls.servers_client.create_server(
-            server_name, image_id, flavor)
-        cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
-        cls.servers.append(server)
+        if 'wait_until' in kwargs:
+            cls.status_timeout(cls.compute_client.servers, server.id,
+                               kwargs['wait_until'])
+
+        server = cls.compute_client.servers.get(server.id)
+        cls.set_resource(name, server)
         return server
 
     @classmethod
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
index dc68336..0afb17e 100644
--- a/tempest/whitebox/test_images_whitebox.py
+++ b/tempest/whitebox/test_images_whitebox.py
@@ -15,23 +15,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.compute import base
 from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
 from tempest.whitebox import manager
 
-#TODO(afazekas): The whitebox tests are using complex testclass/manager
-# hierarchy, without a real need. It is difficult to maintain.
-# They could share more code with scenario tests.
+from novaclient import exceptions
 
 
-class ImagesWhiteboxTest(manager.ComputeWhiteboxTest, base.BaseComputeTest):
+class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
     _interface = 'json'
 
     @classmethod
     def setUpClass(cls):
         super(ImagesWhiteboxTest, cls).setUpClass()
-        cls.client = cls.images_client
+        cls.create_image = cls.compute_client.servers.create_image
         cls.connection, cls.meta = cls.get_db_handle_and_meta()
         cls.shared_server = cls.create_server()
         cls.image_ids = []
@@ -39,7 +35,6 @@
     @classmethod
     def tearDownClass(cls):
         """Delete images and server after a test is executed."""
-        cls.servers_client.delete_server(cls.shared_server['id'])
         for image_id in cls.image_ids:
             cls.client.delete_image(image_id)
             cls.image_ids.remove(image_id)
@@ -62,18 +57,18 @@
     def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
         """Base method for create image tests based on vm and task states."""
         try:
-            self.update_state(self.shared_server['id'], vm_state,
+            self.update_state(self.shared_server.id, vm_state,
                               task_state, deleted)
 
             image_name = rand_name('snap-')
-            self.assertRaises(exceptions.Duplicate,
-                              self.client.create_image,
-                              self.shared_server['id'], image_name)
+            self.assertRaises(exceptions.Conflict,
+                              self.create_image,
+                              self.shared_server.id, image_name)
         except Exception:
             self.fail("Should not allow create image when vm_state=%s and "
                       "task_state=%s" % (vm_state, task_state))
         finally:
-            self.update_state(self.shared_server['id'], 'active', None)
+            self.update_state(self.shared_server.id, 'active', None)
 
     def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
         # 409 error when instance states are building,scheduling
diff --git a/test-requirements.txt b/test-requirements.txt
index 2185997..236a473 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@
 pep8==1.4.5
 pyflakes==0.7.2
 flake8==2.0
-hacking>=0.5.3,<0.6
+hacking>=0.5.6,<0.7
 # needed for doc build
+docutils==0.9.1
 sphinx>=1.1.2
-
diff --git a/tools/colorizer.py b/tools/colorizer.py
new file mode 100755
index 0000000..76a3bd3
--- /dev/null
+++ b/tools/colorizer.py
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013, Nebula, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+#    Permission is hereby granted, free of charge, to any person obtaining
+#    a copy of this software and associated documentation files (the
+#    "Software"), to deal in the Software without restriction, including
+#    without limitation the rights to use, copy, modify, merge, publish,
+#    distribute, sublicense, and/or sell copies of the Software, and to
+#    permit persons to whom the Software is furnished to do so, subject to
+#    the following conditions:
+#
+#    The above copyright notice and this permission notice shall be
+#    included in all copies or substantial portions of the Software.
+#
+#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""Display a subunit stream through a colorized unittest test runner."""
+
+import heapq
+import subunit
+import sys
+import unittest
+
+import testtools
+
+
+class _AnsiColorizer(object):
+    """
+    A colorizer is an object that loosely wraps around a stream, allowing
+    callers to write text to the stream in a particular color.
+
+    Colorizer classes must implement C{supported()} and C{write(text, color)}.
+    """
+    _colors = dict(black=30, red=31, green=32, yellow=33,
+                   blue=34, magenta=35, cyan=36, white=37)
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def supported(cls, stream=sys.stdout):
+        """
+        A class method that returns True if the current platform supports
+        coloring terminal output using this method. Returns False otherwise.
+        """
+        if not stream.isatty():
+            return False  # auto color only on TTYs
+        try:
+            import curses
+        except ImportError:
+            return False
+        else:
+            try:
+                try:
+                    return curses.tigetnum("colors") > 2
+                except curses.error:
+                    curses.setupterm()
+                    return curses.tigetnum("colors") > 2
+            except Exception:
+                # guess false in case of error
+                return False
+    supported = classmethod(supported)
+
+    def write(self, text, color):
+        """
+        Write the given text to the stream in the given color.
+
+        @param text: Text to be written to the stream.
+
+        @param color: A string label for a color. e.g. 'red', 'white'.
+        """
+        color = self._colors[color]
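+        # Bold plus the chosen foreground color, then a reset escape.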
+        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
+class _Win32Colorizer(object):
+    """
+    See _AnsiColorizer docstring.
+    """
+    def __init__(self, stream):
+        import win32console
+        red, green, blue, bold = (win32console.FOREGROUND_RED,
+                                  win32console.FOREGROUND_GREEN,
+                                  win32console.FOREGROUND_BLUE,
+                                  win32console.FOREGROUND_INTENSITY)
+        self.stream = stream
+        self.screenBuffer = win32console.GetStdHandle(
+            win32console.STD_OUT_HANDLE)
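+        # Console text attributes are bit flags; combine primaries (and intensity) to build the palette.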
+        self._colors = {'normal': red | green | blue,
+                        'red': red | bold,
+                        'green': green | bold,
+                        'blue': blue | bold,
+                        'yellow': red | green | bold,
+                        'magenta': red | blue | bold,
+                        'cyan': green | blue | bold,
+                        'white': red | green | blue | bold}
+
+    def supported(cls, stream=sys.stdout):
+        try:
+            import win32console
+            screenBuffer = win32console.GetStdHandle(
+                win32console.STD_OUT_HANDLE)
+        except ImportError:
+            return False
+        import pywintypes
+        try:
+            screenBuffer.SetConsoleTextAttribute(
+                win32console.FOREGROUND_RED |
+                win32console.FOREGROUND_GREEN |
+                win32console.FOREGROUND_BLUE)
+        except pywintypes.error:
+            return False
+        else:
+            return True
+    supported = classmethod(supported)
+
+    def write(self, text, color):
+        color = self._colors[color]
+        self.screenBuffer.SetConsoleTextAttribute(color)
+        self.stream.write(text)
+        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
+class _NullColorizer(object):
+    """
+    See _AnsiColorizer docstring.
+    """
+    def __init__(self, stream):
+        self.stream = stream
+
+    def supported(cls, stream=sys.stdout):
+        return True
+    supported = classmethod(supported)
+
+    def write(self, text, color):
+        self.stream.write(text)
+
+
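+# Map elapsed wall-clock time to a color: over 1s red, over 0.25s yellow, otherwise green.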
+def get_elapsed_time_color(elapsed_time):
+    if elapsed_time > 1.0:
+        return 'red'
+    elif elapsed_time > 0.25:
+        return 'yellow'
+    else:
+        return 'green'
+
+
+class NovaTestResult(testtools.TestResult):
+    def __init__(self, stream, descriptions, verbosity):
+        super(NovaTestResult, self).__init__()
+        self.stream = stream
+        self.showAll = verbosity > 1
+        self.num_slow_tests = 10
+        self.slow_tests = []  # this is a fixed-sized heap
+        self.colorizer = None
+        # NOTE(vish): reset stdout for the terminal check
+        stdout = sys.stdout
+        sys.stdout = sys.__stdout__
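+        # Pick the first colorizer the terminal supports, falling back to plain output.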
+        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
+            if colorizer.supported():
+                self.colorizer = colorizer(self.stream)
+                break
+        sys.stdout = stdout
+        self.start_time = None
+        self.last_time = {}
+        self.results = {}
+        self.last_written = None
+
+    def _writeElapsedTime(self, elapsed):
+        color = get_elapsed_time_color(elapsed)
+        self.colorizer.write("  %.2f" % elapsed, color)
+
+    def _addResult(self, test, *args):
+        try:
+            name = test.id()
+        except AttributeError:
+            name = 'Unknown.unknown'
+        test_class, test_name = name.rsplit('.', 1)
+
+        elapsed = (self._now() - self.start_time).total_seconds()
+        item = (elapsed, test_class, test_name)
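+        # Track the num_slow_tests slowest tests; heappushpop evicts the fastest entry once the heap is full.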
+        if len(self.slow_tests) >= self.num_slow_tests:
+            heapq.heappushpop(self.slow_tests, item)
+        else:
+            heapq.heappush(self.slow_tests, item)
+
+        self.results.setdefault(test_class, [])
+        self.results[test_class].append((test_name, elapsed) + args)
+        self.last_time[test_class] = self._now()
+        self.writeTests()
+
+    def _writeResult(self, test_name, elapsed, long_result, color,
+                     short_result, success):
+        if self.showAll:
+            self.stream.write('    %s' % str(test_name).ljust(66))
+            self.colorizer.write(long_result, color)
+            if success:
+                self._writeElapsedTime(elapsed)
+            self.stream.writeln()
+        else:
+            self.colorizer.write(short_result, color)
+
+    def addSuccess(self, test):
+        super(NovaTestResult, self).addSuccess(test)
+        self._addResult(test, 'OK', 'green', '.', True)
+
+    def addFailure(self, test, err):
+        if test.id() == 'process-returncode':
+            return
+        super(NovaTestResult, self).addFailure(test, err)
+        self._addResult(test, 'FAIL', 'red', 'F', False)
+
+    def addError(self, test, err):
+        super(NovaTestResult, self).addError(test, err)
+        self._addResult(test, 'ERROR', 'red', 'E', False)
+
+    def addSkip(self, test, reason=None, details=None):
+        super(NovaTestResult, self).addSkip(test, reason, details)
+        self._addResult(test, 'SKIP', 'blue', 'S', True)
+
+    def startTest(self, test):
+        self.start_time = self._now()
+        super(NovaTestResult, self).startTest(test)
+
+    def writeTestCase(self, cls):
+        if not self.results.get(cls):
+            return
+        if cls != self.last_written:
+            self.colorizer.write(cls, 'white')
+            self.stream.writeln()
+        for result in self.results[cls]:
+            self._writeResult(*result)
+        del self.results[cls]
+        self.stream.flush()
+        self.last_written = cls
+
+    def writeTests(self):
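+        # Flush the oldest pending classes when the class currently being written has been quiet for over two seconds; otherwise keep appending to it.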
+        time = self.last_time.get(self.last_written, self._now())
+        if not self.last_written or (self._now() - time).total_seconds() > 2.0:
+            diff = 3.0
+            while diff > 2.0:
+                classes = self.results.keys()
+                oldest = min(classes, key=lambda x: self.last_time[x])
+                diff = (self._now() - self.last_time[oldest]).total_seconds()
+                self.writeTestCase(oldest)
+        else:
+            self.writeTestCase(self.last_written)
+
+    def done(self):
+        self.stopTestRun()
+
+    def stopTestRun(self):
+        for cls in list(self.results.iterkeys()):
+            self.writeTestCase(cls)
+        self.stream.writeln()
+        self.writeSlowTests()
+
+    def writeSlowTests(self):
+        # Pare out 'fast' tests
+        slow_tests = [item for item in self.slow_tests
+                      if get_elapsed_time_color(item[0]) != 'green']
+        if slow_tests:
+            slow_total_time = sum(item[0] for item in slow_tests)
+            slow = ("Slowest %i tests took %.2f secs:"
+                    % (len(slow_tests), slow_total_time))
+            self.colorizer.write(slow, 'yellow')
+            self.stream.writeln()
+            last_cls = None
+            # sort by name
+            for elapsed, cls, name in sorted(slow_tests,
+                                             key=lambda x: x[1] + x[2]):
+                if cls != last_cls:
+                    self.colorizer.write(cls, 'white')
+                    self.stream.writeln()
+                last_cls = cls
+                self.stream.write('    %s' % str(name).ljust(68))
+                self._writeElapsedTime(elapsed)
+                self.stream.writeln()
+
+    def printErrors(self):
+        if self.showAll:
+            self.stream.writeln()
+        self.printErrorList('ERROR', self.errors)
+        self.printErrorList('FAIL', self.failures)
+
+    def printErrorList(self, flavor, errors):
+        for test, err in errors:
+            self.colorizer.write("=" * 70, 'red')
+            self.stream.writeln()
+            self.colorizer.write(flavor, 'red')
+            self.stream.writeln(": %s" % test.id())
+            self.colorizer.write("-" * 70, 'red')
+            self.stream.writeln()
+            self.stream.writeln("%s" % err)
+
+
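+# Parse the subunit stream arriving on stdin as a unittest-compatible test case.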
+test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
+
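+# unittest.TextTestRunner only accepts resultclass on Python 2.7+, so fall back to the stock result on 2.6.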
+if sys.version_info[0:2] <= (2, 6):
+    runner = unittest.TextTestRunner(verbosity=2)
+else:
+    runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)
+
+if runner.run(test).wasSuccessful():
+    exit_code = 0
+else:
+    exit_code = 1
+sys.exit(exit_code)
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
new file mode 100755
index 0000000..a5a6076
--- /dev/null
+++ b/tools/pretty_tox.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
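+# $1 carries extra testr arguments (e.g. a test regex); subunit2pyunit renders the resulting subunit stream.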
+TESTRARGS=$1
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit2pyunit
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
new file mode 100755
index 0000000..490d263
--- /dev/null
+++ b/tools/pretty_tox_serial.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+TESTRARGS=$@
+
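+# Initialize the testr repository on first use.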
+if [ ! -d .testrepository ]; then
+    testr init
+fi
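+# subunit-2to1 downgrades the stream to subunit v1 for colorizer.py; finish by listing the slowest tests.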
+testr run --subunit $TESTRARGS | subunit-2to1 | tools/colorizer.py
+testr slowest
diff --git a/tools/run_test_classes.py b/tools/run_test_classes.py
deleted file mode 100755
index c01b046..0000000
--- a/tools/run_test_classes.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sys
-
-
-def filter_classes(test_ids):
-    test_classes = map(lambda x: x.rsplit('.', 1)[0], test_ids)
-
-    #Remove duplicates from the list
-    uniq_class = {}
-    result = []
-    for test_class in test_classes:
-        if test_class in uniq_class:
-            continue
-        uniq_class[test_class] = 1
-        result.append(test_class)
-    return result
-
-
-def usage():
-    msg = """
-    This command is used to filter out the unique list of test cases (classes)
-    from a list of testr test_ids.
-
-    Usage: run_test_classes.py <test id file>
-          """
-    print(msg)
-    sys.exit(1)
-
-
-def main():
-    if len(sys.argv) == 2:
-        test_list_path = sys.argv[1]
-        test_list_file = open(test_list_path, 'r')
-        test_list = test_list_file.readlines()
-        for test_class in filter_classes(test_list):
-            print test_class
-        test_list_file.close()
-    else:
-        usage()
-
-if __name__ == '__main__':
-    main()
diff --git a/tox.ini b/tox.ini
index 04b845a..7eae948 100644
--- a/tox.ini
+++ b/tox.ini
@@ -26,11 +26,17 @@
 commands =
   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/api tempest/scenario tempest/thirdparty tempest/cli
 
+[testenv:testr-serial]
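+# Serial testr run with colorized output; extra arguments are passed through {posargs}.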
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  sh tools/pretty_tox_serial.sh '{posargs} tempest.api tempest.scenario tempest.thirdparty tempest.cli'
+
 [testenv:testr-full]
 sitepackages = True
 setenv = VIRTUAL_ENV={envdir}
 commands =
-  python setup.py testr --slowest --testr-args='tempest.api tempest.scenario tempest.thirdparty tempest.cli'
+  sh tools/pretty_tox.sh 'tempest.api tempest.scenario tempest.thirdparty tempest.cli {posargs}'
 
 [testenv:smoke]
 sitepackages = True
@@ -78,6 +84,7 @@
 local-check-factory = tempest.hacking.checks.factory
 
 [flake8]
+# E125 is a won't-fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail, see https://review.openstack.org/#/c/36788/
 ignore = E125,H302,H404
 show-source = True
 exclude = .git,.venv,.tox,dist,doc,openstack,*egg