Merge "Access credential fields as attributes"
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 8734624..f0e70bd 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -175,7 +175,6 @@
                 pass
             except Exception:
                 LOG.exception('Exception raised deleting image %s' % image_id)
-                pass
 
     @classmethod
     def clear_security_groups(cls):
@@ -190,7 +189,6 @@
                 LOG.info('Exception raised deleting security group %s',
                          sg['id'])
                 LOG.exception(exc)
-                pass
 
     @classmethod
     def tearDownClass(cls):
@@ -322,7 +320,6 @@
                 cls.servers_client.wait_for_server_termination(server_id)
             except Exception:
                 LOG.exception('Failed to delete server %s' % server_id)
-                pass
         resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
         if cls._api_version == 2:
             cls.password = server['adminPass']
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index fc313f2..74444d7 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -38,6 +38,7 @@
         # add lists for watched resources
         cls._node_group_templates = []
         cls._cluster_templates = []
+        cls._data_sources = []
 
     @classmethod
     def tearDownClass(cls):
@@ -45,6 +46,8 @@
                               cls.client.delete_cluster_template)
         cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
                               cls.client.delete_node_group_template)
+        cls.cleanup_resources(getattr(cls, '_data_sources', []),
+                              cls.client.delete_data_source)
         cls.clear_isolated_creds()
         super(BaseDataProcessingTest, cls).tearDownClass()
 
@@ -96,3 +99,17 @@
         cls._cluster_templates.append(body['id'])
 
         return resp, body
+
+    @classmethod
+    def create_data_source(cls, name, type, url, **kwargs):
+        """Creates watched data source with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object. All resources created in this method will be automatically
+        removed in tearDownClass method.
+        """
+        resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+        # store id of created data source
+        cls._data_sources.append(body['id'])
+
+        return resp, body
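
The new create_data_source() wrapper mirrors the existing template helpers: it records each created data source id in cls._data_sources so tearDownClass can delete it through cleanup_resources(). A minimal usage sketch, assuming a hypothetical test class and a Swift-backed source (none of these names come from this change):

    class DataSourceTest(BaseDataProcessingTest):

        def test_data_source_is_cleaned_up(self):
            # The helper registers the id for automatic deletion in
            # tearDownClass, so the test needs no cleanup of its own.
            resp, body = self.create_data_source(
                name='ds-example',                   # hypothetical name
                type='swift',                        # assumed source type
                url='swift://container/object.txt')
            self.assertEqual('swift', body['type'])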
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8466c7b..b90891b 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -290,6 +290,7 @@
         return image_id
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_server_id(self):
         # The images should contain images filtered by server id
         resp, images = self.client.image_list_detail(
@@ -299,6 +300,7 @@
         self.assertEqual(self.snapshot_set, result_set)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_type(self):
         # The list of servers should be filtered by image type
         params = {'image_type': 'snapshot'}
@@ -309,6 +311,7 @@
         self.assertIn(self.snapshot, result_set)
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_limit(self):
         # Verify only the expected number of results are returned
         resp, images = self.client.image_list_detail(limit=1)
@@ -317,6 +320,7 @@
         self.assertEqual(1, len(images))
 
     @test.attr(type='gate')
+    @test.services('compute')
     def test_index_by_change_since(self):
         # Verify an update image is returned
         # Becoming ACTIVE will modify the updated time
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 5ab581b..cf0a2b6 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -294,7 +294,7 @@
     def create_vpnservice(cls, subnet_id, router_id):
         """Wrapper utility that returns a test vpn service."""
         resp, body = cls.client.create_vpnservice(
-            subnet_id, router_id, admin_state_up=True,
+            subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
             name=data_utils.rand_name("vpnservice-"))
         vpnservice = body['vpnservice']
         cls.vpnservices.append(vpnservice)
@@ -303,7 +303,7 @@
     @classmethod
     def create_ikepolicy(cls, name):
         """Wrapper utility that returns a test ike policy."""
-        resp, body = cls.client.create_ikepolicy(name)
+        resp, body = cls.client.create_ikepolicy(name=name)
         ikepolicy = body['ikepolicy']
         cls.ikepolicies.append(ikepolicy)
         return ikepolicy
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 4cc0338..7605b8a 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -262,10 +262,25 @@
         self.addCleanup(
             self._delete_extra_routes,
             self.router['id'])
-        # Update router extra route
+        # Update the router with an extra route; the second IP of the
+        # range is used as the next hop
         cidr = netaddr.IPNetwork(self.subnet['cidr'])
+        next_hop = str(cidr[2])
+        destination = str(self.subnet['cidr'])
         resp, extra_route = self.client.update_extra_routes(
-            self.router['id'], str(cidr[0]), str(self.subnet['cidr']))
+            self.router['id'], next_hop, destination)
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(1, len(extra_route['router']['routes']))
+        self.assertEqual(destination,
+                         extra_route['router']['routes'][0]['destination'])
+        self.assertEqual(next_hop,
+                         extra_route['router']['routes'][0]['nexthop'])
+        resp, show_body = self.client.show_router(self.router['id'])
+        self.assertEqual('200', resp['status'])
+        self.assertEqual(destination,
+                         show_body['router']['routes'][0]['destination'])
+        self.assertEqual(next_hop,
+                         show_body['router']['routes'][0]['nexthop'])
 
     def _delete_extra_routes(self, router_id):
         resp, _ = self.client.delete_extra_routes(router_id)
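
The switch from cidr[0] to cidr[2] avoids handing Neutron the network address (and the address usually taken by the subnet gateway) as a next hop. A short illustration of the netaddr indexing the new code relies on (the CIDR below is made up):

    import netaddr

    cidr = netaddr.IPNetwork('10.100.0.0/28')
    str(cidr[0])   # '10.100.0.0' - the network address, not a usable next hop
    str(cidr[1])   # '10.100.0.1' - typically assigned to the subnet gateway
    str(cidr[2])   # '10.100.0.2' - the address the test now uses as next hop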
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 7edaaf8..a49e944 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -82,8 +82,8 @@
     def test_create_update_delete_vpn_service(self):
         # Creates a VPN service
         name = data_utils.rand_name('vpn-service-')
-        resp, body = self.client.create_vpnservice(self.subnet['id'],
-                                                   self.router['id'],
+        resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
+                                                   router_id=self.router['id'],
                                                    name=name,
                                                    admin_state_up=True)
         self.assertEqual('201', resp['status'])
@@ -134,7 +134,7 @@
         # Creates a IKE policy
         name = data_utils.rand_name('ike-policy-')
         resp, body = (self.client.create_ikepolicy(
-                      name,
+                      name=name,
                       ike_version="v1",
                       encryption_algorithm="aes-128",
                       auth_algorithm="sha1"))
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 60b8dc1..7f088de 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -70,13 +70,13 @@
             output_map[outputs['output_key']] = outputs['output_value']
         #Test that first key generated public and private keys
         self.assertTrue('KeyPair_PublicKey' in output_map)
-        self.assertTrue("Generated by" in output_map['KeyPair_PublicKey'])
+        self.assertTrue("Generated" in output_map['KeyPair_PublicKey'])
         self.assertTrue('KeyPair_PrivateKey' in output_map)
         self.assertTrue('-----BEGIN' in output_map['KeyPair_PrivateKey'])
         #Test that second key generated public key, and private key is not
         #in the output due to save_private_key = false
         self.assertTrue('KeyPairDontSavePrivate_PublicKey' in output_map)
-        self.assertTrue('Generated by' in
+        self.assertTrue('Generated' in
                         output_map['KeyPairDontSavePrivate_PublicKey'])
         self.assertTrue(u'KeyPairDontSavePrivate_PrivateKey' in output_map)
         private_key = output_map['KeyPairDontSavePrivate_PrivateKey']
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_telemetry_alarming_api.py
index a59d3ae..3472b31 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_telemetry_alarming_api.py
@@ -11,6 +11,7 @@
 #    under the License.
 
 from tempest.api.telemetry import base
+from tempest.common.utils import data_utils
 from tempest import exceptions
 from tempest.test import attr
 
@@ -18,46 +19,96 @@
 class TelemetryAlarmingAPITestJSON(base.BaseTelemetryTest):
     _interface = 'json'
 
+    @classmethod
+    def setUpClass(cls):
+        super(TelemetryAlarmingAPITestJSON, cls).setUpClass()
+        cls.rule = {'meter_name': 'cpu_util',
+                    'comparison_operator': 'gt',
+                    'threshold': 80.0,
+                    'period': 70}
+        for i in range(2):
+            cls.create_alarm(threshold_rule=cls.rule)
+
     @attr(type="gate")
     def test_alarm_list(self):
-        # Create an alarm to verify in the list of alarms
-        created_alarm_ids = list()
-        fetched_ids = list()
-        rules = {'meter_name': 'cpu_util',
-                 'comparison_operator': 'gt',
-                 'threshold': 80.0,
-                 'period': 70}
-        for i in range(3):
-            resp, body = self.create_alarm(threshold_rule=rules)
-            created_alarm_ids.append(body['alarm_id'])
-
         # List alarms
         resp, alarm_list = self.telemetry_client.list_alarms()
-        self.assertEqual(int(resp['status']), 200)
+        self.assertEqual(200, resp.status)
 
         # Verify created alarm in the list
         fetched_ids = [a['alarm_id'] for a in alarm_list]
-        missing_alarms = [a for a in created_alarm_ids if a not in fetched_ids]
+        missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids]
         self.assertEqual(0, len(missing_alarms),
                          "Failed to find the following created alarm(s)"
                          " in a fetched list: %s" %
                          ', '.join(str(a) for a in missing_alarms))
 
     @attr(type="gate")
-    def test_create_alarm(self):
-        rules = {'meter_name': 'cpu_util',
-                 'comparison_operator': 'gt',
-                 'threshold': 80.0,
-                 'period': 70}
-        resp, body = self.create_alarm(threshold_rule=rules)
-        self.alarm_id = body['alarm_id']
-        self.assertEqual(int(resp['status']), 201)
-        self.assertDictContainsSubset(rules, body['threshold_rule'])
-        resp, body = self.telemetry_client.get_alarm(self.alarm_id)
-        self.assertEqual(int(resp['status']), 200)
-        self.assertDictContainsSubset(rules, body['threshold_rule'])
-        resp, _ = self.telemetry_client.delete_alarm(self.alarm_id)
-        self.assertEqual(int(resp['status']), 204)
+    def test_create_update_get_delete_alarm(self):
+        # Create an alarm
+        alarm_name = data_utils.rand_name('telemetry_alarm')
+        resp, body = self.telemetry_client.create_alarm(
+            name=alarm_name, type='threshold', threshold_rule=self.rule)
+        self.assertEqual(201, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        alarm_id = body['alarm_id']
+        self.assertDictContainsSubset(self.rule, body['threshold_rule'])
+        # Update alarm with new rule and new name
+        new_rule = {'meter_name': 'cpu',
+                    'comparison_operator': 'eq',
+                    'threshold': 70.0,
+                    'period': 60}
+        alarm_name = data_utils.rand_name('telemetry-alarm-update')
+        resp, body = self.telemetry_client.update_alarm(
+            alarm_id,
+            threshold_rule=new_rule,
+            name=alarm_name,
+            type='threshold')
+        self.assertEqual(200, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+        # Get and verify details of an alarm after update
+        resp, body = self.telemetry_client.get_alarm(alarm_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+        # Delete alarm and verify if deleted
+        resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+        self.assertEqual(204, resp.status)
         self.assertRaises(exceptions.NotFound,
-                          self.telemetry_client.get_alarm,
-                          self.alarm_id)
+                          self.telemetry_client.get_alarm, alarm_id)
+
+    @attr(type="gate")
+    def test_set_get_alarm_state(self):
+        alarm_states = ['ok', 'alarm', 'insufficient data']
+        _, alarm = self.create_alarm(threshold_rule=self.rule)
+        # Set alarm state and verify
+        new_state =\
+            [elem for elem in alarm_states if elem != alarm['state']][0]
+        resp, state = self.telemetry_client.alarm_set_state(alarm['alarm_id'],
+                                                            new_state)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_state, state)
+        # Get alarm state and verify
+        resp, state = self.telemetry_client.alarm_get_state(alarm['alarm_id'])
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_state, state)
+
+    @attr(type="gate")
+    def test_create_delete_alarm_with_combination_rule(self):
+        rule = {"alarm_ids": self.alarm_ids,
+                "operator": "or"}
+        # Verifies alarm create
+        alarm_name = data_utils.rand_name('combination_alarm')
+        resp, body = self.telemetry_client.create_alarm(name=alarm_name,
+                                                        combination_rule=rule,
+                                                        type='combination')
+        self.assertEqual(201, resp.status)
+        self.assertEqual(alarm_name, body['name'])
+        alarm_id = body['alarm_id']
+        self.assertDictContainsSubset(rule, body['combination_rule'])
+        # Verify alarm delete
+        resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+        self.assertEqual(204, resp.status)
+        self.assertRaises(exceptions.NotFound,
+                          self.telemetry_client.get_alarm, alarm_id)
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
index a3ab3c8..402ad4e 100644
--- a/tempest/api_schema/compute/aggregates.py
+++ b/tempest/api_schema/compute/aggregates.py
@@ -64,3 +64,21 @@
     'updated_at'] = {
         'type': 'string'
     }
+
+common_create_aggregate = {
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'aggregate': aggregate
+        },
+        'required': ['aggregate']
+    }
+}
+# The create-aggregate API response doesn't include 'hosts' and 'metadata'.
+del common_create_aggregate['response_body']['properties']['aggregate'][
+    'properties']['hosts']
+del common_create_aggregate['response_body']['properties']['aggregate'][
+    'properties']['metadata']
+common_create_aggregate['response_body']['properties']['aggregate'][
+    'required'] = ['availability_zone', 'created_at', 'deleted', 'deleted_at',
+                   'id', 'name', 'updated_at']
diff --git a/tempest/api_schema/compute/hosts.py b/tempest/api_schema/compute/hosts.py
index a9d6567..2596c27 100644
--- a/tempest/api_schema/compute/hosts.py
+++ b/tempest/api_schema/compute/hosts.py
@@ -73,3 +73,13 @@
         'required': ['host']
     }
 }
+
+update_host_common = {
+    'type': 'object',
+    'properties': {
+        'host': {'type': 'string'},
+        'maintenance_mode': {'enum': ['on_maintenance', 'off_maintenance']},
+        'status': {'enum': ['enabled', 'disabled']}
+    },
+    'required': ['host', 'maintenance_mode', 'status']
+}
diff --git a/tempest/api_schema/compute/quotas.py b/tempest/api_schema/compute/quotas.py
new file mode 100644
index 0000000..f49771e
--- /dev/null
+++ b/tempest/api_schema/compute/quotas.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+common_quota_set = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'quota_set': {
+                'type': 'object',
+                'properties': {
+                    'instances': {'type': 'integer'},
+                    'cores': {'type': 'integer'},
+                    'ram': {'type': 'integer'},
+                    'floating_ips': {'type': 'integer'},
+                    'fixed_ips': {'type': 'integer'},
+                    'metadata_items': {'type': 'integer'},
+                    'key_pairs': {'type': 'integer'},
+                    'security_groups': {'type': 'integer'},
+                    'security_group_rules': {'type': 'integer'}
+                },
+                'required': ['instances', 'cores', 'ram',
+                             'floating_ips', 'fixed_ips',
+                             'metadata_items', 'key_pairs',
+                             'security_groups', 'security_group_rules']
+            }
+        },
+        'required': ['quota_set']
+    }
+}
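
common_quota_set follows the usual Tempest response-schema layout: an allowed status_code list plus a JSON Schema under 'response_body' that validate_response() checks the parsed body against. A rough sketch of what that validation amounts to, calling jsonschema directly on an illustrative body:

    import jsonschema

    from tempest.api_schema.compute import quotas

    sample_body = {  # illustrative values, not from a real API response
        'quota_set': {
            'instances': 10, 'cores': 20, 'ram': 51200,
            'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128,
            'key_pairs': 100, 'security_groups': 10,
            'security_group_rules': 20,
        }
    }
    # validate_response() does roughly this with the schema's body part;
    # a missing required key or a non-integer value raises ValidationError.
    jsonschema.validate(sample_body, quotas.common_quota_set['response_body'])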
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
index de3e12b..bc36044 100644
--- a/tempest/api_schema/compute/v2/aggregates.py
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -12,6 +12,14 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
+from tempest.api_schema.compute import aggregates
+
 delete_aggregate = {
     'status_code': [200]
 }
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V2 API's response status_code is 200
+create_aggregate['status_code'] = [200]
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
index 9ec8848..86efadf 100644
--- a/tempest/api_schema/compute/v2/hosts.py
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -35,3 +35,8 @@
 reboot_host['response_body']['properties']['power_action'] = {
     'enum': ['reboot']
 }
+
+update_host = {
+    'status_code': [200],
+    'response_body': hosts.update_host_common
+}
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
index 17dc4dd..31c0458 100644
--- a/tempest/api_schema/compute/v2/quotas.py
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -12,39 +12,36 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'injected_files': {'type': 'integer'},
-                    'injected_file_content_bytes': {'type': 'integer'},
-                    'injected_file_path_bytes': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'}
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'injected_files',
-                             'injected_file_content_bytes',
-                             'injected_file_path_bytes', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
-}
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_files'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['required'].extend([
+    'id',
+    'injected_files',
+    'injected_file_content_bytes',
+    'injected_file_path_bytes'])
+
+quota_set_update = copy.deepcopy(quotas.common_quota_set)
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_files'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+    'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set'][
+    'required'].extend(['injected_files',
+                        'injected_file_content_bytes',
+                        'injected_file_path_bytes'])
 
 delete_quota = {
     'status_code': [202]
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
index 358e455..b9f9915 100644
--- a/tempest/api_schema/compute/v3/aggregates.py
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -12,6 +12,14 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
+from tempest.api_schema.compute import aggregates
+
 delete_aggregate = {
     'status_code': [204]
 }
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V3 API's response status_code is 201
+create_aggregate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
index 575a6e2..eb689d1 100644
--- a/tempest/api_schema/compute/v3/hosts.py
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -16,7 +16,6 @@
 
 from tempest.api_schema.compute import hosts
 
-
 startup_host = {
     'status_code': [200],
     'response_body': {
@@ -41,3 +40,14 @@
 reboot_host['response_body']['properties']['power_action'] = {
     'enum': ['reboot']
 }
+
+update_host = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'host': hosts.update_host_common
+        },
+        'required': ['host']
+    }
+}
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
index aec1e80..a3212ed 100644
--- a/tempest/api_schema/compute/v3/quotas.py
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -12,34 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-quota_set = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': {'type': 'integer'},
-                    'cores': {'type': 'integer'},
-                    'ram': {'type': 'integer'},
-                    'floating_ips': {'type': 'integer'},
-                    'fixed_ips': {'type': 'integer'},
-                    'metadata_items': {'type': 'integer'},
-                    'key_pairs': {'type': 'integer'},
-                    'security_groups': {'type': 'integer'},
-                    'security_group_rules': {'type': 'integer'}
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
-}
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set'][
+    'required'].extend(['id'])
 
 quota_common_info = {
     'type': 'object',
@@ -51,34 +32,27 @@
     'required': ['reserved', 'limit', 'in_use']
 }
 
-quota_set_detail = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'quota_set': {
-                'type': 'object',
-                'properties': {
-                    'id': {'type': 'string'},
-                    'instances': quota_common_info,
-                    'cores': quota_common_info,
-                    'ram': quota_common_info,
-                    'floating_ips': quota_common_info,
-                    'fixed_ips': quota_common_info,
-                    'metadata_items': quota_common_info,
-                    'key_pairs': quota_common_info,
-                    'security_groups': quota_common_info,
-                    'security_group_rules': quota_common_info
-                },
-                'required': ['id', 'instances', 'cores', 'ram',
-                             'floating_ips', 'fixed_ips',
-                             'metadata_items', 'key_pairs',
-                             'security_groups', 'security_group_rules']
-            }
-        },
-        'required': ['quota_set']
-    }
-}
+quota_set_detail = copy.deepcopy(quotas.common_quota_set)
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'id'] = {'type': 'string'}
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'instances'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'cores'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'ram'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'floating_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'fixed_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'metadata_items'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'key_pairs'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'security_groups'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+    'security_group_rules'] = quota_common_info
 
 delete_quota = {
     'status_code': [204]
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 8fe3a17..5059a5b 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -383,7 +383,6 @@
         except exceptions.NotFound:
             LOG.warn('router with name: %s not found for delete' %
                      router_name)
-            pass
 
     def _clear_isolated_subnet(self, subnet_id, subnet_name):
         net_client = self.network_admin_client
@@ -392,7 +391,6 @@
         except exceptions.NotFound:
             LOG.warn('subnet with name: %s not found for delete' %
                      subnet_name)
-            pass
 
     def _clear_isolated_network(self, network_id, network_name):
         net_client = self.network_admin_client
@@ -401,7 +399,6 @@
         except exceptions.NotFound:
             LOG.warn('network with name: %s not found for delete' %
                      network_name)
-            pass
 
     def _cleanup_ports(self, network_id):
         # TODO(mlavalle) This method will be removed once patch
@@ -447,7 +444,6 @@
                 except exceptions.NotFound:
                     LOG.warn('router with name: %s not found for delete' %
                              router['name'])
-                    pass
                 self._clear_isolated_router(router['id'], router['name'])
             if (not self.network_resources or
                 self.network_resources.get('network')):
@@ -471,10 +467,8 @@
             except exceptions.NotFound:
                 LOG.warn("user with name: %s not found for delete" %
                          creds.username)
-                pass
             try:
                 self._delete_tenant(creds.tenant_id)
             except exceptions.NotFound:
                 LOG.warn("tenant with name: %s not found for delete" %
                          creds.tenant_name)
-                pass
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 434904f..02a8c65 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -15,7 +15,6 @@
 #    under the License.
 
 import collections
-import inspect
 import json
 from lxml import etree
 import re
@@ -24,6 +23,7 @@
 import jsonschema
 
 from tempest.common import http
+from tempest.common.utils import misc as misc_utils
 from tempest.common import xml_utils as common
 from tempest import config
 from tempest import exceptions
@@ -228,57 +228,6 @@
         versions = map(lambda x: x['id'], body)
         return resp, versions
 
-    def _find_caller(self):
-        """Find the caller class and test name.
-
-        Because we know that the interesting things that call us are
-        test_* methods, and various kinds of setUp / tearDown, we
-        can look through the call stack to find appropriate methods,
-        and the class we were in when those were called.
-        """
-        caller_name = None
-        names = []
-        frame = inspect.currentframe()
-        is_cleanup = False
-        # Start climbing the ladder until we hit a good method
-        while True:
-            try:
-                frame = frame.f_back
-                name = frame.f_code.co_name
-                names.append(name)
-                if re.search("^(test_|setUp|tearDown)", name):
-                    cname = ""
-                    if 'self' in frame.f_locals:
-                        cname = frame.f_locals['self'].__class__.__name__
-                    if 'cls' in frame.f_locals:
-                        cname = frame.f_locals['cls'].__name__
-                    caller_name = cname + ":" + name
-                    break
-                elif re.search("^_run_cleanup", name):
-                    is_cleanup = True
-                else:
-                    cname = ""
-                    if 'self' in frame.f_locals:
-                        cname = frame.f_locals['self'].__class__.__name__
-                    if 'cls' in frame.f_locals:
-                        cname = frame.f_locals['cls'].__name__
-
-                    # the fact that we are running cleanups is indicated pretty
-                    # deep in the stack, so if we see that we want to just
-                    # start looking for a real class name, and declare victory
-                    # once we do.
-                    if is_cleanup and cname:
-                        if not re.search("^RunTest", cname):
-                            caller_name = cname + ":_run_cleanups"
-                            break
-            except Exception:
-                break
-        # prevents frame leaks
-        del frame
-        if caller_name is None:
-            self.LOG.debug("Sane call name not found in %s" % names)
-        return caller_name
-
     def _get_request_id(self, resp):
         for i in ('x-openstack-request-id', 'x-compute-request-id'):
             if i in resp:
@@ -294,7 +243,7 @@
         # we're going to just provide work around on who is actually
         # providing timings by gracefully adding no content if they don't.
         # Once we're down to 1 caller, clean this up.
-        caller_name = self._find_caller()
+        caller_name = misc_utils.find_test_caller()
         if secs:
             secs = " %.3fs" % secs
         self.LOG.info(
diff --git a/tempest/common/utils/misc.py b/tempest/common/utils/misc.py
index a0b0c0a..b9f411b 100644
--- a/tempest/common/utils/misc.py
+++ b/tempest/common/utils/misc.py
@@ -13,6 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import inspect
+import re
+
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
 
 def singleton(cls):
     """Simple wrapper for classes that should only have a single instance."""
@@ -23,3 +30,55 @@
             instances[cls] = cls()
         return instances[cls]
     return getinstance
+
+
+def find_test_caller():
+    """Find the caller class and test name.
+
+    Because we know that the interesting things that call us are
+    test_* methods, and various kinds of setUp / tearDown, we
+    can look through the call stack to find appropriate methods,
+    and the class we were in when those were called.
+    """
+    caller_name = None
+    names = []
+    frame = inspect.currentframe()
+    is_cleanup = False
+    # Start climbing the ladder until we hit a good method
+    while True:
+        try:
+            frame = frame.f_back
+            name = frame.f_code.co_name
+            names.append(name)
+            if re.search("^(test_|setUp|tearDown)", name):
+                cname = ""
+                if 'self' in frame.f_locals:
+                    cname = frame.f_locals['self'].__class__.__name__
+                if 'cls' in frame.f_locals:
+                    cname = frame.f_locals['cls'].__name__
+                caller_name = cname + ":" + name
+                break
+            elif re.search("^_run_cleanup", name):
+                is_cleanup = True
+            else:
+                cname = ""
+                if 'self' in frame.f_locals:
+                    cname = frame.f_locals['self'].__class__.__name__
+                if 'cls' in frame.f_locals:
+                    cname = frame.f_locals['cls'].__name__
+
+                # the fact that we are running cleanups is indicated pretty
+                # deep in the stack, so if we see that we want to just
+                # start looking for a real class name, and declare victory
+                # once we do.
+                if is_cleanup and cname:
+                    if not re.search("^RunTest", cname):
+                        caller_name = cname + ":_run_cleanups"
+                        break
+        except Exception:
+            break
+    # prevents frame leaks
+    del frame
+    if caller_name is None:
+        LOG.debug("Sane call name not found in %s" % names)
+    return caller_name
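
find_test_caller() climbs the stack until it reaches a test_*, setUp or tearDown frame (or the cleanup runner) and reports it as 'ClassName:method_name'; rest_client and the waiters below use that string to attribute log and timeout messages to the originating test. A small sketch of the behaviour, consistent with the unit tests added in tempest/tests/common/utils/test_misc.py (the class here is illustrative, not a real Tempest test):

    from tempest.common.utils import misc

    class FakeServerTest(object):
        def test_reboot_server(self):
            # Library code called from inside a test_* method can now ask
            # which test it is serving:
            return misc.find_test_caller()

    print(FakeServerTest().test_reboot_server())
    # -> 'FakeServerTest:test_reboot_server'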
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 8e6b9fb..d52ed7c 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -13,6 +13,7 @@
 
 import time
 
+from tempest.common.utils import misc as misc_utils
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
@@ -86,6 +87,9 @@
                         'timeout': timeout})
             message += ' Current status: %s.' % server_status
             message += ' Current task state: %s.' % task_state
+            caller = misc_utils.find_test_caller()
+            if caller:
+                message = '(%s) %s' % (caller, message)
             raise exceptions.TimeoutException(message)
         old_status = server_status
         old_task_state = task_state
@@ -119,4 +123,7 @@
                         'status': status,
                         'timeout': client.build_timeout})
             message += ' Current status: %s.' % image['status']
+            caller = misc_utils.find_test_caller()
+            if caller:
+                message = '(%s) %s' % (caller, message)
             raise exceptions.TimeoutException(message)
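
With the caller lookup in place, a wait timeout now names the test that triggered it, which simplifies gate log triage. The message format is roughly the following (the class, test name and values here are made up):

    # Before: Server 9c1a... <timeout details> Current status: RESIZE.
    #         Current task state: resize_migrating.
    # After:  (ServerActionsTestJSON:test_resize_server_confirm) Server
    #         9c1a... <timeout details> Current status: RESIZE.
    #         Current task state: resize_migrating.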
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 5c0b5d3..fa04bfe 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -50,6 +50,7 @@
         resp, body = self.post('os-aggregates', post_body)
 
         body = json.loads(body)
+        self.validate_response(v2_schema.create_aggregate, resp, body)
         return resp, body['aggregate']
 
     def update_aggregate(self, aggregate_id, name, availability_zone=None):
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index e148572..342f946 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -61,6 +61,7 @@
 
         resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
         body = json.loads(body)
+        self.validate_response(v2_schema.update_host, resp, body)
         return resp, body
 
     def startup_host(self, hostname):
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 9bddf2c..7e828d8 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -110,6 +110,7 @@
                                   post_body)
 
         body = json.loads(body)
+        self.validate_response(schema.quota_set_update, resp, body)
         return resp, body['quota_set']
 
     def delete_quota_set(self, tenant_id):
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 2487ee7..ee15c52 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -50,6 +50,7 @@
         resp, body = self.post('os-aggregates', post_body)
 
         body = json.loads(body)
+        self.validate_response(v3_schema.create_aggregate, resp, body)
         return resp, body['aggregate']
 
     def update_aggregate(self, aggregate_id, name, availability_zone=None):
diff --git a/tempest/services/compute/v3/json/hosts_client.py b/tempest/services/compute/v3/json/hosts_client.py
index 24d43d0..d2eb43d 100644
--- a/tempest/services/compute/v3/json/hosts_client.py
+++ b/tempest/services/compute/v3/json/hosts_client.py
@@ -61,6 +61,7 @@
 
         resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
         body = json.loads(body)
+        self.validate_response(v3_schema.update_host, resp, body)
         return resp, body
 
     def startup_host(self, hostname):
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index c7b5f93..194e300 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -128,3 +128,37 @@
 
         uri = 'cluster-templates/%s' % tmpl_id
         return self.delete(uri)
+
+    def list_data_sources(self):
+        """List all data sources for a user."""
+
+        uri = 'data-sources'
+        return self._request_and_parse(self.get, uri, 'data_sources')
+
+    def get_data_source(self, source_id):
+        """Returns the details of a single data source."""
+
+        uri = 'data-sources/%s' % source_id
+        return self._request_and_parse(self.get, uri, 'data_source')
+
+    def create_data_source(self, name, data_source_type, url, **kwargs):
+        """Creates data source with specified params.
+
+        It supports passing additional params using kwargs and returns created
+        object.
+        """
+        uri = 'data-sources'
+        body = kwargs.copy()
+        body.update({
+            'name': name,
+            'type': data_source_type,
+            'url': url
+        })
+        return self._request_and_parse(self.post, uri, 'data_source',
+                                       body=json.dumps(body))
+
+    def delete_data_source(self, source_id):
+        """Deletes the specified data source by id."""
+
+        uri = 'data-sources/%s' % source_id
+        return self.delete(uri)
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index f9dd8ef..8e53b8d 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -165,21 +165,6 @@
         resp, body = self.delete(uri)
         return resp, body
 
-    def create_vpnservice(self, subnet_id, router_id, **kwargs):
-        post_body = {
-            "vpnservice": {
-                "subnet_id": subnet_id,
-                "router_id": router_id
-            }
-        }
-        for key, val in kwargs.items():
-            post_body['vpnservice'][key] = val
-        body = json.dumps(post_body)
-        uri = '%s/vpn/vpnservices' % (self.uri_prefix)
-        resp, body = self.post(uri, body)
-        body = json.loads(body)
-        return resp, body
-
     def list_router_interfaces(self, uuid):
         uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
         resp, body = self.get(uri)
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 50a1954..a9d4880 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -257,38 +257,6 @@
         body = _root_tag_fetcher_and_xml_to_json_parse(body)
         return resp, body
 
-    def create_vpnservice(self, subnet_id, router_id, **kwargs):
-        uri = '%s/vpn/vpnservices' % (self.uri_prefix)
-        vpnservice = common.Element("vpnservice")
-        p1 = common.Element("subnet_id", subnet_id)
-        p2 = common.Element("router_id", router_id)
-        vpnservice.append(p1)
-        vpnservice.append(p2)
-        common.deep_dict_to_xml(vpnservice, kwargs)
-        resp, body = self.post(uri, str(common.Document(vpnservice)))
-        body = _root_tag_fetcher_and_xml_to_json_parse(body)
-        return resp, body
-
-    def create_ikepolicy(self, name, **kwargs):
-        uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
-        ikepolicy = common.Element("ikepolicy")
-        p1 = common.Element("name", name)
-        ikepolicy.append(p1)
-        common.deep_dict_to_xml(ikepolicy, kwargs)
-        resp, body = self.post(uri, str(common.Document(ikepolicy)))
-        body = _root_tag_fetcher_and_xml_to_json_parse(body)
-        return resp, body
-
-    def create_ipsecpolicy(self, name, **kwargs):
-        uri = '%s/vpn/ipsecpolicies' % (self.uri_prefix)
-        ipsecpolicy = common.Element("ipsecpolicy")
-        p1 = common.Element("name", name)
-        ipsecpolicy.append(p1)
-        common.deep_dict_to_xml(ipsecpolicy, kwargs)
-        resp, body = self.post(uri, str(common.Document(ipsecpolicy)))
-        body = _root_tag_fetcher_and_xml_to_json_parse(body)
-        return resp, body
-
 
 def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
     body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/telemetry/telemetry_client_base.py b/tempest/services/telemetry/telemetry_client_base.py
index 610f07b..a073f54 100644
--- a/tempest/services/telemetry/telemetry_client_base.py
+++ b/tempest/services/telemetry/telemetry_client_base.py
@@ -73,7 +73,10 @@
         return resp, body
 
     def put(self, uri, body):
-        return self.rest_client.put(uri, body)
+        body = self.serialize(body)
+        resp, body = self.rest_client.put(uri, body)
+        body = self.deserialize(body)
+        return resp, body
 
     def get(self, uri):
         resp, body = self.rest_client.get(uri)
@@ -133,3 +136,15 @@
     def create_alarm(self, **kwargs):
         uri = "%s/alarms" % self.uri_prefix
         return self.post(uri, kwargs)
+
+    def update_alarm(self, alarm_id, **kwargs):
+        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+        return self.put(uri, kwargs)
+
+    def alarm_get_state(self, alarm_id):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        return self.get(uri)
+
+    def alarm_set_state(self, alarm_id, state):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        return self.put(uri, state)
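
put() now serializes the request body and deserializes the response the same way post() does, which is what update_alarm() and alarm_set_state() rely on. A hedged sketch of the state round trip, assuming the JSON client's serialize/deserialize are json.dumps/json.loads:

    # resp, state = telemetry_client.alarm_set_state(alarm_id, 'alarm')
    #   -> PUT <endpoint>/v2/alarms/<alarm_id>/state   body: '"alarm"'
    #   -> the '"alarm"' response body is deserialized, so state == 'alarm'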
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
index b8c6184..aee9805 100644
--- a/tempest/tests/common/utils/test_misc.py
+++ b/tempest/tests/common/utils/test_misc.py
@@ -50,3 +50,39 @@
         self.assertEqual(test, test2)
         test3 = TestBar()
         self.assertNotEqual(test, test3)
+
+    def test_find_test_caller_test_case(self):
+        # Calling it from here should give us the method we're in.
+        self.assertEqual('TestMisc:test_find_test_caller_test_case',
+                         misc.find_test_caller())
+
+    def test_find_test_caller_setup_self(self):
+        def setUp(self):
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:setUp', setUp(self))
+
+    def test_find_test_caller_setup_no_self(self):
+        def setUp():
+            return misc.find_test_caller()
+        self.assertEqual(':setUp', setUp())
+
+    def test_find_test_caller_setupclass_cls(self):
+        def setUpClass(cls):  # noqa
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
+
+    def test_find_test_caller_teardown_self(self):
+        def tearDown(self):
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:tearDown', tearDown(self))
+
+    def test_find_test_caller_teardown_no_self(self):
+        def tearDown():
+            return misc.find_test_caller()
+        self.assertEqual(':tearDown', tearDown())
+
+    def test_find_test_caller_teardown_class(self):
+        def tearDownClass(cls):
+            return misc.find_test_caller()
+        self.assertEqual('TestMisc:tearDownClass',
+                         tearDownClass(self.__class__))
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index f584cb9..ab81836 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -17,6 +17,36 @@
 
 
 class HackingTestCase(base.TestCase):
+    """
+    This class tests the hacking checks in tempest.hacking.checks by passing
+    strings to the check methods like the pep8/flake8 parser would. The parser
+    loops over each line in the file and then passes the parameters to the
+    check method. The parameter names in the check method dictate what type of
+    object is passed to the check method. The parameter types are::
+
+        logical_line: A processed line with the following modifications:
+            - Multi-line statements converted to a single line.
+            - Stripped left and right.
+            - Contents of strings replaced with "xxx" of same length.
+            - Comments removed.
+        physical_line: Raw line of text from the input file.
+        lines: a list of the raw lines from the input file
+        tokens: the tokens that contribute to this logical line
+        line_number: line number in the input file
+        total_lines: number of lines in the input file
+        blank_lines: blank lines before this one
+        indent_char: indentation character in this file (" " or "\t")
+        indent_level: indentation (with tabs expanded to multiples of 8)
+        previous_indent_level: indentation on previous line
+        previous_logical: previous logical line
+        filename: Path of the file being run through pep8
+
+    When running a test on a check method, the return value will be
+    False/None if there is no violation in the sample input. If there is a
+    violation, a tuple is returned with the position in the line and a
+    message. So to check the result, just assertTrue if the check is expected
+    to fail and assertFalse if it should pass.
+    """
     def test_no_setupclass_for_unit_tests(self):
         self.assertTrue(checks.no_setupclass_for_unit_tests(
             "  def setUpClass(cls):", './tempest/tests/fake_test.py'))
@@ -24,3 +54,42 @@
             "  def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
         self.assertFalse(checks.no_setupclass_for_unit_tests(
             "  def setUpClass(cls):", './tempest/api/fake_test.py'))
+
+    def test_import_no_clients_in_api(self):
+        for client in checks.PYTHON_CLIENTS:
+            string = "import " + client + "client"
+            self.assertTrue(checks.import_no_clients_in_api(
+                string, './tempest/api/fake_test.py'))
+            self.assertFalse(checks.import_no_clients_in_api(
+                string, './tempest/scenario/fake_test.py'))
+
+    def test_scenario_tests_need_service_tags(self):
+        self.assertFalse(checks.scenario_tests_need_service_tags(
+            'def test_fake:', './tempest/scenario/test_fake.py',
+            "@test.services('compute')"))
+        self.assertFalse(checks.scenario_tests_need_service_tags(
+            'def test_fake_test:', './tempest/api/compute/test_fake.py',
+            "@test.services('image')"))
+        self.assertTrue(checks.scenario_tests_need_service_tags(
+            'def test_fake_test:', './tempest/scenario/test_fake.py',
+            '\n'))
+
+    def test_no_vi_headers(self):
+        # NOTE(mtreinish)  The lines parameter is used only for finding the
+        # line location in the file. So these tests just pass a list of an
+        # arbitrary length to use for verifying the check function.
+        self.assertTrue(checks.no_vi_headers(
+            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
+        self.assertTrue(checks.no_vi_headers(
+            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
+        self.assertFalse(checks.no_vi_headers(
+            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
+
+    def test_service_tags_not_in_module_path(self):
+        self.assertTrue(checks.service_tags_not_in_module_path(
+            "@test.services('compute')", './tempest/api/compute/fake_test.py'))
+        self.assertFalse(checks.service_tags_not_in_module_path(
+            "@test.services('compute')",
+            './tempest/scenario/compute/fake_test.py'))
+        self.assertFalse(checks.service_tags_not_in_module_path(
+            "@test.services('compute')", './tempest/api/image/fake_test.py'))
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index f6ed445..bba4012 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -14,6 +14,7 @@
 
 import os
 import shutil
+import StringIO
 import subprocess
 import tempfile
 
@@ -33,6 +34,7 @@
         # Setup Test files
         self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
         self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+        self.subunit_trace = os.path.join(self.directory, 'subunit-trace.py')
         self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
         self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
         self.init_file = os.path.join(self.test_dir, '__init__.py')
@@ -43,55 +45,48 @@
         shutil.copy('setup.py', self.setup_py)
         shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
         shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+        shutil.copy('tools/subunit-trace.py', self.subunit_trace)
+        # copy over the pretty_tox scripts
+        shutil.copy('tools/pretty_tox.sh',
+                    os.path.join(self.directory, 'pretty_tox.sh'))
+        shutil.copy('tools/pretty_tox_serial.sh',
+                    os.path.join(self.directory, 'pretty_tox_serial.sh'))
+
+        self.stdout = StringIO.StringIO()
+        self.stderr = StringIO.StringIO()
+        # Change directory, run wrapper and check result
+        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+        os.chdir(self.directory)
+
+    def assertRunExit(self, cmd, expected):
+        p = subprocess.Popen(
+            "bash %s" % cmd, shell=True,
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # wait() is dangerous in the general case; however, the amount of
+        # data coming back on those pipes is small enough that it shouldn't
+        # be a problem.
+        p.wait()
+
+        self.assertEqual(
+            p.returncode, expected,
+            "Stdout: %s; Stderr: %s" % (p.stdout, p.stderr))
 
     def test_pretty_tox(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
-        shutil.copy('tools/pretty_tox.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
         # Git init is required for the pbr testr command. pbr requires a git
         # version or an sdist to work. so make the test directory a git repo
         # too.
-        subprocess.call(['git', 'init'])
-        exit_code = subprocess.call('bash pretty_tox.sh tests.passing',
-                                    shell=True, stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 0)
+        subprocess.call(['git', 'init'], stderr=DEVNULL)
+        self.assertRunExit('pretty_tox.sh tests.passing', 0)
 
     def test_pretty_tox_fails(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
-        shutil.copy('tools/pretty_tox.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
         # Git init is required for the pbr testr command. pbr requires a git
         # version or an sdist to work. so make the test directory a git repo
         # too.
-        subprocess.call(['git', 'init'])
-        exit_code = subprocess.call('bash pretty_tox.sh', shell=True,
-                                    stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 1)
+        subprocess.call(['git', 'init'], stderr=DEVNULL)
+        self.assertRunExit('pretty_tox.sh', 1)
 
     def test_pretty_tox_serial(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
-        shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
-        exit_code = subprocess.call('bash pretty_tox_serial.sh tests.passing',
-                                    shell=True, stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 0)
+        self.assertRunExit('pretty_tox_serial.sh tests.passing', 0)
 
     def test_pretty_tox_serial_fails(self):
-        # Copy wrapper script and requirements:
-        pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
-        shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
-        # Change directory, run wrapper and check result
-        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
-        os.chdir(self.directory)
-        exit_code = subprocess.call('bash pretty_tox_serial.sh', shell=True,
-                                    stdout=DEVNULL, stderr=DEVNULL)
-        self.assertEqual(exit_code, 1)
+        self.assertRunExit('pretty_tox_serial.sh', 1)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 46822e3..743b59d 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -101,7 +101,6 @@
             print('done.')
         else:
             print("venv already exists...")
-            pass
 
     def pip_install(self, *args):
         self.run_command(['tools/with_venv.sh',
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
index 07c35a0..f3c88f3 100755
--- a/tools/pretty_tox.sh
+++ b/tools/pretty_tox.sh
@@ -3,4 +3,4 @@
 set -o pipefail
 
 TESTRARGS=$1
-python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit2pyunit
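+# resolve the trace filter relative to this script so the wrapper works from
+# any working directory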
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
index 42ce760..1634b8e 100755
--- a/tools/pretty_tox_serial.sh
+++ b/tools/pretty_tox_serial.sh
@@ -7,7 +7,7 @@
 if [ ! -d .testrepository ]; then
     testr init
 fi
-testr run --subunit $TESTRARGS | subunit2pyunit
+testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py
 retval=$?
 testr slowest
 exit $retval
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
new file mode 100755
index 0000000..cb710aa
--- /dev/null
+++ b/tools/subunit-trace.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Samsung Electronics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Trace a subunit stream in reasonable detail and high accuracy."""
+
+import functools
+import sys
+
+import mimeparse
+import subunit
+import testtools
+
+DAY_SECONDS = 60 * 60 * 24
+FAILS = []
+RESULTS = {}
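+# FAILS accumulates failed tests for the optional summary in print_fails();
+# RESULTS maps worker id -> list of finished tests seen on that worker.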
+
+
+class Starts(testtools.StreamResult):
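+    """StreamResult that echoes passthrough output and marks test starts."""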
+
+    def __init__(self, output):
+        super(Starts, self).__init__()
+        self._output = output
+
+    def startTestRun(self):
+        self._neednewline = False
+        self._emitted = set()
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+               runnable=True, file_name=None, file_bytes=None, eof=False,
+               mime_type=None, route_code=None, timestamp=None):
+        super(Starts, self).status(
+            test_id, test_status,
+            test_tags=test_tags, runnable=runnable, file_name=file_name,
+            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+            route_code=route_code, timestamp=timestamp)
+        if not test_id:
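+            # events without a test_id carry passthrough content (for
+            # example stdout routed through the subunit stream); echo it
+            # straight to our output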
+            if not file_bytes:
+                return
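+            # normalize a missing or mistyped ('test/plain') mime type so
+            # the attachment can be decoded as text below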
+            if not mime_type or mime_type == 'test/plain;charset=utf8':
+                mime_type = 'text/plain; charset=utf-8'
+            primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
+            content_type = testtools.content_type.ContentType(
+                primary, sub, parameters)
+            content = testtools.content.Content(
+                content_type, lambda: [file_bytes])
+            text = content.as_text()
+            if text and text[-1] not in '\r\n':
+                self._neednewline = True
+            self._output.write(text)
+        elif test_status == 'inprogress' and test_id not in self._emitted:
+            if self._neednewline:
+                self._neednewline = False
+                self._output.write('\n')
+            worker = ''
+            for tag in test_tags or ():
+                if tag.startswith('worker-'):
+                    worker = '(' + tag[7:] + ') '
+            if timestamp:
+                timestr = timestamp.isoformat()
+            else:
+                timestr = ''
+            # always emit the start line, whether or not a timestamp came
+            # through with the event
+            self._output.write('%s: %s%s [start]\n' %
+                               (timestr, worker, test_id))
+            self._emitted.add(test_id)
+
+
+def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
+    """Clean up the test name for display.
+
+    By default we strip out the tags in the test because they don't help us
+    in matching the test that was run to its result.
+
+    Stripping the testscenarios information (not to be confused with tempest
+    scenarios) is also possible, but it is often needed to identify generated
+    negative tests, so it is off by default.
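+
+    For example, with the defaults an (illustrative) name such as
+    'tempest.api.foo.TestFoo.test_bar[gate,smoke]' comes back as
+    'tempest.api.foo.TestFoo.test_bar'.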
+    """
+    if strip_tags:
+        tags_start = name.find('[')
+        tags_end = name.find(']')
+        if tags_start > 0 and tags_end > tags_start:
+            newname = name[:tags_start]
+            newname += name[tags_end + 1:]
+            name = newname
+
+    if strip_scenarios:
+        tags_start = name.find('(')
+        tags_end = name.find(')')
+        if tags_start > 0 and tags_end > tags_start:
+            newname = name[:tags_start]
+            newname += name[tags_end + 1:]
+            name = newname
+
+    return name
+
+
+def get_duration(timestamps):
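+    """Format the elapsed time between two timestamps as a string.
+
+    A delta of 1.5 seconds renders as '1.500000s'; if either timestamp is
+    missing, an empty string is returned.
+    """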
+    start, end = timestamps
+    if not start or not end:
+        duration = ''
+    else:
+        delta = end - start
+        duration = '%d.%06ds' % (
+            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
+    return duration
+
+
+def find_worker(test):
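+    """Return the worker number from a test's 'worker-N' tag, or 'NaN'."""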
+    for tag in test['tags']:
+        if tag.startswith('worker-'):
+            return int(tag[7:])
+    return 'NaN'
+
+
+# Always print captured stdout/stderr when they contain content.
+def print_attachments(stream, test, all_channels=False):
+    """Print out subunit attachments.
+
+    Print out subunit attachments that contain content. This
+    runs in 2 modes, one for successes where we print out just stdout
+    and stderr, and an override that dumps all the attachments.
+    """
+    channels = ('stdout', 'stderr')
+    for name, detail in test['details'].items():
+        # NOTE(sdague): the subunit names are a little crazy, and actually
+        # are in the form pythonlogging:'' (with the colon and quotes)
+        name = name.split(':')[0]
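+        # some attachments arrive typed as 'test/plain' (sic); coerce them
+        # to text so as_text() below can decode them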
+        if detail.content_type.type == 'test':
+            detail.content_type.type = 'text'
+        if (all_channels or name in channels) and detail.as_text():
+            title = "Captured %s:" % name
+            stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
+            # indent attachment lines 4 spaces to make them visually
+            # offset
+            for line in detail.as_text().split('\n'):
+                stream.write("    %s\n" % line)
+
+
+def show_outcome(stream, test):
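+    """Write a one-line outcome for a finished test and record it.
+
+    Results are grouped per worker in the RESULTS global; failed tests are
+    also collected in FAILS for the optional summary report.
+    """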
+    global RESULTS
+    status = test['status']
+    # TODO(sdague): ask lifeless why on this?
+    if status == 'exists':
+        return
+
+    worker = find_worker(test)
+    name = cleanup_test_name(test['id'])
+    duration = get_duration(test['timestamps'])
+
+    if worker not in RESULTS:
+        RESULTS[worker] = []
+    RESULTS[worker].append(test)
+
+    # skip the process-returncode pseudo-test; it is not a real test result
+    if name == 'process-returncode':
+        return
+
+    if status == 'success':
+        stream.write('{%s} %s [%s] ... ok\n' % (
+            worker, name, duration))
+        print_attachments(stream, test)
+    elif status == 'fail':
+        FAILS.append(test)
+        stream.write('{%s} %s [%s] ... FAILED\n' % (
+            worker, name, duration))
+        print_attachments(stream, test, all_channels=True)
+    elif status == 'skip':
+        stream.write('{%s} %s ... SKIPPED: %s\n' % (
+            worker, name, test['details']['reason'].as_text()))
+    else:
+        stream.write('{%s} %s [%s] ... %s\n' % (
+            worker, name, duration, test['status']))
+        print_attachments(stream, test, all_channels=True)
+
+    stream.flush()
+
+
+def print_fails(stream):
+    """Print summary failure report.
+
+    Currently unused; however, there remains debate on inline vs. at-end
+    reporting, so leave the utility function here for later use.
+    """
+    if not FAILS:
+        return
+    stream.write("\n==============================\n")
+    stream.write("Failed %s tests - output below:" % len(FAILS))
+    stream.write("\n==============================\n")
+    for f in FAILS:
+        stream.write("\n%s\n" % f['id'])
+        stream.write("%s\n" % ('-' * len(f['id'])))
+        print_attachments(stream, f, all_channels=True)
+    stream.write('\n')
+
+
+def main():
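+    """Parse a subunit v2 stream from stdin and pretty-print it.
+
+    The stream is fanned out to three consumers: Starts (live start lines
+    and passthrough output), a StreamToDict feeding show_outcome (per-test
+    result lines) and a StreamSummary that determines the exit code.
+    """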
+    stream = subunit.ByteStreamToStreamResult(
+        sys.stdin, non_subunit_name='stdout')
+    starts = Starts(sys.stdout)
+    outcomes = testtools.StreamToDict(
+        functools.partial(show_outcome, sys.stdout))
+    summary = testtools.StreamSummary()
+    result = testtools.CopyStreamResult([starts, outcomes, summary])
+    result.startTestRun()
+    try:
+        stream.run(result)
+    finally:
+        result.stopTestRun()
+    return (0 if summary.wasSuccessful() else 1)
+
+
+if __name__ == '__main__':
+    sys.exit(main())