Apply cookiecutter to newly split project telemetry-tempest-plugin

This tempest plugin is being split out of the main ceilometer project in
accordance with Queens goal "Split Tempest Plugins into Separate
Repos/Projects"[1]. This patch applies the standard boilerplate files
for OpenStack projects so that it can stand on its own.

It contains tempest tests for Aodh, Gnocchi, Ceilometer and Panko projects.

[1] https://governance.openstack.org/tc/goals/queens/split-tempest-plugins.html
diff --git a/telemetry_tempest_plugin/__init__.py b/telemetry_tempest_plugin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/__init__.py
diff --git a/telemetry_tempest_plugin/aodh/__init__.py b/telemetry_tempest_plugin/aodh/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/__init__.py
diff --git a/telemetry_tempest_plugin/aodh/api/__init__.py b/telemetry_tempest_plugin/aodh/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/api/__init__.py
diff --git a/telemetry_tempest_plugin/aodh/api/base.py b/telemetry_tempest_plugin/aodh/api/base.py
new file mode 100644
index 0000000..92339b7
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/api/base.py
@@ -0,0 +1,64 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
+import tempest.test
+
+from telemetry_tempest_plugin.aodh.service import client
+
+CONF = config.CONF
+
+
+class BaseAlarmingTest(tempest.test.BaseTestCase):
+    """Base test case class for all Alarming API tests."""
+
+    credentials = ['primary']
+    client_manager = client.Manager
+
+    @classmethod
+    def skip_checks(cls):
+        # Skip the whole test class when Aodh is not marked as available
+        # in the tempest configuration.
+        super(BaseAlarmingTest, cls).skip_checks()
+        if not CONF.service_available.aodh_plugin:
+            raise cls.skipException("Aodh support is required")
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseAlarmingTest, cls).setup_clients()
+        cls.alarming_client = cls.os_primary.alarming_client
+
+    @classmethod
+    def resource_setup(cls):
+        super(BaseAlarmingTest, cls).resource_setup()
+        # IDs of alarms created during the tests; deleted again in
+        # resource_cleanup().
+        cls.alarm_ids = []
+
+    @classmethod
+    def create_alarm(cls, **kwargs):
+        # Create a 'threshold' alarm with a random name, register its ID
+        # for cleanup, and return the API response body.
+        body = cls.alarming_client.create_alarm(
+            name=data_utils.rand_name('telemetry_alarm'),
+            type='threshold', **kwargs)
+        cls.alarm_ids.append(body['alarm_id'])
+        return body
+
+    @staticmethod
+    def cleanup_resources(method, list_of_ids):
+        # Best-effort deletion: resources already removed (e.g. by a
+        # delete test) are silently skipped.
+        for resource_id in list_of_ids:
+            try:
+                method(resource_id)
+            except lib_exc.NotFound:
+                pass
+
+    @classmethod
+    def resource_cleanup(cls):
+        cls.cleanup_resources(cls.alarming_client.delete_alarm, cls.alarm_ids)
+        super(BaseAlarmingTest, cls).resource_cleanup()
diff --git a/telemetry_tempest_plugin/aodh/api/test_alarming_api.py b/telemetry_tempest_plugin/aodh/api/test_alarming_api.py
new file mode 100644
index 0000000..bb486e5
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/api/test_alarming_api.py
@@ -0,0 +1,94 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from telemetry_tempest_plugin.aodh.api import base
+
+
+class TelemetryAlarmingAPITest(base.BaseAlarmingTest):
+    """List, CRUD and state-transition tests for the Aodh alarming API."""
+
+    @classmethod
+    def resource_setup(cls):
+        super(TelemetryAlarmingAPITest, cls).resource_setup()
+        # Threshold rule shared by every test in this class.
+        cls.rule = {'meter_name': 'cpu_util',
+                    'comparison_operator': 'gt',
+                    'threshold': 80.0,
+                    'period': 70}
+        # Pre-create two alarms so the list test has known entries.
+        for i in range(2):
+            cls.create_alarm(threshold_rule=cls.rule)
+
+    @decorators.idempotent_id('1c918e06-210b-41eb-bd45-14676dd77cd7')
+    def test_alarm_list(self):
+        # List alarms
+        alarm_list = self.alarming_client.list_alarms()
+
+        # Verify created alarm in the list
+        fetched_ids = [a['alarm_id'] for a in alarm_list]
+        missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids]
+        self.assertEqual(0, len(missing_alarms),
+                         "Failed to find the following created alarm(s)"
+                         " in a fetched list: %s" %
+                         ', '.join(str(a) for a in missing_alarms))
+
+    @decorators.idempotent_id('1297b095-39c1-4e74-8a1f-4ae998cedd68')
+    def test_create_update_get_delete_alarm(self):
+        # Create an alarm
+        alarm_name = data_utils.rand_name('telemetry_alarm')
+        body = self.alarming_client.create_alarm(
+            name=alarm_name, type='threshold', threshold_rule=self.rule)
+        self.assertEqual(alarm_name, body['name'])
+        alarm_id = body['alarm_id']
+        # NOTE(review): assertDictContainsSubset is deprecated and removed
+        # in Python 3.12 — confirm supported interpreter versions.
+        self.assertDictContainsSubset(self.rule, body['threshold_rule'])
+        # Update alarm with new rule and new name
+        new_rule = {'meter_name': 'cpu',
+                    'comparison_operator': 'eq',
+                    'threshold': 70.0,
+                    'period': 60}
+        alarm_name_updated = data_utils.rand_name('telemetry-alarm-update')
+        body = self.alarming_client.update_alarm(
+            alarm_id,
+            threshold_rule=new_rule,
+            name=alarm_name_updated,
+            type='threshold')
+        self.assertEqual(alarm_name_updated, body['name'])
+        self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+        # Get and verify details of an alarm after update
+        body = self.alarming_client.show_alarm(alarm_id)
+        self.assertEqual(alarm_name_updated, body['name'])
+        self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+        # Get history for the alarm and verify the same.  History is
+        # returned newest-first: [0] is the rule change, [1] the creation.
+        body = self.alarming_client.show_alarm_history(alarm_id)
+        self.assertEqual("rule change", body[0]['type'])
+        self.assertIn(alarm_name_updated, body[0]['detail'])
+        self.assertEqual("creation", body[1]['type'])
+        self.assertIn(alarm_name, body[1]['detail'])
+        # Delete alarm and verify if deleted
+        self.alarming_client.delete_alarm(alarm_id)
+        self.assertRaises(lib_exc.NotFound,
+                          self.alarming_client.show_alarm, alarm_id)
+
+    @decorators.idempotent_id('aca49486-70bb-4016-87e0-f6131374f742')
+    def test_set_get_alarm_state(self):
+        alarm_states = ['ok', 'alarm', 'insufficient data']
+        alarm = self.create_alarm(threshold_rule=self.rule)
+        # Pick any state different from the alarm's current one so the
+        # transition is observable.
+        new_state =\
+            [elem for elem in alarm_states if elem != alarm['state']][0]
+        state = self.alarming_client.alarm_set_state(alarm['alarm_id'],
+                                                     new_state)
+        self.assertEqual(new_state, state.data)
+        # Get alarm state and verify
+        state = self.alarming_client.show_alarm_state(alarm['alarm_id'])
+        self.assertEqual(new_state, state.data)
diff --git a/telemetry_tempest_plugin/aodh/api/test_alarming_api_negative.py b/telemetry_tempest_plugin/aodh/api/test_alarming_api_negative.py
new file mode 100644
index 0000000..0707f67
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/api/test_alarming_api_negative.py
@@ -0,0 +1,71 @@
+#    Copyright 2015 GlobalLogic.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_utils import uuidutils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from telemetry_tempest_plugin.aodh.api import base
+
+
+class TelemetryAlarmingNegativeTest(base.BaseAlarmingTest):
+    """Negative tests for show_alarm, update_alarm, show_alarm_history tests
+
+        ** show non-existent alarm
+        ** show the deleted alarm
+        ** delete deleted alarm
+        ** update deleted alarm
+    """
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81e7e')
+    def test_get_non_existent_alarm(self):
+        # Showing an alarm ID that was never created must raise NotFound.
+        non_existent_id = uuidutils.generate_uuid()
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm,
+                          non_existent_id)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2582ae')
+    def test_get_update_show_history_delete_deleted_alarm(self):
+        # Create a real alarm, delete it, then verify that show, update
+        # and delete on the deleted ID all raise NotFound.
+        alarm_name = data_utils.rand_name('telemetry_alarm')
+        rule = {'meter_name': 'cpu',
+                'comparison_operator': 'eq',
+                'threshold': 100.0,
+                'period': 90}
+        body = self.alarming_client.create_alarm(
+            name=alarm_name,
+            type='threshold',
+            threshold_rule=rule)
+        alarm_id = body['alarm_id']
+        self.alarming_client.delete_alarm(alarm_id)
+        # get the deleted alarm
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm,
+                          alarm_id)
+
+        # update the deleted alarm
+        updated_alarm_name = data_utils.rand_name('telemetry_alarm_updated')
+        updated_rule = {'meter_name': 'cpu_new',
+                        'comparison_operator': 'eq',
+                        'threshold': 70,
+                        'period': 50}
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.update_alarm,
+                          alarm_id, threshold_rule=updated_rule,
+                          name=updated_alarm_name,
+                          type='threshold')
+        # delete the deleted alarm
+        self.assertRaises(lib_exc.NotFound, self.alarming_client.delete_alarm,
+                          alarm_id)
diff --git a/telemetry_tempest_plugin/aodh/service/__init__.py b/telemetry_tempest_plugin/aodh/service/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/service/__init__.py
diff --git a/telemetry_tempest_plugin/aodh/service/client.py b/telemetry_tempest_plugin/aodh/service/client.py
new file mode 100644
index 0000000..39d2cf8
--- /dev/null
+++ b/telemetry_tempest_plugin/aodh/service/client.py
@@ -0,0 +1,126 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from six.moves.urllib import parse as urllib
+from tempest import config
+from tempest.lib.common import rest_client
+from tempest import manager
+import ujson
+
+CONF = config.CONF
+
+
+class AlarmingClient(rest_client.RestClient):
+    """REST client for the Aodh (alarming) v2 API."""
+
+    version = '2'
+    uri_prefix = "v2"
+
+    def deserialize(self, body):
+        # NOTE(review): newlines are stripped before parsing — presumably a
+        # workaround for responses containing literal newlines; confirm it
+        # is still needed.  Also note ujson is a third-party dependency.
+        return ujson.loads(body.replace("\n", ""))
+
+    def serialize(self, body):
+        return ujson.dumps(body)
+
+    def list_alarms(self, query=None):
+        # 'query' is an optional (field, op, value) triple encoded as the
+        # q.field / q.op / q.value query-string parameters.
+        uri = '%s/alarms' % self.uri_prefix
+        uri_dict = {}
+        if query:
+            uri_dict = {'q.field': query[0],
+                        'q.op': query[1],
+                        'q.value': query[2]}
+        if uri_dict:
+            uri += "?%s" % urllib.urlencode(uri_dict)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBodyList(resp, body)
+
+    def show_alarm(self, alarm_id):
+        uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_alarm_history(self, alarm_id):
+        uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBodyList(resp, body)
+
+    def delete_alarm(self, alarm_id):
+        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+        resp, body = self.delete(uri)
+        self.expected_success(204, resp.status)
+        # A 204 normally carries no body; only parse when one is present.
+        if body:
+            body = self.deserialize(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def create_alarm(self, **kwargs):
+        uri = "%s/alarms" % self.uri_prefix
+        body = self.serialize(kwargs)
+        resp, body = self.post(uri, body)
+        self.expected_success(201, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_alarm(self, alarm_id, **kwargs):
+        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+        body = self.serialize(kwargs)
+        resp, body = self.put(uri, body)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_alarm_state(self, alarm_id):
+        # State payloads are wrapped in ResponseBodyData (exposed via
+        # the .data attribute by callers).
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        resp, body = self.get(uri)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBodyData(resp, body)
+
+    def alarm_set_state(self, alarm_id, state):
+        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+        body = self.serialize(state)
+        resp, body = self.put(uri, body)
+        self.expected_success(200, resp.status)
+        body = self.deserialize(body)
+        return rest_client.ResponseBodyData(resp, body)
+
+
+class Manager(manager.Manager):
+    """Tempest service manager that wires up the alarming client."""
+
+    # REST-client parameters common to all service clients.
+    default_params = {
+        'disable_ssl_certificate_validation':
+            CONF.identity.disable_ssl_certificate_validation,
+        'ca_certs': CONF.identity.ca_certificates_file,
+        'trace_requests': CONF.debug.trace_requests
+    }
+
+    # Endpoint selection for the alarming service, merged with the
+    # common parameters above.
+    alarming_params = {
+        'service': CONF.alarming_plugin.catalog_type,
+        'region': CONF.identity.region,
+        'endpoint_type': CONF.alarming_plugin.endpoint_type,
+    }
+    alarming_params.update(default_params)
+
+    def __init__(self, credentials=None, service=None):
+        # 'service' is accepted for signature compatibility with the base
+        # manager but is not used here.
+        super(Manager, self).__init__(credentials)
+        self.set_alarming_client()
+
+    def set_alarming_client(self):
+        self.alarming_client = AlarmingClient(self.auth_provider,
+                                              **self.alarming_params)
diff --git a/telemetry_tempest_plugin/config.py b/telemetry_tempest_plugin/config.py
new file mode 100644
index 0000000..87b50af
--- /dev/null
+++ b/telemetry_tempest_plugin/config.py
@@ -0,0 +1,77 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+
+service_option = [cfg.BoolOpt('ceilometer',
+                              default=True,
+                              help="Whether or not Ceilometer is expected to"
+                                   "be available"),
+                  cfg.BoolOpt('panko',
+                              default=True,
+                              help="Whether or not Panko is expected to be"
+                                   "available"),
+                  cfg.BoolOpt("aodh_plugin",
+                              default=True,
+                              help="Whether or not Aodh is expected to be"
+                                   "available")]
+
+telemetry_group = cfg.OptGroup(name='telemetry',
+                               title='Telemetry Service Options')
+
+event_group = cfg.OptGroup(name='event',
+                           title='Event Service Options')
+
+alarming_group = cfg.OptGroup(name='alarming_plugin',
+                              title='Alarming Service Options')
+
+TelemetryGroup = [
+    cfg.IntOpt('notification_wait',
+               default=120,
+               help="The seconds to wait for notifications which "
+                    "containers and objects sent to swift."),
+    cfg.IntOpt('notification_sleep',
+               default=1,
+               help="The seconds to sleep after an unsuccessful "
+                    "notification received."),
+    cfg.IntOpt('alarm_granularity',
+               default=300,
+               help="Granularity to use for aodh alarms. This must match the "
+                    "configured Gnocchi archive policy")
+
+]
+
+event_opts = [
+    cfg.StrOpt('catalog_type',
+               default='event',
+               help="Catalog type of the Event service."),
+    cfg.StrOpt('endpoint_type',
+               default='publicURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help="The endpoint type to use for the event service."),
+]
+
+AlarmingGroup = [
+    cfg.StrOpt('catalog_type',
+               default='alarming',
+               help="Catalog type of the Alarming service."),
+    cfg.StrOpt('endpoint_type',
+               default='publicURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help="The endpoint type to use for the alarming service."),
+]
diff --git a/telemetry_tempest_plugin/exceptions.py b/telemetry_tempest_plugin/exceptions.py
new file mode 100644
index 0000000..de22a41
--- /dev/null
+++ b/telemetry_tempest_plugin/exceptions.py
@@ -0,0 +1,169 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+
+class TempestException(Exception):
+    """Base Tempest Exception
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+    """
+    message = "An unknown exception occurred"
+
+    def __init__(self, *args, **kwargs):
+        super(TempestException, self).__init__()
+        try:
+            self._error_string = self.message % kwargs
+        except Exception:
+            # at least get the core message out if something happened
+            self._error_string = self.message
+        if len(args) > 0:
+            # If there is a non-kwarg parameter, assume it's the error
+            # message or reason description and tack it on to the end
+            # of the exception message
+            # Convert all arguments into their string representations...
+            args = ["%s" % arg for arg in args]
+            self._error_string = (self._error_string +
+                                  "\nDetails: %s" % '\n'.join(args))
+
+    def __str__(self):
+        # Render the fully-formatted message built in __init__.
+        return self._error_string
+
+
+# Concrete exception types.  Each subclass only overrides 'message';
+# formatting is handled by TempestException.__init__.
+class RestClientException(TempestException,
+                          testtools.TestCase.failureException):
+    pass
+
+
+class InvalidConfiguration(TempestException):
+    message = "Invalid Configuration"
+
+
+class InvalidCredentials(TempestException):
+    message = "Invalid Credentials"
+
+
+class InvalidServiceTag(TempestException):
+    message = "Invalid service tag"
+
+
+class InvalidIdentityVersion(TempestException):
+    message = "Invalid version %(identity_version)s of the identity service"
+
+
+class TimeoutException(TempestException):
+    message = "Request timed out"
+
+
+class BuildErrorException(TempestException):
+    message = "Server %(server_id)s failed to build and is in ERROR status"
+
+
+class ImageKilledException(TempestException):
+    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
+
+
+class AddImageException(TempestException):
+    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
+
+
+class VolumeBuildErrorException(TempestException):
+    message = "Volume %(volume_id)s failed to build and is in ERROR status"
+
+
+class VolumeRestoreErrorException(TempestException):
+    message = "Volume %(volume_id)s failed to restore and is in ERROR status"
+
+
+class SnapshotBuildErrorException(TempestException):
+    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
+
+
+class VolumeBackupException(TempestException):
+    message = "Volume backup %(backup_id)s failed and is in ERROR status"
+
+
+class StackBuildErrorException(TempestException):
+    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
+               "due to '%(stack_status_reason)s'")
+
+
+class EndpointNotFound(TempestException):
+    message = "Endpoint not found"
+
+
+class IdentityError(TempestException):
+    message = "Got identity error"
+
+
+class ServerUnreachable(TempestException):
+    message = "The server is not reachable via the configured network"
+
+
+# NOTE(andreaf) This exception is added here to facilitate the migration
+# of get_network_from_name and preprov_creds to tempest.lib, and it should
+# be migrated along with them
+class InvalidTestResource(TempestException):
+    message = "%(name)s is not a valid %(type)s, or the name is ambiguous"
+
+
+class RFCViolation(RestClientException):
+    message = "RFC Violation"
+
+
+class InvalidHttpSuccessCode(RestClientException):
+    message = "The success code is different than the expected one"
+
+
+class BadRequest(RestClientException):
+    message = "Bad request"
+
+
+class ResponseWithNonEmptyBody(RFCViolation):
+    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
+               "MUST NOT have a body")
+
+
+class ResponseWithEntity(RFCViolation):
+    message = ("RFC Violation! Response with 205 HTTP Status Code "
+               "MUST NOT have an entity")
+
+
+class InvalidHTTPResponseHeader(RestClientException):
+    message = "HTTP response header is invalid"
+
+
+class InvalidStructure(TempestException):
+    message = "Invalid structure of table with details"
+
+class CommandFailed(Exception):
+    """Raised when a CLI command exits with a non-zero status.
+
+    Captures the return code, the command, and both output streams so
+    the failure can be reported verbatim.
+    """
+
+    def __init__(self, returncode, cmd, output, stderr):
+        super(CommandFailed, self).__init__()
+        self.returncode = returncode
+        self.cmd = cmd
+        self.stdout = output
+        self.stderr = stderr
+
+    def __str__(self):
+        return ("Command '%s' returned non-zero exit status %d.\n"
+                "stdout:\n%s\n"
+                "stderr:\n%s" % (self.cmd,
+                                 self.returncode,
+                                 self.stdout,
+                                 self.stderr))
diff --git a/telemetry_tempest_plugin/integration/__init__.py b/telemetry_tempest_plugin/integration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/__init__.py
diff --git a/telemetry_tempest_plugin/integration/gabbi/__init__.py b/telemetry_tempest_plugin/integration/gabbi/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/gabbi/__init__.py
diff --git a/telemetry_tempest_plugin/integration/gabbi/gabbits-live/aodh-gnocchi-threshold-alarm.yaml b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/aodh-gnocchi-threshold-alarm.yaml
new file mode 100644
index 0000000..65b00e9
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/aodh-gnocchi-threshold-alarm.yaml
@@ -0,0 +1,309 @@
+#
+# Tests for gnocchi-threshold-alarm
+#
+#   user_id        : c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+#   project_id     : 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+#   alarm name     : gabbi-gnocchi-threshold-resource-alarm
+#   resource_id    : gabbi-mock-resource
+#   archive policy : gabbiliveceph
+#
+
+defaults:
+    request_headers:
+        x-auth-token: $ENVIRON['ADMIN_TOKEN']
+
+tests:
+    #
+    # Setup gnocchi archive policy/resource type/resource/metrics
+    #
+
+    - name: SETUP. create archive policy gabbiliveceph
+      desc: create archive policy 'gabbiliveceph' for tests
+      POST: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/archive_policy
+      status: 201
+      request_headers:
+          content-type: application/json
+      data:
+          name: gabbiliveceph
+          back_window: 0
+          definition:
+              - granularity: 1 second
+                points: 60
+              - granularity: 20 second
+                timespan: 1 minute
+              - points: 5
+                timespan: 5 minute
+          aggregation_methods:
+              - mean
+              - min
+              - max
+      response_json_paths:
+          $.name: gabbiliveceph
+          $.back_window: 0
+          $.aggregation_methods.`len`: 3
+
+    - name: create resource type ceph_account
+      desc: needed to create a resource
+      POST: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource_type
+      status: 201 || 409
+      request_headers:
+          content-type: application/json
+      data:
+          name: ceph_account
+
+    - name: create resource of ceph_account type
+      POST: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/ceph_account
+      request_headers:
+          content-type: application/json
+      data:
+          id: 662e46f3-8c06-430c-8a9c-adcaedd1272c
+          user_id:      27e342e4-4489-424f-a7e4-ba8ed9ad729c
+          project_id:   d6d32769-c351-4758-b0a2-458fa1a065a3
+          metrics:
+              radosgw.objects:
+                  archive_policy_name: gabbiliveceph
+              radosgw.objects.size:
+                  archive_policy_name: gabbiliveceph
+              radosgw.objects.containers:
+                  archive_policy_name: gabbiliveceph
+              radosgw.api.request:
+                  archive_policy_name: gabbiliveceph
+              radosgw.containers.objects:
+                  archive_policy_name: gabbiliveceph
+              radosgw.containers.objects.size:
+                  archive_policy_name: gabbiliveceph
+      status: 201
+      response_json_paths:
+          $.user_id:    27e342e4-4489-424f-a7e4-ba8ed9ad729c
+          $.project_id: d6d32769-c351-4758-b0a2-458fa1a065a3
+          $.metrics.`len`: 6
+
+    #
+    # Actual tests
+    #
+
+    - name: search 'gabbi-gnocchi-threshold-resource-alarm' alarm doesnt exist
+      desc: search for alarm using user_id, project_id, alarm_name
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      query_parameters:
+          q.field:
+              - user_id
+              - project_id
+              - name
+          q.op:
+              - eq
+              - eq
+              - eq
+          q.value:
+              - c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+              - 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+              - gabbi-gnocchi-threshold-resource-alarm
+      method: GET
+      response_json_paths:
+          $.`len`: 0
+
+    - name: create alarm 'gabbi-gnocchi-threshold-resource-alarm'
+      desc: create a threshold alarm gabbi-gnocchi-threshold-resource-alarm
+      POST: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      request_headers:
+          content-type: application/json
+      data:
+          alarm_actions:
+              - "http://site:8000/gnocchi-threshold-resource"
+          alarm_id: null
+          description: An gabbilive threshold based alarm
+          enabled: true
+          insufficient_data_actions:
+              - "http://site:8000/nodata-gnocchi-threshold-resource"
+          name: "gabbi-gnocchi-threshold-resource-alarm"
+          ok_actions:
+              - "http://site:8000/ok-gnocchi-threshold-resource"
+          project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+          repeat_actions: false
+          severity: moderate
+          state: "insufficient data"
+          type: gnocchi_resources_threshold
+          gnocchi_resources_threshold_rule:
+              evaluation_periods: 3
+              metric: "radosgw.objects.size"
+              resource_id: "662e46f3-8c06-430c-8a9c-adcaedd1272c"
+              aggregation_method: "mean"
+              granularity: 60
+              threshold: 5.0
+              comparison_operator: "ge"
+              resource_type: ceph_account
+          user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+      status: 201
+      response_json_paths:
+          $.name: gabbi-gnocchi-threshold-resource-alarm
+          $.type: gnocchi_resources_threshold
+          $.user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+          $.project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+          $.severity: moderate
+
+    - name: retrieve history about 'gabbi-gnocchi-threshold-resource-alarm' creation
+      desc: get history about alarm creation
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.alarm_id']/history
+      request_headers:
+          content-type: application/json
+      poll:
+          count: 5
+          delay: 2
+      response_json_paths:
+          $.`len`: 1
+          $.[0].type: creation
+          $.[0].alarm_id: $RESPONSE['$.alarm_id']
+
+    - name: update severity for alarm 'gabbi-gnocchi-threshold-resource-alarm'
+      desc: update severity for alarm gabbi-gnocchi-threshold-resource-alarm
+      PUT: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']
+      status: 200
+      request_headers:
+          content-type: application/json
+      data:
+          alarm_actions:
+              - "http://site:8000/gnocchi-threshold-resource"
+          alarm_id: null
+          description: An gabbilive threshold based alarm
+          enabled: true
+          insufficient_data_actions:
+              - "http://site:8000/nodata-gnocchi-threshold-resource"
+          name: "gabbi-gnocchi-threshold-resource-alarm"
+          ok_actions:
+              - "http://site:8000/ok-gnocchi-threshold-resource"
+          project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+          repeat_actions: false
+          severity: low
+          state: "insufficient data"
+          type: gnocchi_resources_threshold
+          gnocchi_resources_threshold_rule:
+              evaluation_periods: 3
+              metric: "radosgw.objects.size"
+              resource_id: "662e46f3-8c06-430c-8a9c-adcaedd1272c"
+              aggregation_method: "mean"
+              granularity: 60
+              threshold: 5.0
+              comparison_operator: "ge"
+              resource_type: ceph_account
+          user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+      response_json_paths:
+          $.name: gabbi-gnocchi-threshold-resource-alarm
+          $.type: gnocchi_resources_threshold
+          $.user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+          $.project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+          $.severity: low
+          $.state: "insufficient data"
+
+    - name: retrieve history for 'gabbi-gnocchi-threshold-resource-alarm'
+      desc: get history for rule_change
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.alarm_id']/history
+      request_headers:
+          content-type: application/json
+      poll:
+          count: 5
+          delay: 2
+      response_json_paths:
+          $.`len`: 2
+          $.[0].type: rule change
+          $.[0].alarm_id: $RESPONSE['$.alarm_id']
+          $.[0].detail: '{"severity": "low"}'
+
+    - name: update alarm state for 'gabbi-gnocchi-threshold-resource-alarm'
+      desc: update state for alarm
+      PUT: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']/state
+      request_headers:
+          content-type: application/json
+      data: '"ok"'
+      status: 200
+      response_strings:
+          - "ok"
+
+    - name: search 'gabbi-gnocchi-threshold-resource-alarm' alarm exist
+      desc: search for alarm using user_id, project_id, alarm_name
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      query_parameters:
+          q.field:
+              - user_id
+              - project_id
+              - name
+          q.op:
+              - eq
+              - eq
+              - eq
+          q.value:
+              - c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+              - 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+              - gabbi-gnocchi-threshold-resource-alarm
+      poll:
+          count: 5
+          delay: 2
+      response_json_paths:
+          $.`len`: 1
+
+    - name: get info about 'gabbi-gnocchi-threshold-resource-alarm' alarm
+      desc: access alarm using its ID
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']
+      response_json_paths:
+          $.alarm_id: $RESPONSE['$.[0].alarm_id']
+          $.alarm_actions: ["http://site:8000/gnocchi-threshold-resource"]
+          $.name: gabbi-gnocchi-threshold-resource-alarm
+          $.gnocchi_resources_threshold_rule.resource_id: "662e46f3-8c06-430c-8a9c-adcaedd1272c"
+          $.gnocchi_resources_threshold_rule.metric: "radosgw.objects.size"
+          $.gnocchi_resources_threshold_rule.resource_type: "ceph_account"
+          $.user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+
+    - name: get alarm state for 'gabbi-gnocchi-threshold-resource-alarm'
+      desc: get state for alarm
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.alarm_id']/state
+      request_headers:
+          content-type: application/json
+      status: 200
+      response_strings:
+          - "ok"
+
+    #
+    # Teardown
+    #
+
+    - name: CLEANUP. search 'gabbi-gnocchi-threshold-resource' alarm exist
+      desc: Find alarm id using user_id, project_id, alarm_name
+      GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      query_parameters:
+          q.field:
+              - user_id
+              - project_id
+              - name
+          q.op:
+              - eq
+              - eq
+              - eq
+          q.value:
+              - c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
+              - 709f6ed6-bfb3-4649-b303-0019a7f6aef2
+              - gabbi-gnocchi-threshold-resource-alarm
+      response_json_paths:
+          $.`len`: 1
+
+    - name: CLEANUP. delete threshold alarm 'gabbi-gnocchi-threshold-resource'
+      DELETE: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']
+      status: 204
+
+    - name: CLEANUP. Get resource by name '662e46f3-8c06-430c-8a9c-adcaedd1272c'
+      desc: retrieve resource by 662e46f3-8c06-430c-8a9c-adcaedd1272c to get its ID
+      GET: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/generic/662e46f3-8c06-430c-8a9c-adcaedd1272c
+      status: 200
+
+    - name: CLEANUP. delete test ceph_resource '662e46f3-8c06-430c-8a9c-adcaedd1272c'
+      desc: delete ceph_account resource 662e46f3-8c06-430c-8a9c-adcaedd1272c
+      DELETE: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/generic/$RESPONSE['$.id']
+      status: 204
+
+    - name: CLEANUP. delete resource type ceph_account
+      DELETE: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource_type/ceph_account
+      status: 204
+
+    - name: CLEANUP. delete archive
+      DELETE: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/archive_policy/gabbiliveceph
+      status: 204
+      xfail: True
+
diff --git a/telemetry_tempest_plugin/integration/gabbi/gabbits-live/autoscaling.yaml b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/autoscaling.yaml
new file mode 100644
index 0000000..8eac9d0
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/autoscaling.yaml
@@ -0,0 +1,175 @@
+defaults:
+    request_headers:
+        x-auth-token: $ENVIRON['USER_TOKEN']
+
+tests:
+    - name: list alarms none
+      desc: Lists alarms, none yet exist
+      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      method: GET
+      response_strings:
+          - "[]"
+
+    - name: list servers none
+      desc: List servers, none yet exist
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers
+      method: GET
+      response_strings:
+          - "[]"
+
+    - name: create stack
+      desc: Create an autoscaling stack
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks
+      method: POST
+      request_headers:
+          content-type: application/json
+      data: <@create_stack.json
+      status: 201
+
+    - name: control stack status
+      desc: Check the stack has been created successfully
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      redirects: true
+      method: GET
+      status: 200
+      poll:
+          count: 300
+          delay: 1
+      response_json_paths:
+          $.stack.stack_status: "CREATE_COMPLETE"
+
+    - name: list servers grow
+      desc: Wait for the autoscaling stack to grow to two servers
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
+      method: GET
+      poll:
+          count: 600
+          delay: 1
+      response_json_paths:
+          $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
+          $.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id']
+          $.servers[0].status: ACTIVE
+          $.servers[1].status: ACTIVE
+          $.servers.`len`: 2
+
+    - name: check gnocchi resources
+      desc: Check the gnocchi resources for these two servers exist
+      url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance
+      method: GET
+      poll:
+          count: 30
+          delay: 1
+      response_strings:
+          - '"id": "$RESPONSE["$.servers[0].id"]"'
+          - '"id": "$RESPONSE["$.servers[1].id"]"'
+
+    - name: check event
+      desc: Check panko for new instance.create.end event
+      url: $ENVIRON['PANKO_SERVICE_URL']/v2/events
+      method: GET
+      request_headers:
+          content-type: application/json
+      data:
+          q:
+            - field: event_type
+              op: eq
+              type: string
+              value: compute.instance.create.end
+            - field: resource_id
+              op: eq
+              type: string
+              value: $HISTORY['list servers grow'].$RESPONSE['$.servers[0].id']
+      poll:
+          count: 30
+          delay: 1
+      response_json_paths:
+          $.`len`: 1
+          $[0].event_type: compute.instance.create.end
+          $[0].traits[?(@.name='resource_id')].value: $HISTORY['list servers grow'].$RESPONSE['$.servers[0].id']
+
+    - name: check alarm
+      desc: Check the aodh alarm and its state
+      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      method: GET
+      poll:
+          count: 30
+          delay: 1
+      response_strings:
+          - "integration_test-cpu_alarm_high-"
+      response_json_paths:
+          $[0].state: alarm
+
+    - name: get stack location for update
+      desc: Get the stack location
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      method: GET
+      status: 302
+
+    - name: update stack
+      desc: Update an autoscaling stack
+      url: $LOCATION
+      method: PUT
+      request_headers:
+          content-type: application/json
+      data: <@update_stack.json
+      status: 202
+
+    - name: control stack status
+      desc: Check the stack has been updated successfully
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      redirects: true
+      method: GET
+      status: 200
+      poll:
+          count: 300
+          delay: 1
+      response_json_paths:
+          $.stack.stack_status: "UPDATE_COMPLETE"
+
+    - name: list servers shrink
+      desc: Wait for the autoscaling stack to shrink to one server
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
+      method: GET
+      poll:
+          count: 600
+          delay: 1
+      response_json_paths:
+          $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
+          $.servers[0].status: ACTIVE
+          $.servers.`len`: 1
+
+    - name: get stack location
+      desc: Get the stack location
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      method: GET
+      status: 302
+
+    - name: delete stack
+      desc: Delete the stack
+      url: $LOCATION
+      method: DELETE
+      status: 204
+
+    - name: get deleted stack
+      desc: Check the stack has been deleted
+      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
+      redirects: true
+      method: GET
+      poll:
+          count: 300
+          delay: 1
+      status: 404
+
+    - name: list alarms deleted
+      desc: List alarms, no more exist
+      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
+      method: GET
+      response_strings:
+          - "[]"
+
+    - name: list servers deleted
+      desc: List servers, no more exist
+      url: $ENVIRON['NOVA_SERVICE_URL']/servers
+      method: GET
+      response_strings:
+          - "[]"
diff --git a/telemetry_tempest_plugin/integration/gabbi/gabbits-live/create_stack.json b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/create_stack.json
new file mode 100644
index 0000000..57a6366
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/create_stack.json
@@ -0,0 +1,74 @@
+{
+    "stack_name": "integration_test",
+    "template": {
+        "heat_template_version": "2013-05-23",
+        "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
+        "resources": {
+            "asg": {
+                "type": "OS::Heat::AutoScalingGroup",
+                "properties": {
+                    "min_size": 1,
+                    "max_size": 2,
+                    "resource": {
+                        "type": "OS::Nova::Server",
+                        "properties": {
+                            "networks": [{ "network": "$ENVIRON['NEUTRON_NETWORK']" }],
+                            "flavor": "$ENVIRON['NOVA_FLAVOR_REF']",
+                            "image": "$ENVIRON['GLANCE_IMAGE_NAME']",
+                            "metadata": {
+                                "metering.server_group": { "get_param": "OS::stack_id" }
+                            },
+                            "user_data_format": "RAW",
+                            "user_data": {"Fn::Join": ["", [
+                                "#!/bin/sh\n",
+                                "echo 'Loading CPU'\n",
+                                "set -v\n",
+                                "cat /dev/urandom > /dev/null\n"
+                            ]]}
+                        }
+                    }
+                }
+            },
+            "web_server_scaleup_policy": {
+                "type": "OS::Heat::ScalingPolicy",
+                "properties": {
+                    "adjustment_type": "change_in_capacity",
+                    "auto_scaling_group_id": { "get_resource": "asg" },
+                    "cooldown": 2,
+                    "scaling_adjustment": 1
+                }
+            },
+            "cpu_alarm_high": {
+                "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
+                "properties": {
+                    "description": "Scale-up if the mean CPU > 10% on 1 minute",
+                    "metric": "cpu_util",
+                    "aggregation_method": "mean",
+                    "granularity": $ENVIRON["AODH_GRANULARITY"],
+                    "evaluation_periods": 1,
+                    "threshold": 10,
+                    "comparison_operator": "gt",
+                    "alarm_actions": [
+                        {
+                            "str_replace": {
+                                "template": "trust+url",
+                                "params": {
+                                    "url": { "get_attr": [ "web_server_scaleup_policy", "signal_url" ] }
+                                }
+                            }
+                        }
+                    ],
+                    "resource_type": "instance",
+                    "query": {
+                        "str_replace": {
+                            "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
+                            "params": {
+                                "stack_id": { "get_param": "OS::stack_id" }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/telemetry_tempest_plugin/integration/gabbi/gabbits-live/update_stack.json b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/update_stack.json
new file mode 100644
index 0000000..54f8e29
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/gabbi/gabbits-live/update_stack.json
@@ -0,0 +1,73 @@
+{
+    "template": {
+        "heat_template_version": "2013-05-23",
+        "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
+        "resources": {
+            "asg": {
+                "type": "OS::Heat::AutoScalingGroup",
+                "properties": {
+                    "min_size": 1,
+                    "max_size": 2,
+                    "resource": {
+                        "type": "OS::Nova::Server",
+                        "properties": {
+                            "networks": [{ "network": "$ENVIRON['NEUTRON_NETWORK']" }],
+                            "flavor": "$ENVIRON['NOVA_FLAVOR_REF']",
+                            "image": "$ENVIRON['GLANCE_IMAGE_NAME']",
+                            "metadata": {
+                                "metering.server_group": { "get_param": "OS::stack_id" }
+                            },
+                            "user_data_format": "RAW",
+                            "user_data": {"Fn::Join": ["", [
+                                "#!/bin/sh\n",
+                                "echo 'Loading CPU'\n",
+                                "set -v\n",
+                                "cat /dev/urandom > /dev/null\n"
+                            ]]}
+                        }
+                    }
+                }
+            },
+            "web_server_scaledown_policy": {
+                "type": "OS::Heat::ScalingPolicy",
+                "properties": {
+                    "adjustment_type": "change_in_capacity",
+                    "auto_scaling_group_id": { "get_resource": "asg" },
+                    "cooldown": 2,
+                    "scaling_adjustment": -1
+                }
+            },
+            "cpu_alarm_high": {
+                "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
+                "properties": {
+                    "description": "Scale-down if the mean CPU > 10% on 1 minute",
+                    "metric": "cpu_util",
+                    "aggregation_method": "mean",
+                    "granularity": $ENVIRON["AODH_GRANULARITY"],
+                    "evaluation_periods": 1,
+                    "threshold": 10,
+                    "comparison_operator": "gt",
+                    "alarm_actions": [
+                        {
+                            "str_replace": {
+                                "template": "trust+url",
+                                "params": {
+                                    "url": { "get_attr": [ "web_server_scaledown_policy", "signal_url" ] }
+                                }
+                            }
+                        }
+                    ],
+                    "resource_type": "instance",
+                    "query": {
+                        "str_replace": {
+                            "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
+                            "params": {
+                                "stack_id": { "get_param": "OS::stack_id" }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/telemetry_tempest_plugin/integration/gabbi/test_gabbi_live.py b/telemetry_tempest_plugin/integration/gabbi/test_gabbi_live.py
new file mode 100644
index 0000000..925fbf3
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/gabbi/test_gabbi_live.py
@@ -0,0 +1,40 @@
+#
+# Copyright 2015 Red Hat. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A test module to exercise live telemetry services (Aodh, Gnocchi, Heat, Nova, Panko) with gabbi."""
+
+import os
+
+from gabbi import driver
+
+
+TESTS_DIR = 'gabbits-live'
+
+
+def load_tests(loader, tests, pattern):
+    """Provide a TestSuite to the discovery process."""
+    # Every live gabbi test needs these endpoints/credentials from the
+    # environment.  When any is missing the function returns None so the
+    # suite is silently skipped, unless GABBI_LIVE_FAIL_IF_NO_TEST is set
+    # to demand a hard failure instead.
+    NEEDED_ENV = ["AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL",
+                  "HEAT_SERVICE_URL", "NOVA_SERVICE_URL", "PANKO_SERVICE_URL",
+                  "GLANCE_IMAGE_NAME", "ADMIN_TOKEN"]
+
+    for env_variable in NEEDED_ENV:
+        if not os.getenv(env_variable):
+            if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"):
+                raise RuntimeError('%s is not set' % env_variable)
+            else:
+                return
+
+    # NOTE(review): host/port look like placeholders — the YAML tests all
+    # use absolute $ENVIRON service URLs — confirm they are never contacted.
+    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+    return driver.build_tests(test_dir, loader, host="localhost", port=8041)
diff --git a/telemetry_tempest_plugin/integration/hooks/post_test_hook.sh b/telemetry_tempest_plugin/integration/hooks/post_test_hook.sh
new file mode 100755
index 0000000..ca3f5be
--- /dev/null
+++ b/telemetry_tempest_plugin/integration/hooks/post_test_hook.sh
@@ -0,0 +1,93 @@
+#!/bin/bash -xe
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This script is executed inside post_test_hook function in devstack gate.
+
+function export_subunit_data {
+    # Export the latest testr run as a subunit stream named after the
+    # given target (e.g. testrepository.subunit.all-plugin), but only
+    # when a test repository actually exists.
+    target="$1"
+    if [ -f .testrepository/0 ]; then
+        sudo testr last --subunit > $WORKSPACE/testrepository.subunit.$target
+    fi
+}
+
+function generate_testr_results {
+    # Merge every exported subunit stream into one file, render it as an
+    # HTML report, and publish both (gzipped, world-readable) in the
+    # job's log directory.
+    cat $WORKSPACE/testrepository.subunit.* | sudo tee $BASE/logs/testrepository.subunit
+    sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
+    sudo gzip -9 $BASE/logs/testrepository.subunit
+    sudo gzip -9 $BASE/logs/testr_results.html
+    sudo chown $USER:$USER $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
+    sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
+}
+
+function generate_telemetry_report(){
+    # Dump the current state of every telemetry-related service to the
+    # job output to help debug integration-test failures.  Tracing and
+    # errexit are disabled while reporting so a missing resource does
+    # not abort the collection of the remaining data.
+    set +x
+    set +e
+
+    echo "* Message queue status:"
+    sudo rabbitmqctl list_queues | grep -e \\.sample -e \\.info
+
+    # Admin credentials are needed for the CLI clients below.
+    source $BASE/new/devstack/openrc admin admin
+
+    echo "* Heat stack:"
+    openstack stack show integration_test
+    echo "* Alarm list:"
+    aodh alarm list
+    echo "* Event list:"
+    ceilometer event-list -q 'event_type=string::compute.instance.create.end'
+    echo "* Nova instance list:"
+    openstack server list
+
+    echo "* Gnocchi instance list:"
+    gnocchi resource list -t instance
+    for instance_id in $(openstack server list -f value -c ID); do
+        echo "* Nova instance detail:"
+        openstack server show $instance_id
+        echo "* Gnocchi instance detail:"
+        gnocchi resource show -t instance $instance_id
+        echo "* Gnocchi measures for instance ${instance_id}:"
+        gnocchi measures show -r $instance_id cpu_util
+    done
+
+    gnocchi status
+
+    echo "* Unprocessed measures:"
+    # NOTE(review): assumes gnocchi uses the redis incoming driver — confirm
+    # against the devstack configuration for this job.
+    for key in $(redis-cli --scan --pattern 'incoming*'); do echo -n "$key length = " && redis-cli llen $key; done
+
+    set -e
+    set -x
+}
+
+function generate_reports_and_maybe_exit() {
+    # On failure, collect the telemetry debug report and the test-result
+    # artifacts, then exit immediately with the original return code.
+    local ret="$1"
+    if [[ $ret != 0 ]]; then
+        # Collect and parse result
+        generate_telemetry_report
+        generate_testr_results
+        exit $ret
+    fi
+}
+
+
+# Run tests with tempest
+sudo chown -R tempest:stack $BASE/new/tempest
+sudo chown -R tempest:stack $BASE/data/tempest
+cd $BASE/new/tempest
+# Let the tempest run fail without aborting the script so the subunit
+# data and debug reports can still be collected afterwards.
+set +e
+sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- telemetry_tempest_plugin.scenario.test_telemetry_integration --concurrency=$TEMPEST_CONCURRENCY
+EXIT_CODE=$?
+set -e
+export_subunit_data "all-plugin"
+generate_reports_and_maybe_exit $EXIT_CODE
+
+exit $EXIT_CODE
diff --git a/telemetry_tempest_plugin/plugin.py b/telemetry_tempest_plugin/plugin.py
new file mode 100644
index 0000000..0d8fdfd
--- /dev/null
+++ b/telemetry_tempest_plugin/plugin.py
@@ -0,0 +1,55 @@
+#
+# Copyright 2015 NEC Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from tempest import config
+from tempest.test_discover import plugins
+
+from telemetry_tempest_plugin import config as tempest_config
+
+
+class TelemetryTempestPlugin(plugins.TempestPlugin):
+    """Tempest plugin entry point for the telemetry tests.
+
+    Lets tempest discover the tests shipped in this package and register
+    the telemetry-specific configuration options.
+    """
+
+    def load_tests(self):
+        """Return the (test dir, top dir) pair used for test discovery."""
+        # base_path is the repository root; the tests live one level
+        # below it in the telemetry_tempest_plugin package.
+        base_path = os.path.split(os.path.dirname(
+            os.path.abspath(__file__)))[0]
+        test_dir = "telemetry_tempest_plugin"
+        full_test_dir = os.path.join(base_path, test_dir)
+        return full_test_dir, base_path
+
+    def register_opts(self, conf):
+        """Register this plugin's option groups with the tempest config."""
+        config.register_opt_group(
+            conf, config.service_available_group,
+            tempest_config.service_option)
+        config.register_opt_group(
+            conf, tempest_config.telemetry_group,
+            tempest_config.TelemetryGroup)
+        config.register_opt_group(
+            conf, tempest_config.event_group,
+            tempest_config.event_opts)
+        config.register_opt_group(
+            conf, tempest_config.alarming_group,
+            tempest_config.AlarmingGroup)
+
+    def get_opt_lists(self):
+        """Return (group name, options) pairs for config file generation."""
+        return [(tempest_config.telemetry_group.name,
+                 tempest_config.TelemetryGroup),
+                (tempest_config.event_group.name,
+                 tempest_config.event_opts),
+                (config.service_available_group.name,
+                 tempest_config.service_option),
+                (tempest_config.alarming_group.name,
+                 tempest_config.AlarmingGroup)]
diff --git a/telemetry_tempest_plugin/scenario/__init__.py b/telemetry_tempest_plugin/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/telemetry_tempest_plugin/scenario/__init__.py
diff --git a/telemetry_tempest_plugin/scenario/test_telemetry_integration.py b/telemetry_tempest_plugin/scenario/test_telemetry_integration.py
new file mode 100644
index 0000000..aba412f
--- /dev/null
+++ b/telemetry_tempest_plugin/scenario/test_telemetry_integration.py
@@ -0,0 +1,143 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import unittest
+
+from gabbi import runner
+from gabbi import suitemaker
+from gabbi import utils
+from tempest import config
+from tempest.scenario import manager
+
# Directory holding the gabbi yaml scenario files, resolved relative to
# this module so discovery works regardless of install location.
TEST_DIR = os.path.join(os.path.dirname(__file__), '..',
                        'integration', 'gabbi', 'gabbits-live')
+
+
class TestTelemetryIntegration(manager.ScenarioTest):
    """Run the gabbi-defined telemetry scenarios against a live cloud.

    One test method per yaml file in TEST_DIR is attached to this class
    at import time (see the module-level loop below).  ``_do_test``
    exports the tokens and service endpoints the yaml declarations
    expect as environment variables and then drives the gabbi suite.
    """

    credentials = ['admin', 'primary']

    # Gabbi scenarios create real heat stacks; give tempest's default
    # timeouts extra headroom.
    TIMEOUT_SCALING_FACTOR = 5

    @classmethod
    def skip_checks(cls):
        """Skip the whole class unless every required service is enabled."""
        super(TestTelemetryIntegration, cls).skip_checks()
        for name in ["aodh_plugin", "gnocchi", "nova", "heat", "panko",
                     "ceilometer", "glance"]:
            cls._check_service(name)

    @classmethod
    def _check_service(cls, name):
        """Skip if CONF.service_available.<name> is absent or false."""
        if not getattr(config.CONF.service_available, name, False):
            raise cls.skipException("%s support is required" %
                                    name.capitalize())

    @staticmethod
    def _get_endpoint(auth, service):
        """Return the endpoint URL for *service* from the token catalog.

        :param auth: (token, auth data) pair from an auth provider; the
            auth data carries the service catalog.
        :param service: name of the tempest config section holding the
            ``catalog_type``/``endpoint_type`` options to look up.
        :raises Exception: when the catalog has no matching entry.

        Handles both keystone v3 ('catalog', interface names) and v2
        ('serviceCatalog', '*URL' keys) token formats.
        """
        opt_section = getattr(config.CONF, service)
        endpoint_type = opt_section.endpoint_type
        is_keystone_v3 = 'catalog' in auth[1]

        if is_keystone_v3:
            # v3 catalogs use bare interface names ('public'), not the
            # v2-style 'publicURL' spelling.
            if endpoint_type.endswith("URL"):
                endpoint_type = endpoint_type[:-3]
            catalog = auth[1]['catalog']
            endpoints = [e['endpoints'] for e in catalog
                         if e['type'] == opt_section.catalog_type]
            if not endpoints:
                raise Exception("%s endpoint not found" %
                                opt_section.catalog_type)
            endpoints = [e['url'] for e in endpoints[0]
                         if e['interface'] == endpoint_type]
            if not endpoints:
                raise Exception("%s interface not found for endpoint %s" %
                                (endpoint_type,
                                 opt_section.catalog_type))
            return endpoints[0]

        else:
            if not endpoint_type.endswith("URL"):
                endpoint_type += "URL"
            catalog = auth[1]['serviceCatalog']
            endpoints = [e for e in catalog
                         if e['type'] == opt_section.catalog_type]
            if not endpoints:
                raise Exception("%s endpoint not found" %
                                opt_section.catalog_type)
            return endpoints[0]['endpoints'][0][endpoint_type]

    def _do_test(self, filename):
        """Export the environment gabbi needs, then run one yaml suite."""
        admin_auth = self.os_admin.auth_provider.get_auth()
        auth = self.os_primary.auth_provider.get_auth()
        # First non-external network; assumes the deployment provides at
        # least one tenant network -- TODO confirm against job config.
        networks = self.os_primary.networks_client.list_networks(
            **{'router:external': False, 'fields': 'id'})['networks']

        # The gabbi yaml files read these via $ENVIRON substitutions.
        os.environ.update({
            "ADMIN_TOKEN": admin_auth[0],
            "USER_TOKEN": auth[0],
            "AODH_GRANULARITY": str(config.CONF.telemetry.alarm_granularity),
            "AODH_SERVICE_URL": self._get_endpoint(auth, "alarming_plugin"),
            "GNOCCHI_SERVICE_URL": self._get_endpoint(auth, "metric"),
            "PANKO_SERVICE_URL": self._get_endpoint(auth, "event"),
            "HEAT_SERVICE_URL": self._get_endpoint(auth, "orchestration"),
            "NOVA_SERVICE_URL": self._get_endpoint(auth, "compute"),
            "GLANCE_SERVICE_URL": self._get_endpoint(auth, "image"),
            "GLANCE_IMAGE_NAME": self.glance_image_create(),
            "NOVA_FLAVOR_REF": config.CONF.compute.flavor_ref,
            "NEUTRON_NETWORK": networks[0].get('id'),
        })

        with open(os.path.join(TEST_DIR, filename)) as f:
            test_suite = suitemaker.test_suite_from_dict(
                loader=unittest.defaultTestLoader,
                test_base_name="gabbi",
                suite_dict=utils.load_yaml(f),
                test_directory=TEST_DIR,
                host=None, port=None,
                fixture_module=None,
                intercept=None,
                handlers=runner.initialize_handlers([]),
                test_loader_name="tempest")

            # NOTE(sileht): We hide stdout/stderr and reraise the failure
            # manually, tempest will print it itself.
            with open(os.devnull, 'w') as stream:
                result = unittest.TextTestRunner(
                    stream=stream, verbosity=0, failfast=True,
                ).run(test_suite)

            if not result.wasSuccessful():
                # TestResult stores errors/failures as (test, traceback)
                # pairs, but unexpectedSuccesses as bare test cases;
                # normalize them so the unpacking below cannot blow up
                # with a ValueError instead of a useful failure message.
                failures = (
                    result.errors + result.failures +
                    [(test, "unexpected success")
                     for test in result.unexpectedSuccesses])
                if failures:
                    test, bt = failures[0]
                    name = test.test_data.get('name', test.id())
                    msg = 'From test "%s" :\n%s' % (name, bt)
                    self.fail(msg)

            self.assertTrue(result.wasSuccessful())
+
+
+def test_maker(name, filename):
+    def test(self):
+        self._do_test(filename)
+        test.__name__ = name
+    return test
+
+
# Attach one scenario method to TestTelemetryIntegration per yaml file
# found in TEST_DIR, named test_<file stem with '-' mapped to '_'>.
for yaml_file in os.listdir(TEST_DIR):
    if yaml_file.endswith('.yaml'):
        test_name = "test_%s" % yaml_file[:-5].lower().replace("-", "_")
        setattr(TestTelemetryIntegration, test_name,
                test_maker(test_name, yaml_file))