Clean up leftover stacks

When the autoscaling scenario fails, gabbi doesn't execute the
rest of the steps in the scenario, so the stack used for the
autoscaling test doesn't get deleted. The tempest tearDown isn't
able to delete the stack either, which results in additional
errors from the tearDown.

This patch adds cleanup code which runs before the rest of the
resource cleanup. It checks whether the stack still exists and
deletes it if necessary.
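
In outline, the cleanup does the following (a minimal sketch of the
logic added below; delete_stack_if_present is only an illustrative
name, the real code lives in the resource_cleanup classmethods):

    import os
    import time

    import requests

    def delete_stack_if_present(stack_name, token):
        headers = {'X-Auth-Token': token}
        base = os.environ['HEAT_SERVICE_URL']
        # Look the stack up by name; anything but 200 means there
        # is nothing left behind to clean up.
        r = requests.get(base + '/stacks/' + stack_name,
                         headers=headers)
        if r.status_code != 200 or 'stack' not in r.json():
            return
        stack = r.json()['stack']
        stack_url = '%s/stacks/%s/%s' % (base, stack['stack_name'],
                                         stack['id'])
        requests.delete(stack_url, headers=headers)
        # Poll until the delete finishes or ~60 seconds have passed.
        for _ in range(30):
            r = requests.get(stack_url, headers=headers)
            if r.status_code != 200:
                break
            if r.json()['stack']['stack_status'] != 'DELETE_IN_PROGRESS':
                break
            time.sleep(2)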

Change-Id: I7f88a29a296b213851e4987557903192938f31e4
diff --git a/telemetry_tempest_plugin/scenario/test_telemetry_integration.py b/telemetry_tempest_plugin/scenario/test_telemetry_integration.py
index 3cd7301..035c138 100644
--- a/telemetry_tempest_plugin/scenario/test_telemetry_integration.py
+++ b/telemetry_tempest_plugin/scenario/test_telemetry_integration.py
@@ -11,6 +11,8 @@
 #    under the License.
 
 import os
+import requests
+import time
 
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -28,6 +30,11 @@
     TIMEOUT_SCALING_FACTOR = 5
 
     @classmethod
+    def resource_setup(cls):
+        super(TestTelemetryIntegration, cls).resource_setup()
+        cls.stack_name = data_utils.rand_name("telemetry")
+
+    @classmethod
     def skip_checks(cls):
         super(TestTelemetryIntegration, cls).skip_checks()
         for name in ["aodh", "gnocchi", "nova", "heat",
@@ -74,6 +81,32 @@
                                 opt_section.catalog_type)
             return endpoints[0]['endpoints'][0][endpoint_type].rstrip('/')
 
+    @classmethod
+    def resource_cleanup(cls):
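+        # A failed gabbi scenario can leave the test stack behind;
+        # delete it here so the rest of the cleanup doesn't fail.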
+        headers = {'X-Auth-Token': cls.os_primary.auth_provider.get_auth()[0]}
+        url = os.environ['HEAT_SERVICE_URL'] + "/stacks/" + cls.stack_name
+        r = requests.get(url, headers=headers)
+
+        if r.status_code == 200 and "stack" in r.json():
+            stack = r.json()["stack"]
+            stack_url = (f'{os.environ["HEAT_SERVICE_URL"]}/stacks/'
+                         f'{stack["stack_name"]}/{stack["id"]}')
+            requests.delete(stack_url, headers=headers)
+
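+            # Poll until the delete finishes or the stack is gone
+            # (404), giving up after roughly 60 seconds.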
+            for _ in range(30):
+                r = requests.get(stack_url, headers=headers)
+                if r.status_code != 200:
+                    break
+                if r.json()["stack"]["stack_status"] != "DELETE_IN_PROGRESS":
+                    break
+                time.sleep(2)
+
+        super(TestTelemetryIntegration, cls).resource_cleanup()
+
     def _prep_test(self, filename):
         admin_auth = self.os_admin.auth_provider.get_auth()
         auth = self.os_primary.auth_provider.get_auth()
@@ -99,7 +132,7 @@
             "GLANCE_IMAGE_NAME": self.image_create(),
             "NOVA_FLAVOR_REF": config.CONF.compute.flavor_ref,
             "NEUTRON_NETWORK": networks[0].get('id'),
-            "STACK_NAME": data_utils.rand_name('telemetry'),
+            "STACK_NAME": self.stack_name,
         })
 
 
diff --git a/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py b/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py
index 0d6637b..63b4661 100644
--- a/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py
+++ b/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py
@@ -11,6 +11,8 @@
 #    under the License.
 
 import os
+import requests
+import time
 
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -29,6 +31,11 @@
     TIMEOUT_SCALING_FACTOR = 5
 
     @classmethod
+    def resource_setup(cls):
+        super(PrometheusGabbiTest, cls).resource_setup()
+        cls.stack_name = data_utils.rand_name("telemetry")
+
+    @classmethod
     def skip_checks(cls):
         super(PrometheusGabbiTest, cls).skip_checks()
         for name in ["aodh", "nova", "heat",
@@ -75,15 +82,40 @@
                                 opt_section.catalog_type)
             return endpoints[0]['endpoints'][0][endpoint_type].rstrip('/')
 
+    @classmethod
+    def resource_cleanup(cls):
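+        # A failed gabbi scenario can leave the test stack behind;
+        # delete it here so the rest of the cleanup doesn't fail.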
+        headers = {'X-Auth-Token': cls.os_primary.auth_provider.get_auth()[0]}
+        url = os.environ['HEAT_SERVICE_URL'] + "/stacks/" + cls.stack_name
+        r = requests.get(url, headers=headers)
+
+        if r.status_code == 200 and "stack" in r.json():
+            stack = r.json()["stack"]
+            stack_url = (f'{os.environ["HEAT_SERVICE_URL"]}/stacks/'
+                         f'{stack["stack_name"]}/{stack["id"]}')
+            requests.delete(stack_url, headers=headers)
+
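+            # Poll until the delete finishes or the stack is gone
+            # (404), giving up after roughly 60 seconds.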
+            for _ in range(30):
+                r = requests.get(stack_url, headers=headers)
+                if r.status_code != 200:
+                    break
+                if r.json()["stack"]["stack_status"] != "DELETE_IN_PROGRESS":
+                    break
+                time.sleep(2)
+
+        super(PrometheusGabbiTest, cls).resource_cleanup()
+
     def _prep_test(self, filename):
         auth = self.os_primary.auth_provider.get_auth()
         networks = self.os_primary.networks_client.list_networks(
             **{'router:external': False, 'fields': 'id'})['networks']
-        stack_name = data_utils.rand_name('telemetry')
         # NOTE(marihan): This is being used in prometheus query as heat is
         # using the last 7 digits from stack_name to create the autoscaling
         # resources.
-        resource_prefix = stack_name[-7:]
+        resource_prefix = self.stack_name[-7:]
         prometheus_rate_duration = (
             config.CONF.telemetry.ceilometer_polling_interval
             + config.CONF.telemetry.prometheus_scrape_interval)
@@ -104,7 +136,7 @@
             "GLANCE_IMAGE_NAME": self.image_create(),
             "NOVA_FLAVOR_REF": config.CONF.compute.flavor_ref,
             "NEUTRON_NETWORK": networks[0].get('id'),
-            "STACK_NAME": stack_name,
+            "STACK_NAME": self.stack_name,
             "RESOURCE_PREFIX": resource_prefix,
             "PROMETHEUS_RATE_DURATION": str(prometheus_rate_duration),
         })