Merge "Re-use stack_create method from base class"
diff --git a/common/config.py b/common/config.py
index 31a996b..0ea6263 100644
--- a/common/config.py
+++ b/common/config.py
@@ -88,6 +88,12 @@
     cfg.IntOpt('volume_size',
                default=1,
                help='Default size in GB for volumes created by volumes tests'),
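+    # Some deployments disable the stack adopt/abandon APIs; these flags let
+    # the corresponding tests be skipped there.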
+    cfg.BoolOpt('skip_stack_adopt_tests',
+                default=False,
+                help="Skip Stack Adopt Integration tests"),
+    cfg.BoolOpt('skip_stack_abandon_tests',
+                default=False,
+                help="Skip Stack Abandon Integration tests"),
 ]
 
 
diff --git a/common/test.py b/common/test.py
index 7b17d57..be9cdce 100644
--- a/common/test.py
+++ b/common/test.py
@@ -33,7 +33,7 @@
 _LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
 
 
-def call_until_true(func, duration, sleep_for):
+def call_until_true(duration, sleep_for, func, *args, **kwargs):
     """
     Call the given function until it returns True (and return True) or
     until the specified duration (in seconds) elapses (and return
@@ -48,7 +48,7 @@
     now = time.time()
     timeout = now + duration
     while now < timeout:
-        if func():
+        if func(*args, **kwargs):
             return True
         LOG.debug("Sleeping for %d seconds", sleep_for)
         time.sleep(sleep_for)
@@ -88,71 +88,6 @@
         self.object_client = self.manager.object_client
         self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
 
-    def status_timeout(self, things, thing_id, expected_status,
-                       error_status='ERROR',
-                       not_found_exception=heat_exceptions.NotFound):
-        """
-        Given a thing and an expected status, do a loop, sleeping
-        for a configurable amount of time, checking for the
-        expected status to show. At any time, if the returned
-        status of the thing is ERROR, fail out.
-        """
-        self._status_timeout(things, thing_id,
-                             expected_status=expected_status,
-                             error_status=error_status,
-                             not_found_exception=not_found_exception)
-
-    def _status_timeout(self,
-                        things,
-                        thing_id,
-                        expected_status=None,
-                        allow_notfound=False,
-                        error_status='ERROR',
-                        not_found_exception=heat_exceptions.NotFound):
-
-        log_status = expected_status if expected_status else ''
-        if allow_notfound:
-            log_status += ' or NotFound' if log_status != '' else 'NotFound'
-
-        def check_status():
-            # python-novaclient has resources available to its client
-            # that all implement a get() method taking an identifier
-            # for the singular resource to retrieve.
-            try:
-                thing = things.get(thing_id)
-            except not_found_exception:
-                if allow_notfound:
-                    return True
-                raise
-            except Exception as e:
-                if allow_notfound and self.not_found_exception(e):
-                    return True
-                raise
-
-            new_status = thing.status
-
-            # Some components are reporting error status in lower case
-            # so case sensitive comparisons can really mess things
-            # up.
-            if new_status.lower() == error_status.lower():
-                message = ("%s failed to get to expected status (%s). "
-                           "In %s state.") % (thing, expected_status,
-                                              new_status)
-                raise exceptions.BuildErrorException(message,
-                                                     server_id=thing_id)
-            elif new_status == expected_status and expected_status is not None:
-                return True  # All good.
-            LOG.debug("Waiting for %s to get to %s status. "
-                      "Currently in %s status",
-                      thing, log_status, new_status)
-        if not call_until_true(
-                check_status,
-                self.conf.build_timeout,
-                self.conf.build_interval):
-            message = ("Timed out waiting for thing %s "
-                       "to become %s") % (thing_id, log_status)
-            raise exceptions.TimeoutException(message)
-
     def get_remote_client(self, server_or_ip, username, private_key=None):
         if isinstance(server_or_ip, six.string_types):
             ip = server_or_ip
@@ -227,7 +162,7 @@
             return (proc.returncode == 0) == should_succeed
 
         return call_until_true(
-            ping, self.conf.build_timeout, 1)
+            self.conf.build_timeout, 1, ping)
 
     def _wait_for_resource_status(self, stack_identifier, resource_name,
                                   status, failure_pattern='^.*_FAILED$',
@@ -374,6 +309,8 @@
     def stack_adopt(self, stack_name=None, files=None,
                     parameters=None, environment=None, adopt_data=None,
                     wait_for_status='ADOPT_COMPLETE'):
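+        # Honour the config flag for deployments where stack adopt is disabled.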
+        if self.conf.skip_stack_adopt_tests:
+            self.skipTest('Skipping stack adopt test: disabled in conf')
         name = stack_name or self._stack_rand_name()
         templ_files = files or {}
         params = parameters or {}
@@ -392,3 +329,10 @@
         stack_identifier = '%s/%s' % (name, stack.id)
         self._wait_for_stack_status(stack_identifier, wait_for_status)
         return stack_identifier
+
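+    # If abandon is disabled in conf, register a delete cleanup so the stack
+    # is not leaked, then skip; otherwise return the abandon data.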
+    def stack_abandon(self, stack_id):
+        if self.conf.skip_stack_abandon_tests:
+            self.addCleanup(self.client.stacks.delete, stack_id)
+            self.skipTest('Skipping stack abandon test: disabled in conf')
+        info = self.client.stacks.abandon(stack_id=stack_id)
+        return info
diff --git a/functional/test_autoscaling.py b/functional/test_autoscaling.py
index a8a8fc4..15b318b 100644
--- a/functional/test_autoscaling.py
+++ b/functional/test_autoscaling.py
@@ -78,8 +78,11 @@
     properties:
       salt: {get_param: ImageId}
 outputs:
-  PublicIp:
-    value: {get_attr: [random1, value]}
+  PublicIp: {value: {get_attr: [random1, value]}}
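+  # Extra outputs mirroring AWS::EC2::Instance attributes; their values are
+  # never read by these tests.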
+  AvailabilityZone: {value: 'not-used11'}
+  PrivateDnsName: {value: 'not-used12'}
+  PublicDnsName: {value: 'not-used13'}
+  PrivateIp: {value: 'not-used14'}
 '''
 
     # This is designed to fail.
@@ -530,3 +533,190 @@
                                    num_creates_expected_on_updt=2,
                                    num_deletes_expected_on_updt=2,
                                    update_replace=False)
+
+
+class AutoScalingSignalTest(AutoscalingGroupTest):
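+    """Signal scaling policies directly and verify the group resizes.
+
+    The custom_lb instance is mapped to a stub provider whose metadata
+    exposes the group's instance list, so the result of each signal can be
+    checked from both the metadata and the stack outputs.
+    """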
+
+    template = '''
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Template to create multiple instances.",
+  "Parameters" : {"size": {"Type": "String", "Default": "1"},
+                  "AZ": {"Type": "String", "Default": "nova"},
+                  "image": {"Type": "String"},
+                  "flavor": {"Type": "String"}},
+  "Resources": {
+    "custom_lb": {
+      "Type": "AWS::EC2::Instance",
+      "Properties": {
+        "ImageId": {"Ref": "image"},
+        "InstanceType": {"Ref": "flavor"},
+        "UserData": "foo",
+        "SecurityGroups": [ "sg-1" ],
+        "Tags": []
+      },
+      "Metadata": {
+        "IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
+      }
+    },
+    "JobServerGroup": {
+      "Type" : "AWS::AutoScaling::AutoScalingGroup",
+      "Properties" : {
+        "AvailabilityZones" : [{"Ref": "AZ"}],
+        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
+        "DesiredCapacity" : {"Ref": "size"},
+        "MinSize" : "0",
+        "MaxSize" : "20"
+      }
+    },
+    "JobServerConfig" : {
+      "Type" : "AWS::AutoScaling::LaunchConfiguration",
+      "Metadata": {"foo": "bar"},
+      "Properties": {
+        "ImageId"           : {"Ref": "image"},
+        "InstanceType"      : {"Ref": "flavor"},
+        "SecurityGroups"    : [ "sg-1" ],
+        "UserData"          : "jsconfig data"
+      }
+    },
+    "ScaleUpPolicy" : {
+      "Type" : "AWS::AutoScaling::ScalingPolicy",
+      "Properties" : {
+        "AdjustmentType" : "ChangeInCapacity",
+        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
+        "Cooldown" : "0",
+        "ScalingAdjustment": "1"
+      }
+    },
+    "ScaleDownPolicy" : {
+      "Type" : "AWS::AutoScaling::ScalingPolicy",
+      "Properties" : {
+        "AdjustmentType" : "ChangeInCapacity",
+        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
+        "Cooldown" : "0",
+        "ScalingAdjustment" : "-2"
+      }
+    }
+  },
+  "Outputs": {
+    "InstanceList": {"Value": {
+      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
+  }
+}
+'''
+
+    lb_template = '''
+heat_template_version: 2013-05-23
+parameters:
+  ImageId: {type: string}
+  InstanceType: {type: string}
+  SecurityGroups: {type: comma_delimited_list}
+  UserData: {type: string}
+  Tags: {type: comma_delimited_list}
+
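+# A do-nothing provider for custom_lb: no resources, only the outputs an
+# AWS::EC2::Instance provider is expected to expose.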
+resources:
+outputs:
+  PublicIp: {value: "not-used"}
+  AvailabilityZone: {value: 'not-used1'}
+  PrivateDnsName: {value: 'not-used2'}
+  PublicDnsName: {value: 'not-used3'}
+  PrivateIp: {value: 'not-used4'}
+
+'''
+
+    def setUp(self):
+        super(AutoScalingSignalTest, self).setUp()
+        self.build_timeout = self.conf.build_timeout
+        self.build_interval = self.conf.build_interval
+        self.files = {'provider.yaml': self.instance_template,
+                      'lb.yaml': self.lb_template}
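+        # Map the custom_lb resource to the stub lb.yaml provider and every
+        # other AWS::EC2::Instance to provider.yaml.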
+        self.env = {'resource_registry':
+                    {'resources':
+                     {'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
+                     'AWS::EC2::Instance': 'provider.yaml'},
+                    'parameters': {'size': 2,
+                                   'image': self.conf.image_ref,
+                                   'flavor': self.conf.instance_type}}
+
+    def check_instance_count(self, stack_identifier, expected):
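+        # True only when both the custom_lb metadata and the InstanceList
+        # output report the expected number of instances.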
+        md = self.client.resources.metadata(stack_identifier, 'custom_lb')
+        actual_md = len(md['IPs'].split(','))
+        if actual_md != expected:
+            LOG.warning('check_instance_count exp:%d, meta:%s',
+                        expected, md['IPs'])
+            return False
+
+        stack = self.client.stacks.get(stack_identifier)
+        inst_list = self._stack_output(stack, 'InstanceList')
+        actual = len(inst_list.split(','))
+        if actual != expected:
+            LOG.warning('check_instance_count exp:%d, act:%s',
+                        expected, inst_list)
+        return actual == expected
+
+    def test_scaling_meta_update(self):
+        """Use heatclient to signal the up and down policy.
+
+        Then confirm that the metadata in the custom_lb is updated each
+        time.
+        """
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=self.files,
+                                             environment=self.env)
+
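+        # Poll check_instance_count until the initial two instances appear
+        # or build_timeout expires.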
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 2))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        # Scale up by one: signal the policy as an alarm would
+        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 3))
+
+        # Scale down by two: signal the policy as an alarm would
+        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 1))
+
+    def test_signal_with_policy_update(self):
+        """Prove that an updated policy is used in the next signal."""
+
+        stack_identifier = self.stack_create(template=self.template,
+                                             files=self.files,
+                                             environment=self.env)
+
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 2))
+
+        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
+                                                       'JobServerGroup')
+        # Scale up by one: signal the policy as an alarm would
+        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 3))
+
+        # Increase the adjustment to "2" and remove DesiredCapacity so the
+        # update itself does not reset the group from 3 back to 2.
+        new_template = self.template.replace(
+            '"ScalingAdjustment": "1"',
+            '"ScalingAdjustment": "2"').replace(
+                '"DesiredCapacity" : {"Ref": "size"},', '')
+
+        self.update_stack(stack_identifier, template=new_template,
+                          environment=self.env, files=self.files)
+
+        # Scale up by two: signal the updated policy as an alarm would
+        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
+        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
+        self.assertTrue(test.call_until_true(
+            self.build_timeout, self.build_interval,
+            self.check_instance_count, stack_identifier, 5))
diff --git a/functional/test_template_resource.py b/functional/test_template_resource.py
index 80dcdce..5df4235 100644
--- a/functional/test_template_resource.py
+++ b/functional/test_template_resource.py
@@ -437,7 +437,7 @@
         stack_identifier = '%s/%s' % (stack_name, stack.id)
         self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
 
-        info = self.client.stacks.abandon(stack_id=stack_identifier)
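+        # Use the base-class helper so skip_stack_abandon_tests is honoured.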
+        info = self.stack_abandon(stack_id=stack_identifier)
         self.assertEqual(self._yaml_to_json(self.main_template),
                          info['template'])
         self.assertEqual(self._yaml_to_json(self.nested_templ),