# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import json

from heatclient import exc
from oslo_log import log as logging
import six
from testtools import matchers

from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base


LOG = logging.getLogger(__name__)


class AutoscalingGroupTest(functional_base.FunctionalTestsBase):

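    # AWS-style scaling group template used by the tests below.  The "size"
    # parameter drives MinSize, and the InstanceList output exposes the
    # group members so the tests can count them.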
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "MinSize" : {"Ref": "size"},
        "MaxSize" : "20"
      }
    },

    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
    "JobServerConfigRef": {"Value": {
      "Ref": "JobServerConfig"}}
  }
}
'''

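    # Stand-in "provider" template that the tests register as
    # AWS::EC2::Instance.  It only creates an OS::Heat::RandomString (so no
    # real servers are booted) while exposing the outputs an instance
    # provider is expected to have.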
    instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
  random1:
    type: OS::Heat::RandomString
    properties:
      salt: {get_param: ImageId}
outputs:
  PublicIp: {value: {get_attr: [random1, value]}}
  AvailabilityZone: {value: 'not-used11'}
  PrivateDnsName: {value: 'not-used12'}
  PublicDnsName: {value: 'not-used13'}
  PrivateIp: {value: 'not-used14'}
'''

    # This template is designed to fail: nothing signals the wait condition
    # handle, so the WaitCondition times out after one second and the
    # member goes to CREATE_FAILED.
    bad_instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
  random1:
    type: OS::Heat::RandomString
    depends_on: waiter
  ready_poster:
    type: AWS::CloudFormation::WaitConditionHandle
  waiter:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Handle: {get_resource: ready_poster}
      Timeout: 1
outputs:
  PublicIp:
    value: {get_attr: [random1, value]}
'''

    def setUp(self):
        super(AutoscalingGroupTest, self).setUp()
        if not self.conf.image_ref:
            raise self.skipException("No image configured to test")
        if not self.conf.minimal_image_ref:
            raise self.skipException("No minimal image configured to test")
        if not self.conf.instance_type:
            raise self.skipException("No flavor configured to test")

    def assert_instance_count(self, stack, expected_count):
        inst_list = self._stack_output(stack, 'InstanceList')
        self.assertEqual(expected_count, len(inst_list.split(',')))

    def _assert_instance_state(self, nested_identifier,
                               num_complete, num_failed):
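        # Walk the nested stack and decrement the expected counters; both
        # must reach zero, i.e. the group holds exactly num_complete
        # COMPLETE members and num_failed FAILED members.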
        for res in self.client.resources.list(nested_identifier):
            if 'COMPLETE' in res.resource_status:
                num_complete = num_complete - 1
            elif 'FAILED' in res.resource_status:
                num_failed = num_failed - 1
        self.assertEqual(0, num_failed)
        self.assertEqual(0, num_complete)


class AutoscalingGroupBasicTest(AutoscalingGroupTest):

    def test_basic_create_works(self):
        """Make sure the working case is good.

        Note this combines test_override_aws_ec2_instance into this test as
        well, which is:
        If AWS::EC2::Instance is overridden, AutoScalingGroup will
        automatically use that overridden resource type.
        """

        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 4,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_identifier = self.stack_create(template=self.template,
                                             files=files, environment=env)
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 4)

    def test_size_updates_work(self):
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)

        # Increase min size to 5
        env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': {'size': 5,
                               'image': self.conf.image_ref,
                               'flavor': self.conf.instance_type}}
        self.update_stack(stack_identifier, self.template,
                          environment=env2, files=files)
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 5)

    def test_update_group_replace(self):
        """Test that non-updatable properties cause a replacement.

        Make sure that during a group update the non-updatable
        properties cause a replacement.
        """
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry':
               {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': '1',
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        orig_asg_id = rsrc.physical_resource_id

        env2 = {'resource_registry':
                {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': {'size': '1',
                               'AZ': 'wibble',
                               'image': self.conf.image_ref,
                               'flavor': self.conf.instance_type}}
        self.update_stack(stack_identifier, self.template,
                          environment=env2, files=files)

        # replacement will cause the resource physical_resource_id to change.
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)

    def test_create_instance_error_causes_group_error(self):
        """Test that a failed member creation fails the instance group.

        If a resource in an instance group fails to be created, the instance
        group itself will fail and the broken inner resource will remain.
        """
        stack_name = self._stack_rand_name()
        files = {'provider.yaml': self.bad_instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        self.client.stacks.create(
            stack_name=stack_name,
            template=self.template,
            files=files,
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self.addCleanup(self._stack_delete, stack_name)
        stack = self.client.stacks.get(stack_name)
        stack_identifier = '%s/%s' % (stack_name, stack.id)
        self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 0, 2)

    def test_update_instance_error_causes_group_error(self):
        """Test that a member create failure during update fails the group.

        If a resource in an instance group fails to be created during an
        update, the instance group itself will fail and the broken inner
        resource will remain.
        """
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 2, 0)
        initial_list = [res.resource_name
                        for res in self.client.resources.list(nested_ident)]

        env['parameters']['size'] = 3
        files2 = {'provider.yaml': self.bad_instance_template}
        self.client.stacks.update(
            stack_id=stack_identifier,
            template=self.template,
            files=files2,
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')

        # assert that there are 3 bad instances
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # 2 resources should be in UPDATE_FAILED, and one in CREATE_FAILED.
        for res in self.client.resources.list(nested_ident):
            if res.resource_name in initial_list:
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               'UPDATE_FAILED')
            else:
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               'CREATE_FAILED')

    def test_group_suspend_resume(self):
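        """Check that suspend/resume of the stack cascades to all members."""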

        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 4,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_identifier = self.stack_create(template=self.template,
                                             files=files, environment=env)

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        self.stack_suspend(stack_identifier)
        self._wait_for_all_resource_status(nested_ident, 'SUSPEND_COMPLETE')

        self.stack_resume(stack_identifier)
        self._wait_for_all_resource_status(nested_ident, 'RESUME_COMPLETE')


class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):

    def ig_tmpl_with_updt_policy(self):
        templ = json.loads(copy.deepcopy(self.template))
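        # Rolling update policy: members are replaced in batches of at most
        # MaxBatchSize while MinInstancesInService members are kept running,
        # with PauseTime between batches.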
        up = {"AutoScalingRollingUpdate": {
              "MinInstancesInService": "1",
              "MaxBatchSize": "2",
              "PauseTime": "PT1S"}}
        templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
        return templ

    def update_instance_group(self, updt_template,
                              num_updates_expected_on_updt,
                              num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              update_replace):

        # setup stack from the initial template
        files = {'provider.yaml': self.instance_template}
        size = 10
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': size,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_name = self._stack_rand_name()
        stack_identifier = self.stack_create(
            stack_name=stack_name,
            template=self.ig_tmpl_with_updt_policy(),
            files=files,
            environment=env)
        stack = self.client.stacks.get(stack_identifier)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # test that physical resource name of launch configuration is used
        conf_name = self._stack_output(stack, 'JobServerConfigRef')
        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
        self.assertThat(conf_name,
                        matchers.MatchesRegex(conf_name_pattern))

        # test the number of instances created
        self.assert_instance_count(stack, size)
        # saves info from initial list of instances for comparison later
        init_instances = self.client.resources.list(nested_ident)
        init_names = [inst.resource_name for inst in init_instances]

        # test stack update
        self.update_stack(stack_identifier, updt_template,
                          environment=env, files=files)
        updt_stack = self.client.stacks.get(stack_identifier)

        # test that the launch configuration is replaced
        updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
        self.assertThat(updt_conf_name,
                        matchers.MatchesRegex(conf_name_pattern))
        self.assertNotEqual(conf_name, updt_conf_name)

        # test that the group size is the same
        updt_instances = self.client.resources.list(nested_ident)
        updt_names = [inst.resource_name for inst in updt_instances]
        self.assertEqual(len(init_names), len(updt_names))
        for res in updt_instances:
            self.assertEqual('UPDATE_COMPLETE', res.resource_status)

        # test that the appropriate number of instance names are the same
        matched_names = set(updt_names) & set(init_names)
        self.assertEqual(num_updates_expected_on_updt, len(matched_names))

        # test that the appropriate number of new instances are created
        self.assertEqual(num_creates_expected_on_updt,
                         len(set(updt_names) - set(init_names)))

        # test that the appropriate number of instances are deleted
        self.assertEqual(num_deletes_expected_on_updt,
                         len(set(init_names) - set(updt_names)))

        # test that the older instances are the ones being deleted
        if num_deletes_expected_on_updt > 0:
            deletes_expected = init_names[:num_deletes_expected_on_updt]
            for name in deletes_expected:
                self.assertNotIn(name, updt_names)

    def test_instance_group_update_replace(self):
        """Test simple update replace.

        Test update replace with no conflict in batch size and minimum
        instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        grp = updt_template['Resources']['JobServerGroup']
        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_with_adjusted_capacity(self):
        """Test update replace with capacity adjustment.

        Test update replace with capacity adjustment due to conflict in batch
        size and minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        grp = updt_template['Resources']['JobServerGroup']
        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_batch_size(self):
        """Test update replace with a huge batch size."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '0'
        policy['MaxBatchSize'] = '20'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_min_in_service(self):
        """Test update replace with a huge MinInstancesInService value."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '20'
        policy['MaxBatchSize'] = '1'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=9,
                                   num_creates_expected_on_updt=1,
                                   num_deletes_expected_on_updt=1,
                                   update_replace=True)

    def test_instance_group_update_no_replace(self):
        """Test simple update only and no replace.

        Test simple update only and no replace (i.e. updated instance flavor
        in Launch Configuration) with no conflict in batch size and
        minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=False)

    def test_instance_group_update_no_replace_with_adjusted_capacity(self):
        """Test update only and no replace with capacity adjustment.

        Test update only and no replace (i.e. updated instance flavor in
        Launch Configuration) with capacity adjustment due to conflict in
        batch size and minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=False)


class AutoScalingSignalTest(AutoscalingGroupTest):

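    # Same group as the parent class plus: a custom_lb instance whose
    # Metadata mirrors the group's InstanceList, a DesiredCapacity driven by
    # the "size" parameter, and ScaleUp/ScaleDown policies that the tests
    # signal directly.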
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "custom_lb": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {"Ref": "image"},
        "InstanceType": {"Ref": "flavor"},
        "UserData": "foo",
        "SecurityGroups": [ "sg-1" ],
        "Tags": []
      },
      "Metadata": {
        "IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
      }
    },
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "DesiredCapacity" : {"Ref": "size"},
        "MinSize" : "0",
        "MaxSize" : "20"
      }
    },
    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    },
    "ScaleUpPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment": "1"
      }
    },
    "ScaleDownPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment" : "-2"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
  }
}
'''

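    # Provider used only for the custom_lb resource: it creates nothing and
    # just fakes the expected outputs, so the tests can read the IP list
    # that Heat writes into its metadata.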
    lb_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
outputs:
  PublicIp: {value: "not-used"}
  AvailabilityZone: {value: 'not-used1'}
  PrivateDnsName: {value: 'not-used2'}
  PublicDnsName: {value: 'not-used3'}
  PrivateIp: {value: 'not-used4'}

'''

    def setUp(self):
        super(AutoScalingSignalTest, self).setUp()
        self.build_timeout = self.conf.build_timeout
        self.build_interval = self.conf.build_interval
        self.files = {'provider.yaml': self.instance_template,
                      'lb.yaml': self.lb_template}
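        # The per-resource registry entry maps only the resource named
        # custom_lb to lb.yaml; the global AWS::EC2::Instance entry applies
        # to the group members.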
        self.env = {'resource_registry':
                    {'resources':
                     {'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
                     'AWS::EC2::Instance': 'provider.yaml'},
                    'parameters': {'size': 2,
                                   'image': self.conf.image_ref,
                                   'flavor': self.conf.instance_type}}

    def check_instance_count(self, stack_identifier, expected):
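        # Check the member count in two places: the IP list written into
        # custom_lb's metadata and the stack's InstanceList output.  Returns
        # True only when both report the expected number of members.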
        md = self.client.resources.metadata(stack_identifier, 'custom_lb')
        actual_md = len(md['IPs'].split(','))
        if actual_md != expected:
            LOG.warn('check_instance_count exp:%d, meta:%s' % (expected,
                                                               md['IPs']))
            return False

        stack = self.client.stacks.get(stack_identifier)
        inst_list = self._stack_output(stack, 'InstanceList')
        actual = len(inst_list.split(','))
        if actual != expected:
            LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
                                                              inst_list))
        return actual == expected

    def test_scaling_meta_update(self):
        """Use heatclient to signal the up and down policy.

        Then confirm that the metadata in the custom_lb is updated each
        time.
        """
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up by one; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # Scale down by two; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 1))

    def test_signal_with_policy_update(self):
        """Prove that an updated policy is used in the next signal."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up by one; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # increase the adjustment to "+2" and remove the DesiredCapacity
        # so we don't go from 3 to 2.
        new_template = self.template.replace(
            '"ScalingAdjustment": "1"',
            '"ScalingAdjustment": "2"').replace(
                '"DesiredCapacity" : {"Ref": "size"},', '')

        self.update_stack(stack_identifier, template=new_template,
                          environment=self.env, files=self.files)

        # Scale up by two; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 5))

    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)
        self._wait_for_resource_status(
            stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')

        # Sending a signal now should raise an exception
        ex = self.assertRaises(exc.BadRequest,
                               self.client.resources.signal,
                               stack_identifier, 'ScaleUpPolicy')

        error_msg = 'Signal resource during SUSPEND is not supported'
        self.assertIn(error_msg, six.text_type(ex))
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))