# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import json

from oslo_log import log as logging
from testtools import matchers

from heat_integrationtests.common import test


LOG = logging.getLogger(__name__)


class AutoscalingGroupTest(test.HeatIntegrationTest):

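    # CFN-style template: an AutoScalingGroup whose size comes from the
    # "size" parameter, plus the LaunchConfiguration it references. The
    # image and flavor are supplied by each test.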
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "MinSize" : {"Ref": "size"},
        "MaxSize" : "20"
      }
    },

    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
    "JobServerConfigRef": {"Value": {
      "Ref": "JobServerConfig"}}
  }
}
'''

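    # Provider template used to override AWS::EC2::Instance: it boots no
    # real server, just an OS::Heat::RandomString, and stubs out the
    # outputs an instance is expected to expose.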
    instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
  random1:
    type: OS::Heat::RandomString
    properties:
      salt: {get_param: ImageId}
outputs:
  PublicIp: {value: {get_attr: [random1, value]}}
  AvailabilityZone: {value: 'not-used11'}
  PrivateDnsName: {value: 'not-used12'}
  PublicDnsName: {value: 'not-used13'}
  PrivateIp: {value: 'not-used14'}
'''

    # This is designed to fail.
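    # The WaitCondition below has a one second timeout and its handle is
    # never signalled, so every instance built from this template times
    # out and ends up in a FAILED state.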
    bad_instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
  random1:
    type: OS::Heat::RandomString
    depends_on: waiter
  ready_poster:
    type: AWS::CloudFormation::WaitConditionHandle
  waiter:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Handle: {Ref: ready_poster}
      Timeout: 1
outputs:
  PublicIp:
    value: {get_attr: [random1, value]}
'''

    def setUp(self):
        super(AutoscalingGroupTest, self).setUp()
        self.client = self.orchestration_client
        if not self.conf.image_ref:
            raise self.skipException("No image configured to test")
        if not self.conf.minimal_image_ref:
            raise self.skipException("No minimal image configured to test")
        if not self.conf.instance_type:
            raise self.skipException("No flavor configured to test")

    def assert_instance_count(self, stack, expected_count):
        inst_list = self._stack_output(stack, 'InstanceList')
        self.assertEqual(expected_count, len(inst_list.split(',')))

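    # Walk the nested group stack and check that it contains exactly
    # num_complete resources in a *_COMPLETE state and exactly num_failed
    # resources in a *_FAILED state.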
    def _assert_instance_state(self, nested_identifier,
                               num_complete, num_failed):
        for res in self.client.resources.list(nested_identifier):
            if 'COMPLETE' in res.resource_status:
                num_complete = num_complete - 1
            elif 'FAILED' in res.resource_status:
                num_failed = num_failed - 1
        self.assertEqual(0, num_failed)
        self.assertEqual(0, num_complete)


class AutoscalingGroupBasicTest(AutoscalingGroupTest):

    def test_basic_create_works(self):
        """Make sure the working case is good.

        Note this combines test_override_aws_ec2_instance into this test as
        well, which is:
        If AWS::EC2::Instance is overridden, AutoScalingGroup will
        automatically use that overridden resource type.
        """

        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 4,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_identifier = self.stack_create(template=self.template,
                                             files=files, environment=env)
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 4)

    def test_size_updates_work(self):
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)

        # Increase min size to 5
        env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': {'size': 5,
                               'image': self.conf.image_ref,
                               'flavor': self.conf.instance_type}}
        self.update_stack(stack_identifier, self.template,
                          environment=env2, files=files)
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 5)

    def test_update_group_replace(self):
        """Make sure that during a group update the non-updatable
        properties cause a replacement.
        """
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry':
               {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': '1',
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        orig_asg_id = rsrc.physical_resource_id

        env2 = {'resource_registry':
                {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': {'size': '1',
                               'AZ': 'wibble',
                               'image': self.conf.image_ref,
                               'flavor': self.conf.instance_type}}
        self.update_stack(stack_identifier, self.template,
                          environment=env2, files=files)

        # Replacement will cause the resource physical_resource_id to change.
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)

    def test_create_instance_error_causes_group_error(self):
        """If a resource in an instance group fails to be created, the
        instance group itself will fail and the broken inner resource
        will remain.
        """
        stack_name = self._stack_rand_name()
        files = {'provider.yaml': self.bad_instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

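        # Create the stack with the raw client call (not self.stack_create)
        # so that rollback stays disabled and the CREATE_FAILED end state
        # can be waited for explicitly.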
        self.client.stacks.create(
            stack_name=stack_name,
            template=self.template,
            files=files,
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self.addCleanup(self.client.stacks.delete, stack_name)
        stack = self.client.stacks.get(stack_name)
        stack_identifier = '%s/%s' % (stack_name, stack.id)
        self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 0, 2)

    def test_update_instance_error_causes_group_error(self):
        """If a resource in an instance group fails to be created during an
        update, the instance group itself will fail and the broken inner
        resource will remain.
        """
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 2, 0)
        initial_list = [res.resource_name
                        for res in self.client.resources.list(nested_ident)]

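        # Grow the group to three and switch the provider template to the
        # failing one, so the update breaks the two existing instances and
        # adds one new broken instance.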
        env['parameters']['size'] = 3
        files2 = {'provider.yaml': self.bad_instance_template}
        self.client.stacks.update(
            stack_id=stack_identifier,
            template=self.template,
            files=files2,
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')

        # assert that there are 3 bad instances
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # 2 resources should be in update failed, and one create failed.
        for res in self.client.resources.list(nested_ident):
            if res.resource_name in initial_list:
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               'UPDATE_FAILED')
            else:
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               'CREATE_FAILED')

    def test_group_suspend_resume(self):

        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 4,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_identifier = self.stack_create(template=self.template,
                                             files=files, environment=env)

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

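        # Suspending and resuming the top-level stack should cascade to
        # every member of the nested group stack.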
        self.stack_suspend(stack_identifier)
        self._wait_for_all_resource_status(nested_ident, 'SUSPEND_COMPLETE')

        self.stack_resume(stack_identifier)
        self._wait_for_all_resource_status(nested_ident, 'RESUME_COMPLETE')


class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):

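    # Return the base template with an AutoScalingRollingUpdate policy
    # attached to the JobServerGroup.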
    def ig_tmpl_with_updt_policy(self):
        templ = json.loads(copy.deepcopy(self.template))
        up = {"AutoScalingRollingUpdate": {
            "MinInstancesInService": "1",
            "MaxBatchSize": "2",
            "PauseTime": "PT1S"}}
        templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
        return templ

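    # Create a group of `size` instances with the rolling-update policy,
    # apply updt_template, then check how many group members were updated
    # in place, newly created, and deleted.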
    def update_instance_group(self, updt_template,
                              num_updates_expected_on_updt,
                              num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              update_replace):

        # setup stack from the initial template
        files = {'provider.yaml': self.instance_template}
        size = 10
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': size,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_name = self._stack_rand_name()
        stack_identifier = self.stack_create(
            stack_name=stack_name,
            template=self.ig_tmpl_with_updt_policy(),
            files=files,
            environment=env)
        stack = self.client.stacks.get(stack_identifier)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # test that the physical resource name of the launch configuration
        # is used
        conf_name = self._stack_output(stack, 'JobServerConfigRef')
        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
        self.assertThat(conf_name,
                        matchers.MatchesRegex(conf_name_pattern))

        # test the number of instances created
        self.assert_instance_count(stack, size)
        # save info from the initial list of instances for comparison later
        init_instances = self.client.resources.list(nested_ident)
        init_names = [inst.resource_name for inst in init_instances]

        # test stack update
        self.update_stack(stack_identifier, updt_template,
                          environment=env, files=files)
        updt_stack = self.client.stacks.get(stack_identifier)

        # test that the launch configuration is replaced
        updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
        self.assertThat(updt_conf_name,
                        matchers.MatchesRegex(conf_name_pattern))
        self.assertNotEqual(conf_name, updt_conf_name)

        # test that the group size is the same
        updt_instances = self.client.resources.list(nested_ident)
        updt_names = [inst.resource_name for inst in updt_instances]
        self.assertEqual(len(init_names), len(updt_names))
        for res in updt_instances:
            self.assertEqual('UPDATE_COMPLETE', res.resource_status)

        # test that the appropriate number of instance names are the same
        matched_names = set(updt_names) & set(init_names)
        self.assertEqual(num_updates_expected_on_updt, len(matched_names))

        # test that the appropriate number of new instances are created
        self.assertEqual(num_creates_expected_on_updt,
                         len(set(updt_names) - set(init_names)))

        # test that the appropriate number of instances are deleted
        self.assertEqual(num_deletes_expected_on_updt,
                         len(set(init_names) - set(updt_names)))

        # test that the older instances are the ones being deleted
        if num_deletes_expected_on_updt > 0:
            deletes_expected = init_names[:num_deletes_expected_on_updt]
            for name in deletes_expected:
                self.assertNotIn(name, updt_names)

    def test_instance_group_update_replace(self):
        """Test simple update replace with no conflict in batch size and
        minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        grp = updt_template['Resources']['JobServerGroup']
        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_with_adjusted_capacity(self):
        """Test update replace with capacity adjustment due to conflict in
        batch size and minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        grp = updt_template['Resources']['JobServerGroup']
        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_batch_size(self):
        """Test update replace with a huge batch size."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '0'
        policy['MaxBatchSize'] = '20'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_min_in_service(self):
        """Test update replace with a huge number of minimum instances in
        service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '20'
        policy['MaxBatchSize'] = '1'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=9,
                                   num_creates_expected_on_updt=1,
                                   num_deletes_expected_on_updt=1,
                                   update_replace=True)

    def test_instance_group_update_no_replace(self):
        """Test simple update only and no replace (i.e. updated instance
        flavor in Launch Configuration) with no conflict in batch size and
        minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=False)

    def test_instance_group_update_no_replace_with_adjusted_capacity(self):
        """Test update only and no replace (i.e. updated instance flavor in
        Launch Configuration) with capacity adjustment due to conflict in
        batch size and minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=False)


class AutoScalingSignalTest(AutoscalingGroupTest):

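    # The same group and launch configuration as above, plus ChangeInCapacity
    # scale-up/scale-down policies and a "custom_lb" instance whose metadata
    # mirrors the group's InstanceList output.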
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "custom_lb": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {"Ref": "image"},
        "InstanceType": {"Ref": "flavor"},
        "UserData": "foo",
        "SecurityGroups": [ "sg-1" ],
        "Tags": []
      },
      "Metadata": {
        "IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
      }
    },
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "DesiredCapacity" : {"Ref": "size"},
        "MinSize" : "0",
        "MaxSize" : "20"
      }
    },
    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    },
    "ScaleUpPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment": "1"
      }
    },
    "ScaleDownPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment" : "-2"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
  }
}
'''

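    # Provider template for the custom_lb override: it defines no resources
    # at all, only the outputs an AWS::EC2::Instance provider is expected
    # to expose.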
    lb_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
outputs:
  PublicIp: {value: "not-used"}
  AvailabilityZone: {value: 'not-used1'}
  PrivateDnsName: {value: 'not-used2'}
  PublicDnsName: {value: 'not-used3'}
  PrivateIp: {value: 'not-used4'}

'''

    def setUp(self):
        super(AutoScalingSignalTest, self).setUp()
        self.build_timeout = self.conf.build_timeout
        self.build_interval = self.conf.build_interval
        self.files = {'provider.yaml': self.instance_template,
                      'lb.yaml': self.lb_template}
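        # Register lb.yaml for the custom_lb resource only; every other
        # AWS::EC2::Instance in the stack still maps to provider.yaml.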
        self.env = {'resource_registry':
                    {'resources':
                     {'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
                     'AWS::EC2::Instance': 'provider.yaml'},
                    'parameters': {'size': 2,
                                   'image': self.conf.image_ref,
                                   'flavor': self.conf.instance_type}}

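    # Return True once both the custom_lb metadata and the group's
    # InstanceList output report the expected number of instances; the
    # tests below poll this via test.call_until_true.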
    def check_instance_count(self, stack_identifier, expected):
        md = self.client.resources.metadata(stack_identifier, 'custom_lb')
        actual_md = len(md['IPs'].split(','))
        if actual_md != expected:
            LOG.warn('check_instance_count exp:%d, meta:%s' % (expected,
                                                               md['IPs']))
            return False

        stack = self.client.stacks.get(stack_identifier)
        inst_list = self._stack_output(stack, 'InstanceList')
        actual = len(inst_list.split(','))
        if actual != expected:
            LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
                                                              inst_list))
        return actual == expected

    def test_scaling_meta_update(self):
        """Use heatclient to signal the up and down policy.

        Then confirm that the metadata in the custom_lb is updated each
        time.
        """
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up by one; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # Scale down by two; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 1))

    def test_signal_with_policy_update(self):
        """Prove that an updated policy is used in the next signal."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up by one; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # Increase the adjustment to "2" and remove the DesiredCapacity
        # so we don't go from 3 back down to 2.
        new_template = self.template.replace(
            '"ScalingAdjustment": "1"',
            '"ScalingAdjustment": "2"').replace(
                '"DesiredCapacity" : {"Ref": "size"},', '')

        self.update_stack(stack_identifier, template=new_template,
                          environment=self.env, files=self.files)

        # Scale up by two; trigger the alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 5))

    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)
        self._wait_for_resource_status(
            stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')

        # Send a signal and confirm nothing happened.
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))