blob: 15b318bdace8f87977a0eba4d50b9ea09e5b371b [file] [log] [blame]
Angus Salkeld28339012015-01-20 19:15:37 +10001# Licensed under the Apache License, Version 2.0 (the "License"); you may
2# not use this file except in compliance with the License. You may obtain
3# a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10# License for the specific language governing permissions and limitations
11# under the License.
12
13import copy
14import json
15import logging
16
17from testtools import matchers
18
19from heat_integrationtests.common import test
20
21
22LOG = logging.getLogger(__name__)
23
24
class AutoscalingGroupTest(test.HeatIntegrationTest):
    """Shared fixture for AWS::AutoScaling::AutoScalingGroup tests.

    Holds the CFN group template, the provider templates used to override
    AWS::EC2::Instance, and assertion helpers used by the subclasses.
    """

    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "MinSize" : {"Ref": "size"},
        "MaxSize" : "20"
      }
    },

    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
    "JobServerConfigRef": {"Value": {
      "Ref": "JobServerConfig"}}
  }
}
'''

    # Provider template substituted for AWS::EC2::Instance; only creates
    # a RandomString but exposes the instance-style outputs callers read.
    instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list}

resources:
  random1:
    type: OS::Heat::RandomString
    properties:
      salt: {get_param: ImageId}
outputs:
  PublicIp: {value: {get_attr: [random1, value]}}
  AvailabilityZone: {value: 'not-used11'}
  PrivateDnsName: {value: 'not-used12'}
  PublicDnsName: {value: 'not-used13'}
  PrivateIp: {value: 'not-used14'}
'''

    # This is designed to fail: the wait condition times out after one
    # second because nothing ever signals the handle.
    bad_instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list}

resources:
  random1:
    type: OS::Heat::RandomString
    depends_on: waiter
  ready_poster:
    type: AWS::CloudFormation::WaitConditionHandle
  waiter:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Handle: {Ref: ready_poster}
      Timeout: 1
outputs:
  PublicIp:
    value: {get_attr: [random1, value]}
'''

    def setUp(self):
        """Bind the orchestration client and skip when config is missing."""
        super(AutoscalingGroupTest, self).setUp()
        self.client = self.orchestration_client
        # Skip early (in this fixed order) when any required configuration
        # value is absent from the test environment.
        required = (
            (self.conf.image_ref, "No image configured to test"),
            (self.conf.minimal_image_ref,
             "No minimal image configured to test"),
            (self.conf.instance_type, "No flavor configured to test"),
        )
        for value, message in required:
            if not value:
                raise self.skipException(message)

    def assert_instance_count(self, stack, expected_count):
        """Assert the stack's InstanceList output names expected_count IPs."""
        instances = self._stack_output(stack, 'InstanceList').split(',')
        self.assertEqual(expected_count, len(instances))

    def _assert_instance_state(self, nested_identifier,
                               num_complete, num_failed):
        """Assert COMPLETE/FAILED resource counts in the nested stack.

        Resources whose status contains neither substring (e.g. still
        IN_PROGRESS) are ignored, matching the original behaviour.
        """
        seen_complete = 0
        seen_failed = 0
        for res in self.client.resources.list(nested_identifier):
            if 'COMPLETE' in res.resource_status:
                seen_complete += 1
            elif 'FAILED' in res.resource_status:
                seen_failed += 1
        self.assertEqual(num_failed, seen_failed)
        self.assertEqual(num_complete, seen_complete)
137
138
class AutoscalingGroupBasicTest(AutoscalingGroupTest):
    """Create, resize, replace, fail and suspend/resume group scenarios."""

    def _provider_env(self, size, **extra_params):
        # Build the standard environment: map AWS::EC2::Instance onto the
        # provider template and supply the common stack parameters.
        params = {'size': size,
                  'image': self.conf.image_ref,
                  'flavor': self.conf.instance_type}
        params.update(extra_params)
        return {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': params}

    def test_basic_create_works(self):
        """Make sure the working case is good.

        Note this combines test_override_aws_ec2_instance into this test as
        well, which is: if AWS::EC2::Instance is overridden,
        AutoScalingGroup will automatically use that overridden resource
        type.
        """
        files = {'provider.yaml': self.instance_template}
        stack_identifier = self.stack_create(
            template=self.template,
            files=files,
            environment=self._provider_env(4))
        expected_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(expected_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 4)

    def test_size_updates_work(self):
        """Raising the size parameter grows the group on stack update."""
        files = {'provider.yaml': self.instance_template}
        stack_identifier = self.stack_create(
            template=self.template,
            files=files,
            environment=self._provider_env(2))
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)

        # Increase min size to 5
        self.update_stack(stack_identifier, self.template,
                          environment=self._provider_env(5), files=files)
        self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 5)

    def test_update_group_replace(self):
        """Changing a non-updatable property must replace the group.

        During a group update the non updatable properties cause a
        replacement, visible as a new physical_resource_id.
        """
        files = {'provider.yaml': self.instance_template}
        stack_identifier = self.stack_create(
            template=self.template,
            files=files,
            environment=self._provider_env('1'))
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        orig_asg_id = rsrc.physical_resource_id

        # AvailabilityZones is not updatable, so changing AZ forces replace.
        self.update_stack(stack_identifier, self.template,
                          environment=self._provider_env('1', AZ='wibble'),
                          files=files)

        # replacement will cause the resource physical_resource_id to change.
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)

    def test_create_instance_error_causes_group_error(self):
        """A broken inner instance fails the whole group on create.

        If a resource in an instance group fails to be created, the
        instance group itself will fail and the broken inner resource
        will remain.
        """
        stack_name = self._stack_rand_name()
        files = {'provider.yaml': self.bad_instance_template}
        # Create directly (not via stack_create) so CREATE_FAILED is not
        # treated as a test error; rollback is disabled to keep the debris.
        self.client.stacks.create(
            stack_name=stack_name,
            template=self.template,
            files=files,
            disable_rollback=True,
            parameters={},
            environment=self._provider_env(2)
        )
        self.addCleanup(self.client.stacks.delete, stack_name)
        stack = self.client.stacks.get(stack_name)
        stack_identifier = '%s/%s' % (stack_name, stack.id)
        self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
        expected_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(expected_resources,
                         self.list_resources(stack_identifier))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 0, 2)

    def test_update_instance_error_causes_group_error(self):
        """A broken inner instance fails the whole group on update.

        If a resource in an instance group fails to be created during an
        update, the instance group itself will fail and the broken inner
        resource will remain.
        """
        files = {'provider.yaml': self.instance_template}
        env = self._provider_env(2)
        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        expected_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(expected_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 2, 0)
        initial_list = [res.resource_name
                        for res in self.client.resources.list(nested_ident)]

        # Grow to 3 while switching to the failing provider template.
        env['parameters']['size'] = 3
        self.client.stacks.update(
            stack_id=stack_identifier,
            template=self.template,
            files={'provider.yaml': self.bad_instance_template},
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')

        # assert that there are 3 bad instances
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # 2 resources should be in update failed, and one create failed.
        for res in self.client.resources.list(nested_ident):
            if res.resource_name in initial_list:
                wanted_status = 'UPDATE_FAILED'
            else:
                wanted_status = 'CREATE_FAILED'
            self._wait_for_resource_status(nested_ident,
                                           res.resource_name,
                                           wanted_status)

    def test_group_suspend_resume(self):
        """Suspend/resume on the stack propagates into the nested group."""
        files = {'provider.yaml': self.instance_template}
        stack_identifier = self.stack_create(
            template=self.template,
            files=files,
            environment=self._provider_env(4))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # Suspend first, then resume; each action must complete on the
        # group resource and on every resource of the nested stack.
        for action, status in (('suspend', 'SUSPEND_COMPLETE'),
                               ('resume', 'RESUME_COMPLETE')):
            getattr(self.client.actions, action)(stack_id=stack_identifier)
            self._wait_for_resource_status(
                stack_identifier, 'JobServerGroup', status)
            for res in self.client.resources.list(nested_ident):
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               status)
336
Angus Salkeld28339012015-01-20 19:15:37 +1000337
class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):
    """Exercise the AutoScalingRollingUpdate UpdatePolicy on group updates."""

    def ig_tmpl_with_updt_policy(self):
        """Return the group template (as a dict) with a rolling update policy.

        The default policy keeps one instance in service, updates in
        batches of two and pauses one second between batches; tests tweak
        these values before updating.
        """
        # json.loads builds a fresh structure from the (immutable) template
        # string, so the previous copy.deepcopy of the string was a no-op.
        templ = json.loads(self.template)
        up = {"AutoScalingRollingUpdate": {
            "MinInstancesInService": "1",
            "MaxBatchSize": "2",
            "PauseTime": "PT1S"}}
        templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
        return templ

    def update_instance_group(self, updt_template,
                              num_updates_expected_on_updt,
                              num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              update_replace):
        """Create a size-10 group, update it, and verify the rolling update.

        :param updt_template: template dict to update the stack with.
        :param num_updates_expected_on_updt: instances whose resource names
            must survive the update.
        :param num_creates_expected_on_updt: brand-new instances expected.
        :param num_deletes_expected_on_updt: instances expected to be gone.
        :param update_replace: whether the caller expects replacement
            (documents intent; the assertions below cover both cases).
        """
        # setup stack from the initial template
        files = {'provider.yaml': self.instance_template}
        size = 10
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': size,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_name = self._stack_rand_name()
        stack_identifier = self.stack_create(
            stack_name=stack_name,
            template=self.ig_tmpl_with_updt_policy(),
            files=files,
            environment=env)
        stack = self.client.stacks.get(stack_identifier)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # test that physical resource name of launch configuration is used
        conf_name = self._stack_output(stack, 'JobServerConfigRef')
        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
        self.assertThat(conf_name,
                        matchers.MatchesRegex(conf_name_pattern))

        # test the number of instances created
        self.assert_instance_count(stack, size)
        # saves info from initial list of instances for comparison later
        init_instances = self.client.resources.list(nested_ident)
        init_names = [inst.resource_name for inst in init_instances]

        # test stack update
        self.update_stack(stack_identifier, updt_template,
                          environment=env, files=files)
        self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
        updt_stack = self.client.stacks.get(stack_identifier)

        # test that the launch configuration is replaced
        updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
        self.assertThat(updt_conf_name,
                        matchers.MatchesRegex(conf_name_pattern))
        self.assertNotEqual(conf_name, updt_conf_name)

        # test that the group size is unchanged
        updt_instances = self.client.resources.list(nested_ident)
        updt_names = [inst.resource_name for inst in updt_instances]
        self.assertEqual(len(init_names), len(updt_names))
        for res in updt_instances:
            self.assertEqual('UPDATE_COMPLETE', res.resource_status)

        # test that the appropriate number of instance names are the same
        matched_names = set(updt_names) & set(init_names)
        self.assertEqual(num_updates_expected_on_updt, len(matched_names))

        # test that the appropriate number of new instances are created
        self.assertEqual(num_creates_expected_on_updt,
                         len(set(updt_names) - set(init_names)))

        # test that the appropriate number of instances are deleted
        self.assertEqual(num_deletes_expected_on_updt,
                         len(set(init_names) - set(updt_names)))

        # test that the older instances are the ones being deleted
        if num_deletes_expected_on_updt > 0:
            deletes_expected = init_names[:num_deletes_expected_on_updt]
            # BUG FIX: previously the whole list was passed to assertNotIn,
            # which vacuously passed (a list is never an element of a list
            # of names). Each deleted name must be checked individually.
            for gone_name in deletes_expected:
                self.assertNotIn(gone_name, updt_names)

    def test_instance_group_update_replace(self):
        """Update-replace with no batch-size/min-in-service conflict.

        Simple update replace with no conflict in batch size and minimum
        instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        grp = updt_template['Resources']['JobServerGroup']
        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_with_adjusted_capacity(self):
        """Update-replace with capacity adjustment.

        Capacity is adjusted due to the conflict between batch size and
        minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        grp = updt_template['Resources']['JobServerGroup']
        policy = grp['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_batch_size(self):
        """Update-replace with a batch size larger than the group."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '0'
        policy['MaxBatchSize'] = '20'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_min_in_service(self):
        """Update-replace with a huge number of min instances in service."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '20'
        policy['MaxBatchSize'] = '1'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=9,
                                   num_creates_expected_on_updt=1,
                                   num_deletes_expected_on_updt=1,
                                   update_replace=True)

    def test_instance_group_update_no_replace(self):
        """In-place update (changed flavor) with no capacity conflict.

        Update only and no replace (i.e. updated instance flavor in the
        launch configuration) with no conflict in batch size and minimum
        instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=False)

    def test_instance_group_update_no_replace_with_adjusted_capacity(self):
        """In-place update (changed flavor) with capacity adjustment.

        Capacity is adjusted due to the conflict between batch size and
        minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=False)
Angus Salkeldc85229b2015-02-09 10:58:04 +1000536
537
class AutoScalingSignalTest(AutoscalingGroupTest):
    """Scaling-policy signal tests using a custom LB to observe metadata."""

    # NOTE: test_signal_with_policy_update does literal str.replace on
    # '"ScalingAdjustment": "1"' and '"DesiredCapacity" : {"Ref": "size"},'
    # so those exact spellings must be preserved.
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "custom_lb": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {"Ref": "image"},
        "InstanceType": {"Ref": "flavor"},
        "UserData": "foo",
        "SecurityGroups": [ "sg-1" ],
        "Tags": []
      },
      "Metadata": {
        "IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
      }
    },
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "DesiredCapacity" : {"Ref": "size"},
        "MinSize" : "0",
        "MaxSize" : "20"
      }
    },
    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    },
    "ScaleUpPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment": "1"
      }
    },
    "ScaleDownPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment" : "-2"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
  }
}
'''

    # Provider substituted for the custom_lb instance only; no resources,
    # just the instance-style outputs the engine expects.
    lb_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list}

resources:
outputs:
  PublicIp: {value: "not-used"}
  AvailabilityZone: {value: 'not-used1'}
  PrivateDnsName: {value: 'not-used2'}
  PublicDnsName: {value: 'not-used3'}
  PrivateIp: {value: 'not-used4'}

'''

    def setUp(self):
        """Prepare files/environment shared by the signal tests."""
        super(AutoScalingSignalTest, self).setUp()
        self.build_timeout = self.conf.build_timeout
        self.build_interval = self.conf.build_interval
        self.files = {'provider.yaml': self.instance_template,
                      'lb.yaml': self.lb_template}
        # Per-resource registry entry maps only custom_lb to lb.yaml; all
        # other AWS::EC2::Instance resources use provider.yaml.
        self.env = {'resource_registry':
                    {'resources':
                     {'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
                     'AWS::EC2::Instance': 'provider.yaml'},
                    'parameters': {'size': 2,
                                   'image': self.conf.image_ref,
                                   'flavor': self.conf.instance_type}}

    def check_instance_count(self, stack_identifier, expected):
        """Return True when LB metadata and the stack output both report
        the expected number of instances.

        Designed for polling via test.call_until_true.
        """
        md = self.client.resources.metadata(stack_identifier, 'custom_lb')
        actual_md = len(md['IPs'].split(','))
        if actual_md != expected:
            # FIX: LOG.warn is deprecated -> LOG.warning, and use lazy
            # %-style arguments instead of eager string interpolation.
            LOG.warning('check_instance_count exp:%d, meta:%s',
                        expected, md['IPs'])
            return False

        stack = self.client.stacks.get(stack_identifier)
        inst_list = self._stack_output(stack, 'InstanceList')
        actual = len(inst_list.split(','))
        if actual != expected:
            LOG.warning('check_instance_count exp:%d, act:%s',
                        expected, inst_list)
        return actual == expected

    def test_scaling_meta_update(self):
        """Use heatclient to signal the up and down policy.

        Then confirm that the metadata in the custom_lb is updated each
        time.
        """
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # Scale down two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 1))

    def test_signal_with_policy_update(self):
        """Prove that an updated policy is used in the next signal."""
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # increase the adjustment to "+2" and remove the DesiredCapacity
        # so we don't go from 3 to 2.
        new_template = self.template.replace(
            '"ScalingAdjustment": "1"',
            '"ScalingAdjustment": "2"').replace(
                '"DesiredCapacity" : {"Ref": "size"},', '')

        self.update_stack(stack_identifier, template=new_template,
                          environment=self.env, files=self.files)

        # Scale up two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 5))