blob: 90414051d15b3c93d5466b49ba85b0c803482a51 [file] [log] [blame]
Angus Salkeld28339012015-01-20 19:15:37 +10001# Licensed under the Apache License, Version 2.0 (the "License"); you may
2# not use this file except in compliance with the License. You may obtain
3# a copy of the License at
4#
5# http://www.apache.org/licenses/LICENSE-2.0
6#
7# Unless required by applicable law or agreed to in writing, software
8# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10# License for the specific language governing permissions and limitations
11# under the License.
12
13import copy
14import json
Angus Salkeld28339012015-01-20 19:15:37 +100015
huangtianhuaf71ae072015-07-09 09:15:10 +080016from heatclient import exc
Steve Baker24641292015-03-13 10:47:50 +130017from oslo_log import log as logging
huangtianhuaf71ae072015-07-09 09:15:10 +080018import six
Angus Salkeld28339012015-01-20 19:15:37 +100019from testtools import matchers
20
21from heat_integrationtests.common import test
Rabi Mishra477efc92015-07-31 13:01:45 +053022from heat_integrationtests.functional import functional_base
Angus Salkeld28339012015-01-20 19:15:37 +100023
24
# Module-level logger named after this module (standard oslo_log pattern).
LOG = logging.getLogger(__name__)
26
27
class AutoscalingGroupTest(functional_base.FunctionalTestsBase):
    """Base class for AWS::AutoScaling::AutoScalingGroup integration tests.

    Provides a CFN template defining one autoscaling group plus its launch
    configuration, and provider templates that override AWS::EC2::Instance
    with a cheap OS::Heat::RandomString resource so tests do not need to
    boot real servers.
    """

    # Main CFN template: group "JobServerGroup" sized by the "size"
    # parameter, with launch configuration "JobServerConfig".
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "MinSize" : {"Ref": "size"},
        "MaxSize" : "20"
      }
    },

    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}},
    "JobServerConfigRef": {"Value": {
      "Ref": "JobServerConfig"}}
  }
}
'''

    # Provider template mapped to AWS::EC2::Instance via the
    # resource_registry.  Creates only a RandomString; the outputs mimic
    # the instance attributes the group resource expects to read.
    instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
  random1:
    type: OS::Heat::RandomString
    properties:
      salt: {get_param: ImageId}
outputs:
  PublicIp: {value: {get_attr: [random1, value]}}
  AvailabilityZone: {value: 'not-used11'}
  PrivateDnsName: {value: 'not-used12'}
  PublicDnsName: {value: 'not-used13'}
  PrivateIp: {value: 'not-used14'}
'''

    # This is designed to fail.  The WaitCondition has a one-second
    # timeout and nothing ever signals its handle, so any stack using
    # this provider template ends up in a FAILED state.
    bad_instance_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
  random1:
    type: OS::Heat::RandomString
    depends_on: waiter
  ready_poster:
    type: AWS::CloudFormation::WaitConditionHandle
  waiter:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Handle: {get_resource: ready_poster}
      Timeout: 1
outputs:
  PublicIp:
    value: {get_attr: [random1, value]}
'''

    def setUp(self):
        """Skip unless an image, a minimal image and a flavor are configured."""
        super(AutoscalingGroupTest, self).setUp()
        if not self.conf.image_ref:
            raise self.skipException("No image configured to test")
        if not self.conf.minimal_image_ref:
            raise self.skipException("No minimal image configured to test")
        if not self.conf.instance_type:
            raise self.skipException("No flavor configured to test")

    def assert_instance_count(self, stack, expected_count):
        """Assert the group's InstanceList output names expected_count members."""
        # InstanceList is a comma-separated string of member IPs/values.
        inst_list = self._stack_output(stack, 'InstanceList')
        self.assertEqual(expected_count, len(inst_list.split(',')))

    def _assert_instance_state(self, nested_identifier,
                               num_complete, num_failed):
        """Assert the nested group stack holds exactly the given number of
        *_COMPLETE and *_FAILED resources (no more, no fewer)."""
        # Count down from the expected totals; both counters must reach
        # exactly zero.
        for res in self.client.resources.list(nested_identifier):
            if 'COMPLETE' in res.resource_status:
                num_complete = num_complete - 1
            elif 'FAILED' in res.resource_status:
                num_failed = num_failed - 1
        self.assertEqual(0, num_failed)
        self.assertEqual(0, num_complete)
139
140
class AutoscalingGroupBasicTest(AutoscalingGroupTest):
    """Create, update, failure and suspend/resume behaviour of ASGs."""

    def test_basic_create_works(self):
        """Make sure the working case is good.

        Note this combines test_override_aws_ec2_instance into this test as
        well, which is:
        If AWS::EC2::Instance is overridden, AutoScalingGroup will
        automatically use that overridden resource type.
        """

        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 4,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_identifier = self.stack_create(template=self.template,
                                             files=files, environment=env)
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 4)

    def test_size_updates_work(self):
        """Growing the "size" parameter on update grows the group."""
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)

        # Increase min size to 5
        env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': {'size': 5,
                               'image': self.conf.image_ref,
                               'flavor': self.conf.instance_type}}
        self.update_stack(stack_identifier, self.template,
                          environment=env2, files=files)
        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 5)

    def test_update_group_replace(self):
        """Ensure non-updatable property changes cause a replacement.

        Changing the AZ parameter cannot be applied in place, so the
        JobServerGroup resource must be replaced and its physical
        resource id must change.
        """
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry':
               {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': '1',
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        orig_asg_id = rsrc.physical_resource_id

        env2 = {'resource_registry':
                {'AWS::EC2::Instance': 'provider.yaml'},
                'parameters': {'size': '1',
                               'AZ': 'wibble',
                               'image': self.conf.image_ref,
                               'flavor': self.conf.instance_type}}
        self.update_stack(stack_identifier, self.template,
                          environment=env2, files=files)

        # replacement will cause the resource physical_resource_id to change.
        rsrc = self.client.resources.get(stack_identifier, 'JobServerGroup')
        self.assertNotEqual(orig_asg_id, rsrc.physical_resource_id)

    def test_create_instance_error_causes_group_error(self):
        """A failed inner resource fails the whole group.

        If a resource in an instance group fails to be created, the
        instance group itself will fail and the broken inner resource
        will remain.
        """
        stack_name = self._stack_rand_name()
        files = {'provider.yaml': self.bad_instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        # Create via the raw client (not self.stack_create) so that a
        # CREATE_FAILED end state can be waited for without the helper
        # asserting success.
        self.client.stacks.create(
            stack_name=stack_name,
            template=self.template,
            files=files,
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self.addCleanup(self._stack_delete, stack_name)
        stack = self.client.stacks.get(stack_name)
        stack_identifier = '%s/%s' % (stack_name, stack.id)
        self._wait_for_stack_status(stack_identifier, 'CREATE_FAILED')
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Both members should have failed to create.
        self._assert_instance_state(nested_ident, 0, 2)

    def test_update_instance_error_causes_group_error(self):
        """A failed inner resource during update fails the group.

        If a resource in an instance group fails to be created during an
        update, the instance group itself will fail and the broken inner
        resource will remain.
        """
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 2,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}

        stack_identifier = self.stack_create(template=self.template,
                                             files=files,
                                             environment=env)
        initial_resources = {
            'JobServerConfig': 'AWS::AutoScaling::LaunchConfiguration',
            'JobServerGroup': 'AWS::AutoScaling::AutoScalingGroup'}
        self.assertEqual(initial_resources,
                         self.list_resources(stack_identifier))

        stack = self.client.stacks.get(stack_identifier)
        self.assert_instance_count(stack, 2)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        self._assert_instance_state(nested_ident, 2, 0)
        initial_list = [res.resource_name
                        for res in self.client.resources.list(nested_ident)]

        # Grow the group by one using a provider template designed to
        # fail, so the update cannot complete.
        env['parameters']['size'] = 3
        files2 = {'provider.yaml': self.bad_instance_template}
        self.client.stacks.update(
            stack_id=stack_identifier,
            template=self.template,
            files=files2,
            disable_rollback=True,
            parameters={},
            environment=env
        )
        self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')

        # assert that there are 3 bad instances
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # 2 resources should be in update failed, and one create failed.
        for res in self.client.resources.list(nested_ident):
            if res.resource_name in initial_list:
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               'UPDATE_FAILED')
            else:
                self._wait_for_resource_status(nested_ident,
                                               res.resource_name,
                                               'CREATE_FAILED')

    def test_group_suspend_resume(self):
        """Suspend/resume on the top-level stack reaches every group member."""
        files = {'provider.yaml': self.instance_template}
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': 4,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_identifier = self.stack_create(template=self.template,
                                             files=files, environment=env)

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        self.stack_suspend(stack_identifier)
        self._wait_for_all_resource_status(nested_ident, 'SUSPEND_COMPLETE')

        self.stack_resume(stack_identifier)
        self._wait_for_all_resource_status(nested_ident, 'RESUME_COMPLETE')
Angus Salkeldf1b10dd2015-02-04 10:57:38 +1000327
Angus Salkeld28339012015-01-20 19:15:37 +1000328
class AutoscalingGroupUpdatePolicyTest(AutoscalingGroupTest):
    """Exercise the AutoScalingRollingUpdate update policy.

    Each test tweaks MinInstancesInService / MaxBatchSize / PauseTime
    and then verifies how many group members were updated in place,
    newly created, or deleted by the rolling update.
    """

    def ig_tmpl_with_updt_policy(self):
        """Return the group template (as a dict) with a rolling-update policy.

        :returns: parsed copy of ``self.template`` with an
            AutoScalingRollingUpdate UpdatePolicy on JobServerGroup.
        """
        # json.loads() builds a fresh structure from the (immutable)
        # template string every time, so the result is safe to mutate
        # without any explicit copy.
        templ = json.loads(self.template)
        up = {"AutoScalingRollingUpdate": {
            "MinInstancesInService": "1",
            "MaxBatchSize": "2",
            "PauseTime": "PT1S"}}
        templ['Resources']['JobServerGroup']['UpdatePolicy'] = up
        return templ

    def update_instance_group(self, updt_template,
                              num_updates_expected_on_updt,
                              num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              update_replace):
        """Create a size-10 group, update it, and verify the rolling update.

        :param updt_template: template dict to update the stack with.
        :param num_updates_expected_on_updt: members expected to survive
            (same resource names before and after the update).
        :param num_creates_expected_on_updt: brand-new members expected.
        :param num_deletes_expected_on_updt: members expected removed.
        :param update_replace: whether the update replaces instances;
            kept for caller compatibility (not inspected here).
        """

        # setup stack from the initial template
        files = {'provider.yaml': self.instance_template}
        size = 10
        env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
               'parameters': {'size': size,
                              'image': self.conf.image_ref,
                              'flavor': self.conf.instance_type}}
        stack_name = self._stack_rand_name()
        stack_identifier = self.stack_create(
            stack_name=stack_name,
            template=self.ig_tmpl_with_updt_policy(),
            files=files,
            environment=env)
        stack = self.client.stacks.get(stack_identifier)
        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # test that physical resource name of launch configuration is used
        conf_name = self._stack_output(stack, 'JobServerConfigRef')
        conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack_name
        self.assertThat(conf_name,
                        matchers.MatchesRegex(conf_name_pattern))

        # test the number of instances created
        self.assert_instance_count(stack, size)
        # saves info from initial list of instances for comparison later
        init_instances = self.client.resources.list(nested_ident)
        init_names = [inst.resource_name for inst in init_instances]

        # test stack update
        self.update_stack(stack_identifier, updt_template,
                          environment=env, files=files)
        updt_stack = self.client.stacks.get(stack_identifier)

        # test that the launch configuration is replaced
        updt_conf_name = self._stack_output(updt_stack, 'JobServerConfigRef')
        self.assertThat(updt_conf_name,
                        matchers.MatchesRegex(conf_name_pattern))
        self.assertNotEqual(conf_name, updt_conf_name)

        # test that the group size stays the same
        updt_instances = self.client.resources.list(nested_ident)
        updt_names = [inst.resource_name for inst in updt_instances]
        self.assertEqual(len(init_names), len(updt_names))
        for res in updt_instances:
            self.assertEqual('UPDATE_COMPLETE', res.resource_status)

        # test that the appropriate number of instance names are the same
        matched_names = set(updt_names) & set(init_names)
        self.assertEqual(num_updates_expected_on_updt, len(matched_names))

        # test that the appropriate number of new instances are created
        self.assertEqual(num_creates_expected_on_updt,
                         len(set(updt_names) - set(init_names)))

        # test that the appropriate number of instances are deleted
        self.assertEqual(num_deletes_expected_on_updt,
                         len(set(init_names) - set(updt_names)))

        # test that the older instances are the ones being deleted.
        # NOTE: the previous code passed the whole list to assertNotIn,
        # which vacuously succeeded against a list of strings; check
        # each expected-deleted name individually instead.
        if num_deletes_expected_on_updt > 0:
            deletes_expected = init_names[:num_deletes_expected_on_updt]
            for name in deletes_expected:
                self.assertNotIn(name, updt_names)

    def test_instance_group_update_replace(self):
        """Test simple update replace.

        No conflict between batch size and minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_with_adjusted_capacity(self):
        """Test update replace with capacity adjustment.

        Adjustment is due to a conflict between batch size and minimum
        instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_batch_size(self):
        """Test update replace with a huge batch size."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '0'
        policy['MaxBatchSize'] = '20'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=True)

    def test_instance_group_update_replace_huge_min_in_service(self):
        """Test update replace with a huge number of minimum instances in service."""
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '20'
        policy['MaxBatchSize'] = '1'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['ImageId'] = self.conf.minimal_image_ref

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=9,
                                   num_creates_expected_on_updt=1,
                                   num_deletes_expected_on_updt=1,
                                   update_replace=True)

    def test_instance_group_update_no_replace(self):
        """Test simple update without replacement.

        Update only (i.e. updated instance flavor in the Launch
        Configuration) with no conflict between batch size and minimum
        instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '1'
        policy['MaxBatchSize'] = '3'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=10,
                                   num_creates_expected_on_updt=0,
                                   num_deletes_expected_on_updt=0,
                                   update_replace=False)

    def test_instance_group_update_no_replace_with_adjusted_capacity(self):
        """Test update without replacement, with capacity adjustment.

        Update only (i.e. updated instance flavor in the Launch
        Configuration) with capacity adjustment due to a conflict
        between batch size and minimum instances in service.
        """
        updt_template = self.ig_tmpl_with_updt_policy()
        group = updt_template['Resources']['JobServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['MinInstancesInService'] = '8'
        policy['MaxBatchSize'] = '4'
        policy['PauseTime'] = 'PT0S'
        config = updt_template['Resources']['JobServerConfig']
        config['Properties']['InstanceType'] = 'm1.tiny'

        self.update_instance_group(updt_template,
                                   num_updates_expected_on_updt=8,
                                   num_creates_expected_on_updt=2,
                                   num_deletes_expected_on_updt=2,
                                   update_replace=False)
Angus Salkeldc85229b2015-02-09 10:58:04 +1000526
527
class AutoScalingSignalTest(AutoscalingGroupTest):
    """Scaling-policy signal tests, including lb metadata propagation."""

    # Main template: the group plus a "custom_lb" instance whose metadata
    # mirrors the group's InstanceList, and scale-up/down policies.
    # NOTE: test_signal_with_policy_update does literal .replace() calls
    # on the '"ScalingAdjustment": "1"' and
    # '"DesiredCapacity" : {"Ref": "size"},' substrings below, so their
    # exact spelling must not change.
    template = '''
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template to create multiple instances.",
  "Parameters" : {"size": {"Type": "String", "Default": "1"},
                  "AZ": {"Type": "String", "Default": "nova"},
                  "image": {"Type": "String"},
                  "flavor": {"Type": "String"}},
  "Resources": {
    "custom_lb": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {"Ref": "image"},
        "InstanceType": {"Ref": "flavor"},
        "UserData": "foo",
        "SecurityGroups": [ "sg-1" ],
        "Tags": []
      },
      "Metadata": {
        "IPs": {"Fn::GetAtt": ["JobServerGroup", "InstanceList"]}
      }
    },
    "JobServerGroup": {
      "Type" : "AWS::AutoScaling::AutoScalingGroup",
      "Properties" : {
        "AvailabilityZones" : [{"Ref": "AZ"}],
        "LaunchConfigurationName" : { "Ref" : "JobServerConfig" },
        "DesiredCapacity" : {"Ref": "size"},
        "MinSize" : "0",
        "MaxSize" : "20"
      }
    },
    "JobServerConfig" : {
      "Type" : "AWS::AutoScaling::LaunchConfiguration",
      "Metadata": {"foo": "bar"},
      "Properties": {
        "ImageId" : {"Ref": "image"},
        "InstanceType" : {"Ref": "flavor"},
        "SecurityGroups" : [ "sg-1" ],
        "UserData" : "jsconfig data"
      }
    },
    "ScaleUpPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment": "1"
      }
    },
    "ScaleDownPolicy" : {
      "Type" : "AWS::AutoScaling::ScalingPolicy",
      "Properties" : {
        "AdjustmentType" : "ChangeInCapacity",
        "AutoScalingGroupName" : { "Ref" : "JobServerGroup" },
        "Cooldown" : "0",
        "ScalingAdjustment" : "-2"
      }
    }
  },
  "Outputs": {
    "InstanceList": {"Value": {
      "Fn::GetAtt": ["JobServerGroup", "InstanceList"]}}
  }
}
'''

    # Provider template for the custom_lb resource only; declares no
    # resources and static outputs, so the lb is a pure metadata holder.
    lb_template = '''
heat_template_version: 2013-05-23
parameters:
  ImageId: {type: string}
  InstanceType: {type: string}
  SecurityGroups: {type: comma_delimited_list}
  UserData: {type: string}
  Tags: {type: comma_delimited_list, default: "x,y"}

resources:
outputs:
  PublicIp: {value: "not-used"}
  AvailabilityZone: {value: 'not-used1'}
  PrivateDnsName: {value: 'not-used2'}
  PublicDnsName: {value: 'not-used3'}
  PrivateIp: {value: 'not-used4'}

'''

    def setUp(self):
        """Build the shared files/environment used by every test here."""
        super(AutoScalingSignalTest, self).setUp()
        self.build_timeout = self.conf.build_timeout
        self.build_interval = self.conf.build_interval
        self.files = {'provider.yaml': self.instance_template,
                      'lb.yaml': self.lb_template}
        # lb.yaml is mapped only for the "custom_lb" resource; every
        # other AWS::EC2::Instance resolves to provider.yaml.
        self.env = {'resource_registry':
                    {'resources':
                     {'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
                     'AWS::EC2::Instance': 'provider.yaml'},
                    'parameters': {'size': 2,
                                   'image': self.conf.image_ref,
                                   'flavor': self.conf.instance_type}}

    def check_instance_count(self, stack_identifier, expected):
        """Return True when both the custom_lb metadata and the stack's
        InstanceList output report ``expected`` group members."""
        md = self.client.resources.metadata(stack_identifier, 'custom_lb')
        actual_md = len(md['IPs'].split(','))
        if actual_md != expected:
            # LOG.warn() is deprecated; use warning() with lazy
            # %-style arguments instead of eager string formatting.
            LOG.warning('check_instance_count exp:%d, meta:%s',
                        expected, md['IPs'])
            return False

        stack = self.client.stacks.get(stack_identifier)
        inst_list = self._stack_output(stack, 'InstanceList')
        actual = len(inst_list.split(','))
        if actual != expected:
            LOG.warning('check_instance_count exp:%d, act:%s',
                        expected, inst_list)
        return actual == expected

    def test_scaling_meta_update(self):
        """Use heatclient to signal the up and down policy.

        Then confirm that the metadata in the custom_lb is updated each
        time.
        """
        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # Scale down two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleDownPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 1))

    def test_signal_with_policy_update(self):
        """Prove that an updated policy is used in the next signal."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')
        # Scale up one, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 3))

        # increase the adjustment to "+2" and remove the DesiredCapacity
        # so we don't go from 3 to 2.
        new_template = self.template.replace(
            '"ScalingAdjustment": "1"',
            '"ScalingAdjustment": "2"').replace(
                '"DesiredCapacity" : {"Ref": "size"},', '')

        self.update_stack(stack_identifier, template=new_template,
                          environment=self.env, files=self.files)

        # Scale up two, Trigger alarm
        self.client.resources.signal(stack_identifier, 'ScaleUpPolicy')
        self._wait_for_stack_status(nested_ident, 'UPDATE_COMPLETE')
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 5))

    def test_signal_during_suspend(self):
        """Prove that a signal will fail when the stack is in suspend."""

        stack_identifier = self.stack_create(template=self.template,
                                             files=self.files,
                                             environment=self.env)

        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))

        nested_ident = self.assert_resource_is_a_stack(stack_identifier,
                                                       'JobServerGroup')

        # suspend the top level stack.
        self.client.actions.suspend(stack_id=stack_identifier)
        self._wait_for_resource_status(
            stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')

        # Sending a signal while suspended must raise BadRequest.
        ex = self.assertRaises(exc.BadRequest,
                               self.client.resources.signal,
                               stack_identifier, 'ScaleUpPolicy')

        error_msg = 'Signal resource during SUSPEND is not supported'
        self.assertIn(error_msg, six.text_type(ex))
        ev = self.wait_for_event_with_reason(
            stack_identifier,
            reason='Cannot signal resource during SUSPEND',
            rsrc_name='ScaleUpPolicy')
        self.assertEqual('SUSPEND_COMPLETE', ev[0].resource_status)

        # still SUSPEND_COMPLETE (not gone to UPDATE_COMPLETE)
        self._wait_for_stack_status(nested_ident, 'SUSPEND_COMPLETE')
        self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
        # still 2 instances.
        self.assertTrue(test.call_until_true(
            self.build_timeout, self.build_interval,
            self.check_instance_count, stack_identifier, 2))