Drop autoscaling scenario test
Drop the autoscaling scenario test. It has always been skipped (bug
1257575), so there is no loss of test coverage. It also takes a very
long time to run, since it uses real alarms to perform four scaling
operations.
See https://review.openstack.org/#/c/122320/.
Change-Id: If37f63565a235c6128566f8f2cd993ed247f0912
Partially-implements: bp/tempest-client-scenarios
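
For reference, the removed test verified each of the four scaling
operations by polling the nested SmokeServerGroup stack's resource
count until it matched the expected number of servers. A minimal,
self-contained sketch of that polling pattern (the helper name
wait_for_server_count is illustrative, not part of Tempest):

    import time

    def wait_for_server_count(count_servers, expected,
                              timeout=300, interval=10):
        # Poll count_servers() until it returns `expected` or the
        # timeout expires; mirrors the assertScale/call_until_true
        # loop in the removed test, where count_servers() counted the
        # resources in the nested autoscaling group stack.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if count_servers() == expected:
                return True
            time.sleep(interval)
        return False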
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
deleted file mode 100644
index 8894106..0000000
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-import heatclient.exc as heat_exceptions
-
-from tempest import config
-from tempest.scenario import manager
-from tempest import test
-
-CONF = config.CONF
-
-
-class AutoScalingTest(manager.OrchestrationScenarioTest):
-
- def setUp(self):
- super(AutoScalingTest, self).setUp()
- if not CONF.orchestration.image_ref:
- raise self.skipException("No image available to test")
- self.client = self.orchestration_client
-
- def assign_keypair(self):
- self.stack_name = self._stack_rand_name()
- if CONF.orchestration.keypair_name:
- self.keypair_name = CONF.orchestration.keypair_name
- else:
- self.keypair = self.create_keypair()
- self.keypair_name = self.keypair.id
-
- def launch_stack(self):
- net = self._get_default_network()
- self.parameters = {
- 'KeyName': self.keypair_name,
- 'InstanceType': CONF.orchestration.instance_type,
- 'ImageId': CONF.orchestration.image_ref,
- 'StackStart': str(time.time()),
- 'Subnet': net['subnets'][0]
- }
-
- # create the stack
- self.template = self._load_template(__file__, 'test_autoscaling.yaml')
- self.client.stacks.create(
- stack_name=self.stack_name,
- template=self.template,
- parameters=self.parameters)
-
- self.stack = self.client.stacks.get(self.stack_name)
- self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
-
- # if a keypair was set, do not delete the stack on exit to allow
- # for manual post-mortems
- if not CONF.orchestration.keypair_name:
- self.addCleanup(self.client.stacks.delete, self.stack)
-
- @test.skip_because(bug="1257575")
- @test.attr(type='slow')
- @test.services('orchestration', 'compute')
- def test_scale_up_then_down(self):
-
- self.assign_keypair()
- self.launch_stack()
-
- sid = self.stack_identifier
- timeout = CONF.orchestration.build_timeout
- interval = 10
-
- self.assertEqual('CREATE', self.stack.action)
- # wait for create to complete.
- self.status_timeout(self.client.stacks, sid, 'COMPLETE',
- error_status='FAILED')
-
- self.stack.get()
- self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
-
- # the resource SmokeServerGroup is implemented as a nested
- # stack, so servers can be counted by counting the resources
- # inside that nested stack
- resource = self.client.resources.get(sid, 'SmokeServerGroup')
- nested_stack_id = resource.physical_resource_id
-
- def server_count():
- # the number of servers is the number of resources
- # in the nested stack
- self.server_count = len(
- self.client.resources.list(nested_stack_id))
- return self.server_count
-
- def assertScale(from_servers, to_servers):
- test.call_until_true(lambda: server_count() == to_servers,
- timeout, interval)
- self.assertEqual(to_servers, self.server_count,
- 'Failed scaling from %d to %d servers. '
- 'Current server count: %s' % (
- from_servers, to_servers,
- self.server_count))
-
- # he marched them up to the top of the hill
- assertScale(1, 2)
- assertScale(2, 3)
-
- # and he marched them down again
- assertScale(3, 2)
- assertScale(2, 1)
-
- # delete stack on completion
- self.stack.delete()
- self.status_timeout(self.client.stacks, sid, 'COMPLETE',
- error_status='FAILED',
- not_found_exception=heat_exceptions.NotFound)
-
- try:
- self.stack.get()
- self.assertEqual('DELETE_COMPLETE', self.stack.stack_status)
- except heat_exceptions.NotFound:
- pass
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
deleted file mode 100644
index 4651284..0000000
--- a/tempest/scenario/orchestration/test_autoscaling.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
- Template which tests autoscaling and load balancing
-Parameters:
- KeyName:
- Type: String
- InstanceType:
- Type: String
- ImageId:
- Type: String
- Subnet:
- Type: String
- StackStart:
- Description: Epoch seconds when the stack was launched
- Type: Number
- ConsumeStartSeconds:
- Description: Seconds after invocation when memory should be consumed
- Type: Number
- Default: '60'
- ConsumeStopSeconds:
- Description: Seconds after StackStart when memory should be released
- Type: Number
- Default: '420'
- ScaleUpThreshold:
- Description: Memory percentage threshold to scale up on
- Type: String
- Default: '70'
- ScaleDownThreshold:
- Description: Memory percentage threshold to scale down on
- Type: String
- Default: '60'
- ConsumeMemoryLimit:
- Description: Memory percentage threshold to consume
- Type: Number
- Default: '71'
-Resources:
- SmokeServerGroup:
- Type: AWS::AutoScaling::AutoScalingGroup
- Properties:
- AvailabilityZones: {'Fn::GetAZs': ''}
- LaunchConfigurationName: {Ref: LaunchConfig}
- MinSize: '1'
- MaxSize: '3'
- VPCZoneIdentifier: [{Ref: Subnet}]
- SmokeServerScaleUpPolicy:
- Type: AWS::AutoScaling::ScalingPolicy
- Properties:
- AdjustmentType: ChangeInCapacity
- AutoScalingGroupName: {Ref: SmokeServerGroup}
- Cooldown: '60'
- ScalingAdjustment: '1'
- SmokeServerScaleDownPolicy:
- Type: AWS::AutoScaling::ScalingPolicy
- Properties:
- AdjustmentType: ChangeInCapacity
- AutoScalingGroupName: {Ref: SmokeServerGroup}
- Cooldown: '60'
- ScalingAdjustment: '-1'
- MEMAlarmHigh:
- Type: AWS::CloudWatch::Alarm
- Properties:
- AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
- MetricName: MemoryUtilization
- Namespace: system/linux
- Statistic: Average
- Period: '10'
- EvaluationPeriods: '1'
- Threshold: {Ref: ScaleUpThreshold}
- AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
- Dimensions:
- - Name: AutoScalingGroupName
- Value: {Ref: SmokeServerGroup}
- ComparisonOperator: GreaterThanThreshold
- MEMAlarmLow:
- Type: AWS::CloudWatch::Alarm
- Properties:
- AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
- MetricName: MemoryUtilization
- Namespace: system/linux
- Statistic: Average
- Period: '10'
- EvaluationPeriods: '1'
- Threshold: {Ref: ScaleDownThreshold}
- AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
- Dimensions:
- - Name: AutoScalingGroupName
- Value: {Ref: SmokeServerGroup}
- ComparisonOperator: LessThanThreshold
- CfnUser:
- Type: AWS::IAM::User
- SmokeKeys:
- Type: AWS::IAM::AccessKey
- Properties:
- UserName: {Ref: CfnUser}
- SmokeSecurityGroup:
- Type: AWS::EC2::SecurityGroup
- Properties:
- GroupDescription: Standard firewall rules
- SecurityGroupIngress:
- - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
- - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
- LaunchConfig:
- Type: AWS::AutoScaling::LaunchConfiguration
- Metadata:
- AWS::CloudFormation::Init:
- config:
- files:
- /etc/cfn/cfn-credentials:
- content:
- Fn::Replace:
- - $AWSAccessKeyId: {Ref: SmokeKeys}
- $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
- - |
- AWSAccessKeyId=$AWSAccessKeyId
- AWSSecretKey=$AWSSecretKey
- mode: '000400'
- owner: root
- group: root
- /root/watch_loop:
- content:
- Fn::Replace:
- - _hi_: {Ref: MEMAlarmHigh}
- _lo_: {Ref: MEMAlarmLow}
- - |
- #!/bin/bash
- while :
- do
- /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
- /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
- sleep 4
- done
- mode: '000700'
- owner: root
- group: root
- /root/consume_memory:
- content:
- Fn::Replace:
- - StackStart: {Ref: StackStart}
- ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
- ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
- ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
- - |
- #!/usr/bin/env python
- import psutil
- import time
- import datetime
- import sys
- a = []
- sleep_until_consume = ConsumeStartSeconds
- stack_start = StackStart
- consume_stop_time = stack_start + ConsumeStopSeconds
- memory_limit = ConsumeMemoryLimit
- if sleep_until_consume > 0:
- sys.stdout.flush()
- time.sleep(sleep_until_consume)
- while psutil.virtual_memory().percent < memory_limit:
- sys.stdout.flush()
- a.append(' ' * 10**5)
- time.sleep(0.1)
- sleep_until_exit = consume_stop_time - time.time()
- if sleep_until_exit > 0:
- time.sleep(sleep_until_exit)
- mode: '000700'
- owner: root
- group: root
- Properties:
- ImageId: {Ref: ImageId}
- InstanceType: {Ref: InstanceType}
- KeyName: {Ref: KeyName}
- SecurityGroups: [{Ref: SmokeSecurityGroup}]
- UserData:
- Fn::Base64:
- Fn::Replace:
- - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
- ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
- ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
- - |
- #!/bin/bash -v
- /opt/aws/bin/cfn-init
- # report on memory consumption every 4 seconds
- /root/watch_loop &
- # wait ConsumeStartSeconds then ramp up memory consumption
- # until it is over ConsumeMemoryLimit%
- # then exits ConsumeStopSeconds seconds after stack launch
- /root/consume_memory > /root/consume_memory.log &