Merge "Do not set the mime-type for userdata"
diff --git a/common/clients.py b/common/clients.py
index 96a34b7..afdc477 100644
--- a/common/clients.py
+++ b/common/clients.py
@@ -123,6 +123,8 @@
password=self._password())
def _get_identity_client(self):
+ user_domain_id = self.conf.user_domain_id
+ project_domain_id = self.conf.project_domain_id
user_domain_name = self.conf.user_domain_name
project_domain_name = self.conf.project_domain_name
kwargs = {
@@ -134,6 +136,8 @@
# keystone v2 can't ignore domain details
if self.auth_version == '3':
kwargs.update({
+ 'user_domain_id': user_domain_id,
+ 'project_domain_id': project_domain_id,
'user_domain_name': user_domain_name,
'project_domain_name': project_domain_name})
auth = password.Password(**kwargs)
diff --git a/common/config.py b/common/config.py
index 3cef5aa..4aa7e67 100644
--- a/common/config.py
+++ b/common/config.py
@@ -53,6 +53,12 @@
cfg.StrOpt('project_domain_name',
help="Project domain name, if keystone v3 auth_url"
"is used"),
+ cfg.StrOpt('user_domain_id',
+               help="User domain id, if keystone v3 auth_url "
+                    "is used"),
+ cfg.StrOpt('project_domain_id',
+               help="Project domain id, if keystone v3 auth_url "
+                    "is used"),
cfg.StrOpt('region',
help="The region name to use"),
cfg.StrOpt('instance_type',
diff --git a/functional/test_cancel_update.py b/functional/test_cancel_update.py
index f6ddc07..bfeeda6 100644
--- a/functional/test_cancel_update.py
+++ b/functional/test_cancel_update.py
@@ -53,7 +53,7 @@
stack_identifier = self.stack_create(template=self.template,
parameters=parameters)
- parameters['InstanceType'] = 'm1.large'
+ parameters['InstanceType'] = self.conf.instance_type
self.update_stack(stack_identifier, self.template,
parameters=parameters,
expected_status='UPDATE_IN_PROGRESS')
diff --git a/functional/test_conditional_exposure.py b/functional/test_conditional_exposure.py
index bf6cc47..69cff79 100644
--- a/functional/test_conditional_exposure.py
+++ b/functional/test_conditional_exposure.py
@@ -23,13 +23,16 @@
unavailable_service = 'Sahara'
unavailable_template = """
heat_template_version: 2015-10-15
+parameters:
+ instance_type:
+ type: string
resources:
not_available:
type: OS::Sahara::NodeGroupTemplate
properties:
plugin_name: fake
hadoop_version: 0.1
- flavor: m1.large
+ flavor: {get_param: instance_type}
node_processes: []
"""
@@ -56,9 +59,11 @@
def test_unavailable_resources_not_created(self):
stack_name = self._stack_rand_name()
+ parameters = {'instance_type': self.conf.minimal_instance_type}
ex = self.assertRaises(exc.HTTPBadRequest,
self.client.stacks.create,
stack_name=stack_name,
+ parameters=parameters,
template=self.unavailable_template)
self.assertIn('ResourceTypeUnavailable', ex.message)
self.assertIn('OS::Sahara::NodeGroupTemplate', ex.message)
diff --git a/functional/test_event_sinks.py b/functional/test_event_sinks.py
index e4a23ff..ea66b7d 100644
--- a/functional/test_event_sinks.py
+++ b/functional/test_event_sinks.py
@@ -43,7 +43,11 @@
'os_username': self.conf.username,
'os_password': self.conf.password,
'os_project_name': self.conf.tenant_name,
- 'os_auth_url': self.conf.auth_url
+ 'os_auth_url': self.conf.auth_url,
+ 'os_user_domain_id': self.conf.user_domain_id,
+ 'os_project_domain_id': self.conf.project_domain_id,
+ 'os_user_domain_name': self.conf.user_domain_name,
+ 'os_project_domain_name': self.conf.project_domain_name
}
}
}
diff --git a/functional/test_heat_autoscaling.py b/functional/test_heat_autoscaling.py
index d41e203..25ceb1c 100644
--- a/functional/test_heat_autoscaling.py
+++ b/functional/test_heat_autoscaling.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
@@ -21,20 +22,38 @@
random_group:
type: OS::Heat::AutoScalingGroup
properties:
- max_size: 10
- min_size: 10
+ cooldown: 0
+ desired_capacity: 3
+ max_size: 5
+ min_size: 2
resource:
type: OS::Heat::RandomString
+ scale_up_policy:
+ type: OS::Heat::ScalingPolicy
+ properties:
+ adjustment_type: change_in_capacity
+ auto_scaling_group_id: { get_resource: random_group }
+ scaling_adjustment: 1
+
+ scale_down_policy:
+ type: OS::Heat::ScalingPolicy
+ properties:
+ adjustment_type: change_in_capacity
+ auto_scaling_group_id: { get_resource: random_group }
+ scaling_adjustment: -1
+
outputs:
all_values:
value: {get_attr: [random_group, outputs_list, value]}
value_0:
value: {get_attr: [random_group, resource.0.value]}
- value_5:
- value: {get_attr: [random_group, resource.5.value]}
- value_9:
- value: {get_attr: [random_group, resource.9.value]}
+ value_1:
+ value: {get_attr: [random_group, resource.1.value]}
+ value_2:
+ value: {get_attr: [random_group, resource.2.value]}
+ asg_size:
+ value: {get_attr: [random_group, current_size]}
'''
template_nested = '''
@@ -44,8 +63,9 @@
random_group:
type: OS::Heat::AutoScalingGroup
properties:
- max_size: 10
- min_size: 10
+ desired_capacity: 3
+ max_size: 5
+ min_size: 2
resource:
type: randomstr.yaml
@@ -54,10 +74,10 @@
value: {get_attr: [random_group, outputs_list, random_str]}
value_0:
value: {get_attr: [random_group, resource.0.random_str]}
- value_5:
- value: {get_attr: [random_group, resource.5.random_str]}
- value_9:
- value: {get_attr: [random_group, resource.9.random_str]}
+ value_1:
+ value: {get_attr: [random_group, resource.1.random_str]}
+ value_2:
+ value: {get_attr: [random_group, resource.2.random_str]}
'''
template_randomstr = '''
@@ -75,14 +95,74 @@
def _assert_output_values(self, stack_id):
stack = self.client.stacks.get(stack_id)
all_values = self._stack_output(stack, 'all_values')
- self.assertEqual(10, len(all_values))
+ self.assertEqual(3, len(all_values))
self.assertEqual(all_values[0], self._stack_output(stack, 'value_0'))
- self.assertEqual(all_values[5], self._stack_output(stack, 'value_5'))
- self.assertEqual(all_values[9], self._stack_output(stack, 'value_9'))
+ self.assertEqual(all_values[1], self._stack_output(stack, 'value_1'))
+ self.assertEqual(all_values[2], self._stack_output(stack, 'value_2'))
+
+ def test_asg_scale_up_max_size(self):
+ stack_id = self.stack_create(template=self.template,
+ expected_status='CREATE_COMPLETE')
+ stack = self.client.stacks.get(stack_id)
+ asg_size = self._stack_output(stack, 'asg_size')
+ # Ensure that initial desired capacity is met
+ self.assertEqual(3, asg_size)
+
+ # send scale up signals and ensure that asg honors max_size
+ asg = self.client.resources.get(stack_id, 'random_group')
+ max_size = 5
+ for num in range(asg_size+1, max_size+2):
+ expected_resources = num if num <= max_size else max_size
+ self.client.resources.signal(stack_id, 'scale_up_policy')
+ test.call_until_true(self.conf.build_timeout,
+ self.conf.build_interval,
+ self.check_autoscale_complete,
+ asg.physical_resource_id, expected_resources)
+
+ def test_asg_scale_down_min_size(self):
+ stack_id = self.stack_create(template=self.template,
+ expected_status='CREATE_COMPLETE')
+ stack = self.client.stacks.get(stack_id)
+ asg_size = self._stack_output(stack, 'asg_size')
+ # Ensure that initial desired capacity is met
+ self.assertEqual(3, asg_size)
+
+ # send scale down signals and ensure that asg honors min_size
+ asg = self.client.resources.get(stack_id, 'random_group')
+ min_size = 2
+ for num in range(asg_size-1, 0, -1):
+ expected_resources = num if num >= min_size else min_size
+ self.client.resources.signal(stack_id, 'scale_down_policy')
+ test.call_until_true(self.conf.build_timeout,
+ self.conf.build_interval,
+ self.check_autoscale_complete,
+ asg.physical_resource_id, expected_resources)
+
+ def test_asg_cooldown(self):
+ cooldown_tmpl = self.template.replace('cooldown: 0',
+ 'cooldown: 10')
+ stack_id = self.stack_create(template=cooldown_tmpl,
+ expected_status='CREATE_COMPLETE')
+ stack = self.client.stacks.get(stack_id)
+ asg_size = self._stack_output(stack, 'asg_size')
+ # Ensure that initial desired capacity is met
+ self.assertEqual(3, asg_size)
+
+ # send scale up signal.
+ # Since cooldown is in effect, number of resources should not change
+ asg = self.client.resources.get(stack_id, 'random_group')
+ expected_resources = 3
+ self.client.resources.signal(stack_id, 'scale_up_policy')
+ test.call_until_true(self.conf.build_timeout,
+ self.conf.build_interval,
+ self.check_autoscale_complete,
+ asg.physical_resource_id, expected_resources)
def test_path_attrs(self):
stack_id = self.stack_create(template=self.template)
- expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup'}
+ expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup',
+ 'scale_up_policy': 'OS::Heat::ScalingPolicy',
+ 'scale_down_policy': 'OS::Heat::ScalingPolicy'}
self.assertEqual(expected_resources, self.list_resources(stack_id))
self._assert_output_values(stack_id)
diff --git a/scenario/templates/test_volumes_create_from_backup.yaml b/scenario/templates/test_volumes_create_from_backup.yaml
index d6eadd1..ab1edf8 100644
--- a/scenario/templates/test_volumes_create_from_backup.yaml
+++ b/scenario/templates/test_volumes_create_from_backup.yaml
@@ -8,7 +8,6 @@
instance_type:
type: string
description: Type of the instance to be created.
- default: m1.small
image_id:
type: string
diff --git a/scenario/templates/test_volumes_delete_snapshot.yaml b/scenario/templates/test_volumes_delete_snapshot.yaml
index 08f84f1..3893b52 100644
--- a/scenario/templates/test_volumes_delete_snapshot.yaml
+++ b/scenario/templates/test_volumes_delete_snapshot.yaml
@@ -8,7 +8,6 @@
instance_type:
type: string
description: Type of the instance to be created.
- default: m1.small
image_id:
type: string
diff --git a/scenario/test_volumes.py b/scenario/test_volumes.py
index 603c8f2..47e583d 100644
--- a/scenario/test_volumes.py
+++ b/scenario/test_volumes.py
@@ -46,7 +46,7 @@
self.assertEqual(self.volume_description,
self._stack_output(stack, 'display_description'))
- def check_stack(self, stack_id):
+ def check_stack(self, stack_id, parameters):
stack = self.client.stacks.get(stack_id)
# Verify with cinder that the volume exists, with matching details
@@ -75,6 +75,7 @@
try:
stack_identifier2 = self.launch_stack(
template_name='test_volumes_create_from_backup.yaml',
+ parameters=parameters,
add_parameters={'backup_id': backup.id})
stack2 = self.client.stacks.get(stack_identifier2)
except exceptions.StackBuildErrorException:
@@ -125,4 +126,4 @@
)
# Check stack
- self.check_stack(stack_id)
+ self.check_stack(stack_id, parameters)