Merge "Increase cooldown in test_asg_cooldown"
diff --git a/api/gabbits/resources.yaml b/api/gabbits/resources.yaml
index 164f4cb..41da444 100644
--- a/api/gabbits/resources.yaml
+++ b/api/gabbits/resources.yaml
@@ -82,7 +82,7 @@
- name: signal resource
POST: $LAST_URL/signal
- status: 200
+ status: 400
- name: delete stack with resources
DELETE: /stacks/$ENVIRON['PREFIX']-rsrcstack
diff --git a/common/test.py b/common/test.py
index 9944bd1..d43dece 100644
--- a/common/test.py
+++ b/common/test.py
@@ -529,6 +529,20 @@
stack_link = [l for l in r.links if l.get('rel') == 'stack'][0]
return stack_link['href'].split("/")[-1]
+ def get_physical_resource_id(self, stack_identifier, resource_name):
+ try:
+ resource = self.client.resources.get(
+ stack_identifier, resource_name)
+ return resource.physical_resource_id
+ except Exception:
+ raise Exception('Resource (%s) not found in stack (%s)!' %
+ (resource_name, stack_identifier))
+
+ def get_stack_output(self, stack_identifier, output_key,
+ validate_errors=True):
+ stack = self.client.stacks.get(stack_identifier)
+ return self._stack_output(stack, output_key, validate_errors)
+
def check_input_values(self, group_resources, key, value):
# Check inputs for deployment and derived config
for r in group_resources:
@@ -551,12 +565,13 @@
parameters=None, environment=None, tags=None,
expected_status='CREATE_COMPLETE',
disable_rollback=True, enable_cleanup=True,
- environment_files=None):
+ environment_files=None, timeout=None):
name = stack_name or self._stack_rand_name()
templ = template or self.template
templ_files = files or {}
params = parameters or {}
env = environment or {}
+ timeout_mins = timeout or self.conf.build_timeout
self.client.stacks.create(
stack_name=name,
template=templ,
@@ -565,7 +580,8 @@
parameters=params,
environment=env,
tags=tags,
- environment_files=environment_files
+ environment_files=environment_files,
+ timeout_mins=timeout_mins
)
if expected_status not in ['ROLLBACK_COMPLETE'] and enable_cleanup:
self.addCleanup(self._stack_delete, name)
diff --git a/functional/test_create_update.py b/functional/test_create_update.py
index ae81463..3d4ca76 100644
--- a/functional/test_create_update.py
+++ b/functional/test_create_update.py
@@ -14,10 +14,11 @@
import copy
import json
+from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
test_template_one_resource = {
- 'heat_template_version': '2013-05-23',
+ 'heat_template_version': 'pike',
'description': 'Test template to create one instance.',
'resources': {
'test1': {
@@ -36,7 +37,7 @@
}
test_template_two_resource = {
- 'heat_template_version': '2013-05-23',
+ 'heat_template_version': 'pike',
'description': 'Test template to create two instance.',
'resources': {
'test1': {
@@ -673,3 +674,37 @@
template=template,
expected_status='UPDATE_FAILED')
self._stack_delete(stack_identifier)
+
+ def test_stack_update_with_conditions(self):
+ """Update manages new conditions added.
+
+ When a new resource is added during updates, the stacks handles the new
+ conditions correctly, and doesn't fail to load them while the update is
+ still in progress.
+ """
+ stack_identifier = self.stack_create(
+ template=test_template_one_resource)
+
+ updated_template = copy.deepcopy(test_template_two_resource)
+ updated_template['conditions'] = {'cond1': True}
+ updated_template['resources']['test3'] = {
+ 'type': 'OS::Heat::TestResource',
+ 'properties': {
+ 'value': {'if': ['cond1', 'val3', 'val4']}
+ }
+ }
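+ # Slow the creation of test2 (action_wait_secs pauses the resource
+ # for 30s) so the update is still in progress while check_resources
+ # below polls the resource list.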
+ test2_props = updated_template['resources']['test2']['properties']
+ test2_props['action_wait_secs'] = {'create': 30}
+
+ self.update_stack(stack_identifier,
+ template=updated_template,
+ expected_status='UPDATE_IN_PROGRESS')
+
+ def check_resources():
+ resources = self.list_resources(stack_identifier)
+ if len(resources) < 2:
+ return False
+ self.assertIn('test3', resources)
+ return True
+
+ self.assertTrue(test.call_until_true(20, 2, check_resources))
diff --git a/functional/test_create_update_neutron_trunk.py b/functional/test_create_update_neutron_trunk.py
new file mode 100644
index 0000000..b5a108a
--- /dev/null
+++ b/functional/test_create_update_neutron_trunk.py
@@ -0,0 +1,275 @@
+# Copyright (c) 2017 Ericsson.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import yaml
+
+from heat_integrationtests.functional import functional_base
+
+
+test_template = '''
+heat_template_version: pike
+description: Test template to create, update, delete trunk.
+resources:
+ parent_net:
+ type: OS::Neutron::Net
+ trunk_net_one:
+ type: OS::Neutron::Net
+ trunk_net_two:
+ type: OS::Neutron::Net
+ parent_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: parent_net }
+ cidr: 10.0.0.0/16
+ trunk_subnet_one:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: trunk_net_one }
+ cidr: 10.10.0.0/16
+ trunk_subnet_two:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: trunk_net_two }
+ cidr: 10.20.0.0/16
+ parent_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: parent_net }
+ name: trunk_parent_port
+ sub_port_one:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: trunk_net_one }
+ name: trunk_sub_port_one
+ sub_port_two:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: trunk_net_two }
+ name: trunk_sub_port_two
+ trunk:
+ type: OS::Neutron::Trunk
+ properties:
+ name: test_trunk
+ port: { get_resource: parent_port }
+ sub_ports:
+outputs:
+ trunk_parent_port:
+ value: { get_attr: [trunk, port_id] }
+'''
+
+
+class UpdateTrunkTest(functional_base.FunctionalTestsBase):
+
+ @staticmethod
+ def _sub_ports_dict_to_set(sub_ports):
+ new_sub_ports = copy.deepcopy(sub_ports)
+
+ # NOTE(lajos katona): In the template we have to give the sub port as
+ # port, but trunk_details returns it with the key port_id. On top of
+ # that, trunk_details also contains the mac_address, which is of no
+ # use here.
+ # So here we have to make sure that the dictionaries (input from the
+ # template or output from trunk_details) have the same keys:
+ if any('mac_address' in d for d in new_sub_ports):
+ for sp in new_sub_ports:
+ sp['port'] = sp['port_id']
+ del sp['port_id']
+ del sp['mac_address']
+
+ # NOTE(lajos katona): Both trunk_details['sub_ports'] and the input to
+ # the template are lists, and we can't be sure that their order is the
+ # same, so we convert them to sets to compare them.
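+ # For illustration (values are hypothetical): an entry like
+ # {'port_id': 'UUID', 'segmentation_type': 'vlan',
+ # 'segmentation_id': 10, 'mac_address': 'fa:16:3e:aa:bb:cc'}
+ # becomes
+ # frozenset({('port', 'UUID'), ('segmentation_type', 'vlan'),
+ # ('segmentation_id', 10)})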
+ sub_ports_set = {frozenset(d.items()) for d in new_sub_ports}
+ return sub_ports_set
+
+ def test_add_first_sub_port(self):
+ stack_identifier = self.stack_create(template=test_template)
+
+ parsed_template = yaml.safe_load(test_template)
+ new_sub_port = [{'port': {'get_resource': 'sub_port_one'},
+ 'segmentation_id': 10,
+ 'segmentation_type': 'vlan'}]
+ parsed_template['resources']['trunk']['properties'][
+ 'sub_ports'] = new_sub_port
+ updated_template = yaml.safe_dump(parsed_template)
+ self.update_stack(stack_identifier, updated_template)
+
+ # Fix the port_id in the template for assertion
+ new_sub_port[0]['port'] = self.get_physical_resource_id(
+ stack_identifier, 'sub_port_one')
+ parent_id = self.get_stack_output(
+ stack_identifier, 'trunk_parent_port')
+ parent_port = self.network_client.show_port(parent_id)['port']
+ trunk_sub_port = parent_port['trunk_details']['sub_ports']
+
+ self.assertEqual(self._sub_ports_dict_to_set(new_sub_port),
+ self._sub_ports_dict_to_set(trunk_sub_port))
+
+ def test_add_a_second_sub_port(self):
+ parsed_template = yaml.safe_load(test_template)
+ sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 10}, ]
+ parsed_template['resources']['trunk']['properties'][
+ 'sub_ports'] = sub_ports
+ template_with_sub_ports = yaml.safe_dump(parsed_template)
+
+ stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+ new_sub_port = {'port': {'get_resource': 'sub_port_two'},
+ 'segmentation_id': 20,
+ 'segmentation_type': 'vlan'}
+ parsed_template['resources']['trunk']['properties'][
+ 'sub_ports'].append(new_sub_port)
+
+ updated_template = yaml.safe_dump(parsed_template)
+
+ self.update_stack(stack_identifier, updated_template)
+
+ # Fix the port_ids in the templates for assertion
+ sub_ports[0]['port'] = self.get_physical_resource_id(
+ stack_identifier, 'sub_port_one')
+ new_sub_port['port'] = self.get_physical_resource_id(
+ stack_identifier, 'sub_port_two')
+ expected_sub_ports = [sub_ports[0], new_sub_port]
+
+ parent_id = self.get_stack_output(
+ stack_identifier, 'trunk_parent_port')
+ parent_port = self.network_client.show_port(parent_id)['port']
+ trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+ self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
+ self._sub_ports_dict_to_set(trunk_sub_ports))
+
+ def test_remove_sub_port_from_trunk(self):
+ sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 10},
+ {'port': {'get_resource': 'sub_port_two'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 20}]
+ parsed_template = yaml.safe_load(test_template)
+ parsed_template['resources']['trunk']['properties'][
+ 'sub_ports'] = sub_ports
+ template_with_sub_ports = yaml.safe_dump(parsed_template)
+
+ stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+ sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_two'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 20}
+ parsed_template['resources']['trunk'][
+ 'properties']['sub_ports'].remove(sub_port_to_be_removed)
+ updated_template = yaml.safe_dump(parsed_template)
+
+ self.update_stack(stack_identifier, updated_template)
+
+ # Fix the port_ids in the templates for assertion
+ sub_ports[0]['port'] = self.get_physical_resource_id(
+ stack_identifier, 'sub_port_one')
+ expected_sub_ports = [sub_ports[0]]
+
+ parent_id = self.get_stack_output(
+ stack_identifier, 'trunk_parent_port')
+ parent_port = self.network_client.show_port(parent_id)['port']
+ trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+ self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
+ self._sub_ports_dict_to_set(trunk_sub_ports))
+
+ def test_remove_last_sub_port_from_trunk(self):
+ sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 10}]
+ parsed_template = yaml.safe_load(test_template)
+ parsed_template['resources']['trunk']['properties'][
+ 'sub_ports'] = sub_ports
+
+ template_with_sub_ports = yaml.safe_dump(parsed_template)
+ stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+ sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_one'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 10}
+
+ parsed_template['resources']['trunk'][
+ 'properties']['sub_ports'] = []
+ updated_template = yaml.safe_dump(parsed_template)
+
+ self.update_stack(stack_identifier, updated_template)
+
+ sub_port_to_be_removed['port'] = self.get_physical_resource_id(
+ stack_identifier, 'sub_port_one')
+ parent_id = self.get_stack_output(
+ stack_identifier, 'trunk_parent_port')
+ parent_port = self.network_client.show_port(parent_id)['port']
+ trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+ self.assertNotEqual(
+ self._sub_ports_dict_to_set([sub_port_to_be_removed]),
+ self._sub_ports_dict_to_set(trunk_sub_ports))
+ self.assertFalse(trunk_sub_ports,
+ 'The returned sub ports (%s) in trunk_details '
+ 'are not empty!' % trunk_sub_ports)
+
+ def test_update_existing_sub_port_on_trunk(self):
+ sub_ports = [{'port': {'get_resource': 'sub_port_one'},
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 10}]
+ parsed_template = yaml.safe_load(test_template)
+ parsed_template['resources']['trunk']['properties'][
+ 'sub_ports'] = sub_ports
+
+ template_with_sub_ports = yaml.safe_dump(parsed_template)
+ stack_identifier = self.stack_create(template=template_with_sub_ports)
+
+ sub_port_id = self.get_physical_resource_id(
+ stack_identifier, 'sub_port_one')
+ parsed_template['resources']['trunk']['properties']['sub_ports'][0][
+ 'segmentation_id'] = 99
+ updated_template = yaml.safe_dump(parsed_template)
+
+ self.update_stack(stack_identifier, updated_template)
+ updated_sub_port = {'port': sub_port_id,
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 99}
+ parent_id = self.get_stack_output(
+ stack_identifier, 'trunk_parent_port')
+ parent_port = self.network_client.show_port(parent_id)['port']
+ trunk_sub_ports = parent_port['trunk_details']['sub_ports']
+
+ self.assertEqual(self._sub_ports_dict_to_set([updated_sub_port]),
+ self._sub_ports_dict_to_set(trunk_sub_ports))
+
+ def test_update_trunk_name_and_description(self):
+ new_name = 'pineapple'
+ new_description = 'This is a test trunk'
+
+ stack_identifier = self.stack_create(template=test_template)
+ parsed_template = yaml.safe_load(test_template)
+ parsed_template['resources']['trunk']['properties']['name'] = new_name
+ parsed_template['resources']['trunk']['properties'][
+ 'description'] = new_description
+ updated_template = yaml.safe_dump(parsed_template)
+ self.update_stack(stack_identifier, template=updated_template)
+
+ parent_id = self.get_stack_output(
+ stack_identifier, 'trunk_parent_port')
+ parent_port = self.network_client.show_port(parent_id)['port']
+ trunk_id = parent_port['trunk_details']['trunk_id']
+
+ trunk = self.network_client.show_trunk(trunk_id)['trunk']
+ self.assertEqual(new_name, trunk['name'])
+ self.assertEqual(new_description, trunk['description'])
diff --git a/functional/test_nested_get_attr.py b/functional/test_nested_get_attr.py
new file mode 100644
index 0000000..fff89a4
--- /dev/null
+++ b/functional/test_nested_get_attr.py
@@ -0,0 +1,165 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Using nested get_attr functions isn't a good idea - in particular, this
+# actually working depends on correct dependencies between the two resources
+# whose attributes are being fetched, and these dependencies are non-local to
+# where the get_attr calls are used. Nevertheless, it did sort-of work, and
+# this test will help keep it that way.
+
+from heat_integrationtests.functional import functional_base
+
+
+initial_template = '''
+heat_template_version: ocata
+resources:
+ dict_resource:
+ type: OS::Heat::Value
+ properties:
+ value:
+ blarg: wibble
+ foo: bar
+ baz: quux
+ fred: barney
+ # These dependencies are required because we only want to read the
+ # attribute values for a given resource once, and therefore we do so in
+ # dependency order. This is necessarily true for a convergence traversal,
+ # but also happens when we're fetching the resource attributes e.g. to show
+ # the output values. The key1/key2 attribute values must be stored before
+ # we attempt to calculate the dep_attrs for dict_resource in order to
+ # correctly determine which attributes of dict_resource are used.
+ depends_on:
+ - key1
+ - key2
+ - indirect_key3_dep
+ key1:
+ type: OS::Heat::Value
+ properties:
+ value: blarg
+ key2:
+ type: OS::Heat::Value
+ properties:
+ value: foo
+ key3:
+ type: OS::Heat::Value
+ properties:
+ value: fred
+ value1:
+ type: OS::Heat::Value
+ properties:
+ value:
+ get_attr:
+ - dict_resource
+ - value
+ - {get_attr: [key1, value]}
+ indirect_key3_dep:
+ type: OS::Heat::Value
+ properties:
+ value: ignored
+ depends_on: key3
+outputs:
+ value1:
+ value: {get_attr: [value1, value]}
+ value2:
+ value: {get_attr: [dict_resource, value, {get_attr: [key2, value]}]}
+ value3:
+ value: {get_attr: [dict_resource, value, {get_attr: [key3, value]}]}
+'''
+
+update_template = '''
+heat_template_version: ocata
+resources:
+ dict_resource:
+ type: OS::Heat::Value
+ properties:
+ value:
+ blarg: wibble
+ foo: bar
+ baz: quux
+ fred: barney
+ depends_on:
+ - key1
+ - key2
+ - indirect_key3_dep
+ - key4
+ key1:
+ type: OS::Heat::Value
+ properties:
+ value: foo
+ key2:
+ type: OS::Heat::Value
+ properties:
+ value: fred
+ key3:
+ type: OS::Heat::Value
+ properties:
+ value: blarg
+ key4:
+ type: OS::Heat::Value
+ properties:
+ value: baz
+ value1:
+ type: OS::Heat::Value
+ properties:
+ value:
+ get_attr:
+ - dict_resource
+ - value
+ - {get_attr: [key1, value]}
+ value4:
+ type: OS::Heat::Value
+ properties:
+ value:
+ get_attr:
+ - dict_resource
+ - value
+ - {get_attr: [key4, value]}
+ indirect_key3_dep:
+ type: OS::Heat::Value
+ properties:
+ value: ignored
+ depends_on: key3
+outputs:
+ value1:
+ value: {get_attr: [value1, value]}
+ value2:
+ value: {get_attr: [dict_resource, value, {get_attr: [key2, value]}]}
+ value3:
+ value: {get_attr: [dict_resource, value, {get_attr: [key3, value]}]}
+ value4:
+ value: {get_attr: [value4, value]}
+'''
+
+
+class NestedGetAttrTest(functional_base.FunctionalTestsBase):
+ def assertOutput(self, value, stack_identifier, key):
+ op = self.client.stacks.output_show(stack_identifier, key)['output']
+ self.assertEqual(key, op['output_key'])
+ if 'output_error' in op:
+ raise Exception(op['output_error'])
+ self.assertEqual(value, op['output_value'])
+
+ def test_nested_get_attr_create(self):
+ stack_identifier = self.stack_create(template=initial_template)
+
+ self.assertOutput('wibble', stack_identifier, 'value1')
+ self.assertOutput('bar', stack_identifier, 'value2')
+ self.assertOutput('barney', stack_identifier, 'value3')
+
+ def test_nested_get_attr_update(self):
+ stack_identifier = self.stack_create(template=initial_template)
+ self.update_stack(stack_identifier, template=update_template)
+
+ self.assertOutput('bar', stack_identifier, 'value1')
+ self.assertOutput('barney', stack_identifier, 'value2')
+ self.assertOutput('wibble', stack_identifier, 'value3')
+ self.assertOutput('quux', stack_identifier, 'value4')
diff --git a/functional/test_preview.py b/functional/test_preview.py
index 4b9d77c..54f8a79 100644
--- a/functional/test_preview.py
+++ b/functional/test_preview.py
@@ -10,12 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from heat_integrationtests.common import test
+from heat_integrationtests.functional import functional_base
from heatclient import exc
import six
-class StackPreviewTest(test.HeatIntegrationTest):
+class StackPreviewTest(functional_base.FunctionalTestsBase):
template = '''
heat_template_version: 2015-04-30
parameters:
diff --git a/functional/test_resource_group.py b/functional/test_resource_group.py
index 8d8cc89..3f47ca5 100644
--- a/functional/test_resource_group.py
+++ b/functional/test_resource_group.py
@@ -82,7 +82,9 @@
# Prove validation works for non-zero create/update
template_two_nested = self.template.replace("count: 0", "count: 2")
- expected_err = "Value 'BAD' is not an integer"
+ expected_err = ("resources.random_group<nested_stack>.resources."
+ "0<provider.yaml>.resources.random: : "
+ "Value 'BAD' is not an integer")
ex = self.assertRaises(exc.HTTPBadRequest, self.update_stack,
stack_identifier, template_two_nested,
environment=env, files=files)
diff --git a/functional/test_resources_list.py b/functional/test_resources_list.py
index 257afc5..f57cf67 100644
--- a/functional/test_resources_list.py
+++ b/functional/test_resources_list.py
@@ -41,3 +41,10 @@
filters={'name': 'test2'})
self.assertEqual('CREATE_COMPLETE', test2.resource_status)
+
+ def test_required_by(self):
+ stack_identifier = self.stack_create(template=test_template_depend)
+ [test1] = self.client.resources.list(stack_identifier,
+ filters={'name': 'test1'})
+
+ self.assertEqual(['test2'], test1.required_by)
diff --git a/functional/test_software_config.py b/functional/test_software_config.py
index 8a2e193..8c1cd53 100644
--- a/functional/test_software_config.py
+++ b/functional/test_software_config.py
@@ -22,6 +22,7 @@
from oslo_utils import timeutils
from heat_integrationtests.common import exceptions
+from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
@@ -101,6 +102,30 @@
for config_stack in config_stacks:
self._wait_for_stack_status(config_stack, 'CREATE_COMPLETE')
+ def test_deployments_timeout_failed(self):
+ parms = {'flavor': self.conf.minimal_instance_type,
+ 'network': self.conf.fixed_network_name,
+ 'image': self.conf.minimal_image_ref}
+ stack_identifier = self.stack_create(
+ parameters=parms,
+ template=self.server_template,
+ enable_cleanup=self.enable_cleanup)
+ server_stack = self.client.stacks.get(stack_identifier)
+ server = server_stack.outputs[0]['output_value']
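+ # Deploy 3 configs with a 1 minute stack timeout (timeout_mins), so
+ # the config stack times out into CREATE_FAILED and every deployment
+ # should end up FAILED.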
+ config_stack = self.deploy_config(server, 3, 1)
+ self._wait_for_stack_status(config_stack, 'CREATE_FAILED')
+ kwargs = {'server_id': server}
+
+ def check_deployment_status():
+ sd_list = self.client.software_deployments.list(**kwargs)
+ for sd in sd_list:
+ if sd.status != 'FAILED':
+ return False
+ return True
+
+ self.assertTrue(test.call_until_true(
+ 20, 0, check_deployment_status))
+
def deploy_many_configs(self, stack, server, config_stacks,
stack_count, deploys_per_stack,
deploy_count_start):
@@ -112,7 +137,7 @@
self.wait_for_deploy_metadata_set(stack, new_count)
return new_count
- def deploy_config(self, server, deploy_count):
+ def deploy_config(self, server, deploy_count, timeout=None):
parms = {'server': server}
template = yaml.safe_load(self.config_template)
resources = template['resources']
@@ -123,7 +148,8 @@
parameters=parms,
template=template,
enable_cleanup=self.enable_cleanup,
- expected_status=None)
+ expected_status=None,
+ timeout=timeout)
def wait_for_deploy_metadata_set(self, stack, deploy_count):
build_timeout = self.conf.build_timeout
diff --git a/functional/test_software_deployment_group.py b/functional/test_software_deployment_group.py
index 4e8b868..22143dc 100644
--- a/functional/test_software_deployment_group.py
+++ b/functional/test_software_deployment_group.py
@@ -84,8 +84,14 @@
expected_status='CREATE_IN_PROGRESS')
self._wait_for_resource_status(
stack_identifier, 'deployment', 'CREATE_IN_PROGRESS')
+
+ # Wait for all deployment resources to become IN_PROGRESS, since only
+ # IN_PROGRESS resources get signalled
nested_identifier = self.assert_resource_is_a_stack(
stack_identifier, 'deployment')
+ self._wait_for_stack_status(nested_identifier, 'CREATE_IN_PROGRESS')
+ self._wait_for_all_resource_status(nested_identifier,
+ 'CREATE_IN_PROGRESS')
group_resources = self.list_group_resources(
stack_identifier, 'deployment', minimal=False)
@@ -94,7 +100,10 @@
signal_required=True,
resources_to_signal=group_resources)
- self.check_input_values(group_resources, 'foo', 'foo_input')
+ created_group_resources = self.list_group_resources(
+ stack_identifier, 'deployment', minimal=False)
+ self.assertEqual(4, len(created_group_resources))
+ self.check_input_values(created_group_resources, 'foo', 'foo_input')
self.update_stack(stack_identifier,
template=template,
@@ -102,12 +111,11 @@
expected_status='UPDATE_IN_PROGRESS')
nested_identifier = self.assert_resource_is_a_stack(
stack_identifier, 'deployment')
- self.assertEqual(4, len(group_resources))
self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE',
signal_required=True,
resources_to_signal=group_resources)
- self.check_input_values(group_resources, 'foo', 'input2')
+ self.check_input_values(created_group_resources, 'foo', 'input2')
# We explicitly test delete here, vs just via cleanup and check
# the nested stack is gone
diff --git a/functional/test_stack_outputs.py b/functional/test_stack_outputs.py
index 536e589..161e0b3 100644
--- a/functional/test_stack_outputs.py
+++ b/functional/test_stack_outputs.py
@@ -61,3 +61,41 @@
stack_identifier, 'resource_output_b')['output']
self.assertEqual(expected_output_a, actual_output_a)
self.assertEqual(expected_output_b, actual_output_b)
+
+ before_template = '''
+heat_template_version: 2015-10-15
+resources:
+ test_resource_a:
+ type: OS::Heat::TestResource
+ properties:
+ value: 'foo'
+outputs:
+'''
+
+ after_template = '''
+heat_template_version: 2015-10-15
+resources:
+ test_resource_a:
+ type: OS::Heat::TestResource
+ properties:
+ value: 'foo'
+ test_resource_b:
+ type: OS::Heat::TestResource
+ properties:
+ value: {get_attr: [test_resource_a, output]}
+outputs:
+ output_value:
+ description: 'Output of resource b'
+ value: {get_attr: [test_resource_b, output]}
+'''
+
+ def test_outputs_update_new_resource(self):
+ stack_identifier = self.stack_create(template=self.before_template)
+ self.update_stack(stack_identifier, template=self.after_template)
+
+ expected_output_value = {
+ u'output_value': u'foo', u'output_key': u'output_value',
+ u'description': u'Output of resource b'}
+ actual_output_value = self.client.stacks.output_show(
+ stack_identifier, 'output_value')['output']
+ self.assertEqual(expected_output_value, actual_output_value)
diff --git a/functional/test_template_resource.py b/functional/test_template_resource.py
index ef2f4f5..c05912c 100644
--- a/functional/test_template_resource.py
+++ b/functional/test_template_resource.py
@@ -740,7 +740,7 @@
self.stack_resume(stack_identifier=stack_identifier)
-class ValidateFacadeTest(test.HeatIntegrationTest):
+class ValidateFacadeTest(functional_base.FunctionalTestsBase):
"""Prove that nested stack errors don't suck."""
template = '''
diff --git a/scenario/templates/test_aodh_alarm.yaml b/scenario/templates/test_aodh_alarm.yaml
index 9218f56..d4c9745 100644
--- a/scenario/templates/test_aodh_alarm.yaml
+++ b/scenario/templates/test_aodh_alarm.yaml
@@ -25,7 +25,10 @@
period: 60
evaluation_periods: 1
alarm_actions:
- - {get_attr: [scaleup_policy, alarm_url]}
+ - str_replace:
+ template: trust+url
+ params:
+ url: {get_attr: [scaleup_policy, signal_url]}
matching_metadata:
metadata.metering.stack_id: {get_param: "OS::stack_id"}
outputs:
diff --git a/scenario/templates/test_base_resources.yaml b/scenario/templates/test_base_resources.yaml
new file mode 100644
index 0000000..bff6185
--- /dev/null
+++ b/scenario/templates/test_base_resources.yaml
@@ -0,0 +1,110 @@
+heat_template_version: 2014-10-16
+
+description: >
+ A HOT template that just defines a single server.
+ Contains just base features to verify base heat support.
+
+parameters:
+ key_name:
+ type: string
+ default: key-01
+ description: Name of an existing key pair to use for the server
+ flavor:
+ type: string
+ description: Flavor for the server to be created
+ default: m1.small
+ constraints:
+ - custom_constraint: nova.flavor
+ image:
+ type: string
+ description: Image ID or image name to use for the server
+ constraints:
+ - custom_constraint: glance.image
+ vol_size:
+ type: number
+ description: The size of the Cinder volume
+ default: 1
+ private_net_name:
+ type: string
+ default: private-net-01
+ description: Name of private network to be created
+ private_net_cidr:
+ type: string
+ default: 192.168.101.0/24
+ description: Private network address (CIDR notation)
+ private_net_gateway:
+ type: string
+ default: 192.168.101.1
+ description: Private network gateway address
+ private_net_pool_start:
+ type: string
+ default: 192.168.101.2
+ description: Start of private network IP address allocation pool
+ private_net_pool_end:
+ type: string
+ default: 192.168.101.127
+ description: End of private network IP address allocation pool
+ echo_foo:
+ default: fooooo
+ type: string
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ server_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+
+ key:
+ type: OS::Nova::KeyPair
+ properties:
+ name: { get_param: key_name }
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ key_name: { get_resource: key }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ networks:
+ - port: { get_resource: server_port }
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/bash
+ echo echo_foo
+ params:
+ echo_foo: { get_param: echo_foo }
+
+ vol:
+ type: OS::Cinder::Volume
+ properties:
+ size: { get_param: vol_size }
+
+ vol_att:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ instance_uuid: { get_resource: server }
+ volume_id: { get_resource: vol }
+ mountpoint: /dev/vdb
+
+outputs:
+ server_networks:
+ description: The networks of the deployed server
+ value: { get_attr: [server, networks] }
diff --git a/scenario/test_base_resources.py b/scenario/test_base_resources.py
new file mode 100644
index 0000000..80194a0
--- /dev/null
+++ b/scenario/test_base_resources.py
@@ -0,0 +1,73 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.common import test
+from heat_integrationtests.scenario import scenario_base
+from heatclient.common import template_utils
+
+
+class BasicResourcesTest(scenario_base.ScenarioTestsBase):
+
+ def setUp(self):
+ super(BasicResourcesTest, self).setUp()
+ if not self.conf.image_ref:
+ raise self.skipException("No image configured to test")
+ if not self.conf.instance_type:
+ raise self.skipException("No flavor configured to test")
+
+ def check_stack(self):
+ sid = self.stack_identifier
+ # Check that the stack was created
+ self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+ server_resource = self.client.resources.get(sid, 'server')
+ server_id = server_resource.physical_resource_id
+ server = self.compute_client.servers.get(server_id)
+ self.assertEqual(server.id, server_id)
+
+ stack = self.client.stacks.get(sid)
+
+ server_networks = self._stack_output(stack, 'server_networks')
+ self.assertIn(self.private_net_name, server_networks)
+
+ def test_base_resources_integration(self):
+ """Define test for base resources interation from core porjects
+
+ The scenario is the following:
+ 1. Create a stack with basic resources from core projects.
+ 2. Check that all stack resources are created successfully.
+ 3. Wait for deployment.
+ 4. Check that stack was created.
+ 5. Check stack outputs.
+ """
+
+ self.private_net_name = test.rand_name('heat-net')
+ parameters = {
+ 'key_name': test.rand_name('heat-key'),
+ 'flavor': self.conf.instance_type,
+ 'image': self.conf.image_ref,
+ 'vol_size': self.conf.volume_size,
+ 'private_net_name': self.private_net_name
+ }
+
+ env_files, env = template_utils.process_environment_and_files(
+ self.conf.boot_config_env)
+
+ # Launch stack
+ self.stack_identifier = self.launch_stack(
+ template_name='test_base_resources.yaml',
+ parameters=parameters,
+ expected_status=None,
+ environment=env
+ )
+
+ # Check stack
+ self.check_stack()