Merge "Functional test for asg max_size and min_size"
diff --git a/common/clients.py b/common/clients.py
index 8913595..afdc477 100644
--- a/common/clients.py
+++ b/common/clients.py
@@ -16,9 +16,9 @@
from cinderclient import client as cinder_client
from heat.common.i18n import _
from heatclient import client as heat_client
-from keystoneclient.auth.identity.generic import password
-from keystoneclient import exceptions as kc_exceptions
-from keystoneclient import session
+from keystoneauth1 import exceptions as kc_exceptions
+from keystoneauth1.identity.generic import password
+from keystoneauth1 import session
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from swiftclient import client as swift_client
@@ -51,10 +51,7 @@
def get_endpoint_url(self, service_type, region=None):
kwargs = {
'service_type': service_type,
- 'endpoint_type': 'publicURL'}
- if region:
- kwargs.update({'attr': 'region',
- 'filter_value': region})
+ 'region_name': region}
return self.auth_ref.service_catalog.url_for(**kwargs)
@@ -67,20 +64,21 @@
CINDERCLIENT_VERSION = '2'
HEATCLIENT_VERSION = '1'
- NOVACLIENT_VERSION = '2'
+ NOVA_API_VERSION = '2.1'
CEILOMETER_VERSION = '2'
- def __init__(self, conf):
+ def __init__(self, conf, admin_credentials=False):
self.conf = conf
+ self.admin_credentials = admin_credentials
+
if self.conf.auth_url.find('/v'):
- self.v2_auth_url = self.conf.auth_url.replace('/v3', '/v2.0')
self.auth_version = self.conf.auth_url.split('/v')[1]
else:
raise ValueError(_('Incorrectly specified auth_url config: no '
'version found.'))
-
self.insecure = self.conf.disable_ssl_certificate_validation
self.ca_file = self.conf.ca_file
+
self.identity_client = self._get_identity_client()
self.orchestration_client = self._get_orchestration_client()
self.compute_client = self._get_compute_client()
@@ -89,6 +87,21 @@
self.object_client = self._get_object_client()
self.metering_client = self._get_metering_client()
+ def _username(self):
+ if self.admin_credentials:
+ return self.conf.admin_username
+ return self.conf.username
+
+ def _password(self):
+ if self.admin_credentials:
+ return self.conf.admin_password
+ return self.conf.password
+
+ def _tenant_name(self):
+ if self.admin_credentials:
+ return self.conf.admin_tenant_name
+ return self.conf.tenant_name
+
def _get_orchestration_client(self):
endpoint = os.environ.get('HEAT_URL')
if os.environ.get('OS_NO_CLIENT_AUTH') == 'True':
@@ -106,21 +119,25 @@
self.HEATCLIENT_VERSION,
endpoint,
token=token,
- username=self.conf.username,
- password=self.conf.password)
+ username=self._username(),
+ password=self._password())
def _get_identity_client(self):
+ user_domain_id = self.conf.user_domain_id
+ project_domain_id = self.conf.project_domain_id
user_domain_name = self.conf.user_domain_name
project_domain_name = self.conf.project_domain_name
kwargs = {
- 'username': self.conf.username,
- 'password': self.conf.password,
- 'tenant_name': self.conf.tenant_name,
+ 'username': self._username(),
+ 'password': self._password(),
+ 'tenant_name': self._tenant_name(),
'auth_url': self.conf.auth_url
}
# keystone v2 can't ignore domain details
if self.auth_version == '3':
kwargs.update({
+ 'user_domain_id': user_domain_id,
+ 'project_domain_id': project_domain_id,
'user_domain_name': user_domain_name,
'project_domain_name': project_domain_name})
auth = password.Password(**kwargs)
@@ -134,19 +151,10 @@
def _get_compute_client(self):
region = self.conf.region
-
- client_args = (
- self.conf.username,
- self.conf.password,
- self.conf.tenant_name,
- # novaclient can not use v3 url
- self.v2_auth_url
- )
-
# Create our default Nova client to use in testing
return nova_client.Client(
- self.NOVACLIENT_VERSION,
- *client_args,
+ self.NOVA_API_VERSION,
+ session=self.identity_client.session,
service_type='compute',
endpoint_type='publicURL',
region_name=region,
@@ -158,12 +166,8 @@
def _get_network_client(self):
return neutron_client.Client(
- username=self.conf.username,
- password=self.conf.password,
- tenant_name=self.conf.tenant_name,
+ session=self.identity_client.session,
endpoint_type='publicURL',
- # neutronclient can not use v3 url
- auth_url=self.v2_auth_url,
insecure=self.insecure,
ca_cert=self.ca_file)
@@ -172,11 +176,7 @@
endpoint_type = 'publicURL'
return cinder_client.Client(
self.CINDERCLIENT_VERSION,
- self.conf.username,
- self.conf.password,
- self.conf.tenant_name,
- # cinderclient can not use v3 url
- self.v2_auth_url,
+ session=self.identity_client.session,
region_name=region,
endpoint_type=endpoint_type,
insecure=self.insecure,
@@ -184,10 +184,11 @@
http_log_debug=True)
def _get_object_client(self):
+ # swiftclient does not support keystone sessions yet
args = {
'auth_version': self.auth_version,
- 'tenant_name': self.conf.tenant_name,
- 'user': self.conf.username,
+ 'tenant_name': self._tenant_name(),
+ 'user': self._username(),
'key': self.conf.password,
'authurl': self.conf.auth_url,
'os_options': {'endpoint_type': 'publicURL'},
@@ -197,8 +198,6 @@
return swift_client.Connection(**args)
def _get_metering_client(self):
- user_domain_name = self.conf.user_domain_name
- project_domain_name = self.conf.project_domain_name
try:
endpoint = self.identity_client.get_endpoint_url('metering',
self.conf.region)
@@ -206,22 +205,12 @@
return None
else:
args = {
- 'username': self.conf.username,
- 'password': self.conf.password,
- 'tenant_name': self.conf.tenant_name,
- 'auth_url': self.conf.auth_url,
+ 'session': self.identity_client.session,
'insecure': self.insecure,
'cacert': self.ca_file,
'region_name': self.conf.region,
'endpoint_type': 'publicURL',
'service_type': 'metering',
}
- # ceilometerclient can't ignore domain details for
- # v2 auth_url
- if self.auth_version == '3':
- args.update(
- {'user_domain_name': user_domain_name,
- 'project_domain_name': project_domain_name})
-
return ceilometer_client.Client(self.CEILOMETER_VERSION,
endpoint, **args)
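Note: with the keystoneauth1 imports above, every client except swift now authenticates through the shared session exposed by the identity client. A rough, standalone sketch of that pattern (placeholder values only; not part of this change), using the same generic Password plugin and kwargs as _get_identity_client:

    from keystoneauth1.identity.generic import password
    from keystoneauth1 import session

    auth = password.Password(auth_url='http://keystone.example:5000/v3',
                             username='demo',
                             password='secret',
                             tenant_name='demo',
                             user_domain_name='Default',
                             project_domain_name='Default')
    sess = session.Session(auth=auth, verify=True)
    # Any keystoneauth1-aware client can then reuse the session, e.g.
    # nova_client.Client('2.1', session=sess, ...)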
diff --git a/common/config.py b/common/config.py
index e99d034..4aa7e67 100644
--- a/common/config.py
+++ b/common/config.py
@@ -10,20 +10,29 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-
from oslo_config import cfg
-import heat_integrationtests
+CONF = None
+service_available_group = cfg.OptGroup(name="service_available",
+ title="Available OpenStack Services")
-IntegrationTestGroup = [
+ServiceAvailableGroup = [
+ cfg.BoolOpt("heat_plugin",
+ default=True,
+ help="Whether or not heat is expected to be available"),
+]
+heat_group = cfg.OptGroup(name="heat_plugin",
+ title="Heat Service Options")
+
+HeatGroup = [
+ cfg.StrOpt("catalog_type",
+ default="orchestration",
+ help="Catalog type of the orchestration service."),
cfg.StrOpt('username',
- default=os.environ.get('OS_USERNAME'),
help="Username to use for non admin API requests."),
cfg.StrOpt('password',
- default=os.environ.get('OS_PASSWORD'),
help="Non admin API key to use when authenticating.",
secret=True),
cfg.StrOpt('admin_username',
@@ -32,22 +41,25 @@
help="Admin API key to use when authentication.",
secret=True),
cfg.StrOpt('tenant_name',
- default=(os.environ.get('OS_PROJECT_NAME') or
- os.environ.get('OS_TENANT_NAME')),
help="Tenant name to use for API requests."),
+ cfg.StrOpt('admin_tenant_name',
+ default='admin',
+ help="Admin tenant name to use for admin API requests."),
cfg.StrOpt('auth_url',
- default=os.environ.get('OS_AUTH_URL'),
help="Full URI of the OpenStack Identity API (Keystone)"),
cfg.StrOpt('user_domain_name',
- default=os.environ.get('OS_USER_DOMAIN_NAME'),
help="User domain name, if keystone v3 auth_url"
"is used"),
cfg.StrOpt('project_domain_name',
- default=os.environ.get('OS_PROJECT_DOMAIN_NAME'),
help="Project domain name, if keystone v3 auth_url"
"is used"),
+ cfg.StrOpt('user_domain_id',
+ help="User domain id, if keystone v3 auth_url"
+ "is used"),
+ cfg.StrOpt('project_domain_id',
+ help="Project domain id, if keystone v3 auth_url"
+ "is used"),
cfg.StrOpt('region',
- default=os.environ.get('OS_REGION_NAME'),
help="The region name to use"),
cfg.StrOpt('instance_type',
help="Instance type for tests. Needs to be big enough for a "
@@ -65,7 +77,6 @@
default=False,
help="Set to True if using self-signed SSL certificates."),
cfg.StrOpt('ca_file',
- default=None,
help="CA certificate to pass for servers that have "
"https endpoint."),
cfg.IntOpt('build_interval',
@@ -119,7 +130,7 @@
cfg.ListOpt('skip_scenario_test_list',
help="List of scenario test class or class.method "
"names to skip ex. NeutronLoadBalancerTest, "
- "CeilometerAlarmTest.test_alarm"),
+ "AodhAlarmTest.test_alarm"),
cfg.ListOpt('skip_test_stack_action_list',
help="List of stack actions in tests to skip "
"ex. ABANDON, ADOPT, SUSPEND, RESUME"),
@@ -131,7 +142,7 @@
help="Timeout in seconds to wait for connectivity to "
"server."),
cfg.IntOpt('sighup_timeout',
- default=30,
+ default=120,
help="Timeout in seconds to wait for adding or removing child"
"process after receiving of sighup signal"),
cfg.IntOpt('sighup_config_edit_retries',
@@ -146,24 +157,6 @@
]
-def init_conf(read_conf=True):
-
- default_config_files = None
- if read_conf:
- confpath = os.path.join(
- os.path.dirname(os.path.realpath(heat_integrationtests.__file__)),
- 'heat_integrationtests.conf')
- if os.path.isfile(confpath):
- default_config_files = [confpath]
-
- conf = cfg.ConfigOpts()
- conf(args=[], project='heat_integrationtests',
- default_config_files=default_config_files)
-
- for opt in IntegrationTestGroup:
- conf.register_opt(opt)
- return conf
-
-
def list_opts():
- yield None, IntegrationTestGroup
+ yield heat_group.name, HeatGroup
+ yield service_available_group.name, ServiceAvailableGroup
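The options above now follow the tempest-plugin layout: config.CONF is left as None here, to be populated by whatever loads the plugin, and list_opts() exposes the two groups for config generation. A minimal sketch of how a plugin hook could register them so that config.CONF.heat_plugin lookups in common/test.py resolve (the function name is illustrative, not taken from this change):

    from heat_integrationtests.common import config

    def register_opts(conf):
        # 'conf' is an oslo_config ConfigOpts instance, e.g. tempest's CONF.
        conf.register_group(config.heat_group)
        conf.register_opts(config.HeatGroup, group=config.heat_group)
        conf.register_group(config.service_available_group)
        conf.register_opts(config.ServiceAvailableGroup,
                           group=config.service_available_group)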
diff --git a/common/test.py b/common/test.py
index 976ae8b..b422012 100644
--- a/common/test.py
+++ b/common/test.py
@@ -73,7 +73,7 @@
def setUp(self):
super(HeatIntegrationTest, self).setUp()
- self.conf = config.init_conf()
+ self.conf = config.CONF.heat_plugin
self.assertIsNotNone(self.conf.auth_url,
'No auth_url configured')
@@ -81,8 +81,16 @@
'No username configured')
self.assertIsNotNone(self.conf.password,
'No password configured')
+ self.setup_clients(self.conf)
+ self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
+ self.updated_time = {}
+ if self.conf.disable_ssl_certificate_validation:
+ self.verify_cert = False
+ else:
+ self.verify_cert = self.conf.ca_file or True
- self.manager = clients.ClientManager(self.conf)
+ def setup_clients(self, conf, admin_credentials=False):
+ self.manager = clients.ClientManager(conf, admin_credentials)
self.identity_client = self.manager.identity_client
self.orchestration_client = self.manager.orchestration_client
self.compute_client = self.manager.compute_client
@@ -90,12 +98,11 @@
self.volume_client = self.manager.volume_client
self.object_client = self.manager.object_client
self.metering_client = self.manager.metering_client
- self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
- self.updated_time = {}
- if self.conf.disable_ssl_certificate_validation:
- self.verify_cert = False
- else:
- self.verify_cert = self.conf.ca_file or True
+
+ self.client = self.orchestration_client
+
+ def setup_clients_for_admin(self):
+ self.setup_clients(self.conf, True)
def get_remote_client(self, server_or_ip, username, private_key=None):
if isinstance(server_or_ip, six.string_types):
@@ -278,6 +285,10 @@
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
return True
+ elif status == 'DELETE_COMPLETE' and stack.deletion_time is None:
+            # Wait for deletion_time to be filled, so that we have more
+ # confidence the operation is finished.
+ return False
else:
return True
@@ -301,7 +312,9 @@
def _wait_for_stack_status(self, stack_identifier, status,
failure_pattern=None,
- success_on_not_found=False):
+ success_on_not_found=False,
+ signal_required=False,
+ resources_to_signal=None):
"""Waits for a Stack to reach a given status.
Note this compares the full $action_$status, e.g
@@ -333,7 +346,8 @@
if self._verify_status(stack, stack_identifier, status,
fail_regexp):
return
-
+ if signal_required:
+ self.signal_resources(resources_to_signal)
time.sleep(build_interval)
message = ('Stack %s failed to reach %s status within '
@@ -377,7 +391,6 @@
env = environment or {}
env_files = files or {}
parameters = parameters or {}
- stack_name = stack_identifier.split('/')[0]
self.updated_time[stack_identifier] = self.client.stacks.get(
stack_identifier, resolve_outputs=False).updated_time
@@ -385,7 +398,6 @@
self._handle_in_progress(
self.client.stacks.update,
stack_id=stack_identifier,
- stack_name=stack_name,
template=template,
files=env_files,
disable_rollback=disable_rollback,
@@ -403,6 +415,25 @@
self._wait_for_stack_status(**kwargs)
+ def cancel_update_stack(self, stack_identifier,
+ expected_status='ROLLBACK_COMPLETE'):
+
+ stack_name = stack_identifier.split('/')[0]
+
+ self.updated_time[stack_identifier] = self.client.stacks.get(
+ stack_identifier, resolve_outputs=False).updated_time
+
+ self.client.actions.cancel_update(stack_name)
+
+ kwargs = {'stack_identifier': stack_identifier,
+ 'status': expected_status}
+ if expected_status in ['ROLLBACK_COMPLETE']:
+            # Rollback is triggered by intentionally failing the stack,
+            # so also watch for ROLLBACK_FAILED.
+ kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
+
+ self._wait_for_stack_status(**kwargs)
+
def preview_update_stack(self, stack_identifier, template,
environment=None, files=None, parameters=None,
tags=None, disable_rollback=True,
@@ -410,11 +441,9 @@
env = environment or {}
env_files = files or {}
parameters = parameters or {}
- stack_name = stack_identifier.split('/')[0]
return self.client.stacks.preview_update(
stack_id=stack_identifier,
- stack_name=stack_name,
template=template,
files=env_files,
disable_rollback=disable_rollback,
@@ -486,6 +515,28 @@
resources = self.client.resources.list(stack_identifier)
return dict((r.resource_name, r.resource_type) for r in resources)
+ def get_resource_stack_id(self, r):
+ stack_link = [l for l in r.links if l.get('rel') == 'stack'][0]
+ return stack_link['href'].split("/")[-1]
+
+ def check_input_values(self, group_resources, key, value):
+ # Check inputs for deployment and derived config
+ for r in group_resources:
+ d = self.client.software_deployments.get(
+ r.physical_resource_id)
+ self.assertEqual({key: value}, d.input_values)
+ c = self.client.software_configs.get(
+ d.config_id)
+ foo_input_c = [i for i in c.inputs if i.get('name') == key][0]
+ self.assertEqual(value, foo_input_c.get('value'))
+
+ def signal_resources(self, resources):
+ # Signal all IN_PROGRESS resources
+ for r in resources:
+ if 'IN_PROGRESS' in r.resource_status:
+ stack_id = self.get_resource_stack_id(r)
+ self.client.resources.signal(stack_id, r.resource_name)
+
def stack_create(self, stack_name=None, template=None, files=None,
parameters=None, environment=None, tags=None,
expected_status='CREATE_COMPLETE',
@@ -558,8 +609,7 @@
'SUSPEND' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack suspend disabled in conf, skipping')
- stack_name = stack_identifier.split('/')[0]
- self._handle_in_progress(self.client.actions.suspend, stack_name)
+ self._handle_in_progress(self.client.actions.suspend, stack_identifier)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'SUSPEND_COMPLETE')
@@ -570,8 +620,7 @@
'RESUME' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack resume disabled in conf, skipping')
- stack_name = stack_identifier.split('/')[0]
- self._handle_in_progress(self.client.actions.resume, stack_name)
+ self._handle_in_progress(self.client.actions.resume, stack_identifier)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'RESUME_COMPLETE')
diff --git a/functional/functional_base.py b/functional/functional_base.py
index 9f76011..73ccf1d 100644
--- a/functional/functional_base.py
+++ b/functional/functional_base.py
@@ -20,7 +20,6 @@
def setUp(self):
super(FunctionalTestsBase, self).setUp()
self.check_skip()
- self.client = self.orchestration_client
def check_skip(self):
test_cls_name = reflection.get_class_name(self, fully_qualified=False)
diff --git a/functional/test_admin_actions.py b/functional/test_admin_actions.py
new file mode 100644
index 0000000..2c9ff6e
--- /dev/null
+++ b/functional/test_admin_actions.py
@@ -0,0 +1,101 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.functional import functional_base
+
+# Simple stack
+test_template = {
+ 'heat_template_version': '2013-05-23',
+ 'resources': {
+ 'test1': {
+ 'type': 'OS::Heat::TestResource',
+ 'properties': {
+ 'value': 'Test1'
+ }
+ }
+ }
+}
+
+# Nested stack
+rsg_template = {
+ 'heat_template_version': '2013-05-23',
+ 'resources': {
+ 'random_group': {
+ 'type': 'OS::Heat::ResourceGroup',
+ 'properties': {
+ 'count': 2,
+ 'resource_def': {
+ 'type': 'OS::Heat::RandomString',
+ 'properties': {
+ 'length': 30,
+ 'salt': 'initial'
+ }
+ }
+ }
+ }
+ }
+}
+
+
+class AdminActionsTest(functional_base.FunctionalTestsBase):
+
+ def setUp(self):
+ super(AdminActionsTest, self).setUp()
+ if not self.conf.admin_username or not self.conf.admin_password:
+ self.skipTest('No admin creds found, skipping')
+
+ def create_stack_setup_admin_client(self, template=test_template):
+ # Create the stack with the default user
+ self.stack_identifier = self.stack_create(template=template)
+
+ # Setup admin clients
+ self.setup_clients_for_admin()
+
+ def test_admin_simple_stack_actions(self):
+ self.create_stack_setup_admin_client()
+
+ updated_template = test_template.copy()
+ props = updated_template['resources']['test1']['properties']
+ props['value'] = 'new_value'
+
+ # Update, suspend and resume stack
+ self.update_stack(self.stack_identifier,
+ template=updated_template)
+ self.stack_suspend(self.stack_identifier)
+ self.stack_resume(self.stack_identifier)
+
+ # List stack resources
+ initial_resources = {'test1': 'OS::Heat::TestResource'}
+ self.assertEqual(initial_resources,
+ self.list_resources(self.stack_identifier))
+ # Delete stack
+ self._stack_delete(self.stack_identifier)
+
+ def test_admin_complex_stack_actions(self):
+ self.create_stack_setup_admin_client(template=rsg_template)
+
+ updated_template = rsg_template.copy()
+ props = updated_template['resources']['random_group']['properties']
+ props['count'] = 3
+
+ # Update, suspend and resume stack
+ self.update_stack(self.stack_identifier,
+ template=updated_template)
+ self.stack_suspend(self.stack_identifier)
+ self.stack_resume(self.stack_identifier)
+
+ # List stack resources
+ resources = {'random_group': 'OS::Heat::ResourceGroup'}
+ self.assertEqual(resources,
+ self.list_resources(self.stack_identifier))
+ # Delete stack
+ self._stack_delete(self.stack_identifier)
diff --git a/functional/test_autoscaling.py b/functional/test_autoscaling.py
index ebc7f61..369fa00 100644
--- a/functional/test_autoscaling.py
+++ b/functional/test_autoscaling.py
@@ -34,7 +34,8 @@
"Parameters" : {"size": {"Type": "String", "Default": "1"},
"AZ": {"Type": "String", "Default": "nova"},
"image": {"Type": "String"},
- "flavor": {"Type": "String"}},
+ "flavor": {"Type": "String"},
+ "user_data": {"Type": "String", "Default": "jsconfig data"}},
"Resources": {
"JobServerGroup": {
"Type" : "AWS::AutoScaling::AutoScalingGroup",
@@ -53,7 +54,7 @@
"ImageId" : {"Ref": "image"},
"InstanceType" : {"Ref": "flavor"},
"SecurityGroups" : [ "sg-1" ],
- "UserData" : "jsconfig data"
+ "UserData" : {"Ref": "user_data"}
}
}
},
@@ -79,7 +80,7 @@
random1:
type: OS::Heat::RandomString
properties:
- salt: {get_param: ImageId}
+ salt: {get_param: UserData}
outputs:
PublicIp: {value: {get_attr: [random1, value]}}
AvailabilityZone: {value: 'not-used11'}
@@ -116,8 +117,6 @@
def setUp(self):
super(AutoscalingGroupTest, self).setUp()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
if not self.conf.minimal_image_ref:
raise self.skipException("No minimal image configured to test")
if not self.conf.instance_type:
@@ -152,7 +151,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 4,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files, environment=env)
@@ -169,7 +168,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
@@ -181,7 +180,7 @@
# Increase min size to 5
env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 5,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
self.update_stack(stack_identifier, self.template,
environment=env2, files=files)
@@ -198,7 +197,7 @@
env = {'resource_registry':
{'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': '1',
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
@@ -211,8 +210,9 @@
{'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': '1',
'AZ': 'wibble',
- 'image': self.conf.image_ref,
- 'flavor': self.conf.instance_type}}
+ 'image': self.conf.minimal_image_ref,
+ 'flavor': self.conf.instance_type,
+ 'user_data': 'new data'}}
self.update_stack(stack_identifier, self.template,
environment=env2, files=files)
@@ -230,7 +230,7 @@
files = {'provider.yaml': self.bad_instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
self.client.stacks.create(
@@ -265,7 +265,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
@@ -317,7 +317,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 4,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files, environment=env)
@@ -353,7 +353,7 @@
size = 10
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': size,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_name = self._stack_rand_name()
stack_identifier = self.stack_create(
@@ -424,7 +424,7 @@
policy['MinInstancesInService'] = '1'
policy['MaxBatchSize'] = '3'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=10,
@@ -443,7 +443,7 @@
policy['MinInstancesInService'] = '8'
policy['MaxBatchSize'] = '4'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=8,
@@ -458,7 +458,7 @@
policy['MinInstancesInService'] = '0'
policy['MaxBatchSize'] = '20'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=10,
@@ -474,7 +474,7 @@
policy['MaxBatchSize'] = '1'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=9,
@@ -495,7 +495,7 @@
policy['MaxBatchSize'] = '3'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['InstanceType'] = 'm1.tiny'
+ config['Properties']['InstanceType'] = self.conf.minimal_instance_type
self.update_instance_group(updt_template,
num_updates_expected_on_updt=10,
@@ -516,7 +516,7 @@
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['InstanceType'] = 'm1.tiny'
+ config['Properties']['InstanceType'] = self.conf.minimal_instance_type
self.update_instance_group(updt_template,
num_updates_expected_on_updt=8,
@@ -624,7 +624,7 @@
{'custom_lb': {'AWS::EC2::Instance': 'lb.yaml'}},
'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
def check_instance_count(self, stack_identifier, expected):
@@ -726,8 +726,9 @@
# suspend the top level stack.
self.client.actions.suspend(stack_id=stack_identifier)
- self._wait_for_resource_status(
- stack_identifier, 'JobServerGroup', 'SUSPEND_COMPLETE')
+
+ # Wait for stack to reach SUSPEND_COMPLETE
+ self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
# Send a signal and an exception will raise
ex = self.assertRaises(exc.BadRequest,
diff --git a/functional/test_aws_stack.py b/functional/test_aws_stack.py
index 13c4278..296ed8d 100644
--- a/functional/test_aws_stack.py
+++ b/functional/test_aws_stack.py
@@ -14,8 +14,6 @@
import json
import random
-from oslo_log import log as logging
-
from six.moves.urllib import parse
from swiftclient import utils as swiftclient_utils
import yaml
@@ -23,8 +21,6 @@
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
-LOG = logging.getLogger(__name__)
-
class AwsStackTest(functional_base.FunctionalTestsBase):
test_template = '''
diff --git a/functional/test_cancel_update.py b/functional/test_cancel_update.py
new file mode 100644
index 0000000..f6ddc07
--- /dev/null
+++ b/functional/test_cancel_update.py
@@ -0,0 +1,61 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.functional import functional_base
+
+
+class CancelUpdateTest(functional_base.FunctionalTestsBase):
+
+ template = '''
+heat_template_version: '2013-05-23'
+parameters:
+ InstanceType:
+ type: string
+ ImageId:
+ type: string
+ network:
+ type: string
+resources:
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: network}
+ Server:
+ type: OS::Nova::Server
+ properties:
+ flavor_update_policy: REPLACE
+ image: {get_param: ImageId}
+ flavor: {get_param: InstanceType}
+ networks:
+ - port: {get_resource: port}
+'''
+
+ def setUp(self):
+ super(CancelUpdateTest, self).setUp()
+ if not self.conf.minimal_image_ref:
+ raise self.skipException("No minimal image configured to test")
+ if not self.conf.minimal_instance_type:
+ raise self.skipException("No minimal flavor configured to test.")
+
+ def test_cancel_update_server_with_port(self):
+ parameters = {'InstanceType': self.conf.minimal_instance_type,
+ 'ImageId': self.conf.minimal_image_ref,
+ 'network': self.conf.fixed_network_name}
+
+ stack_identifier = self.stack_create(template=self.template,
+ parameters=parameters)
+ parameters['InstanceType'] = 'm1.large'
+ self.update_stack(stack_identifier, self.template,
+ parameters=parameters,
+ expected_status='UPDATE_IN_PROGRESS')
+
+ self.cancel_update_stack(stack_identifier)
diff --git a/functional/test_conditional_exposure.py b/functional/test_conditional_exposure.py
index c1175f1..bf6cc47 100644
--- a/functional/test_conditional_exposure.py
+++ b/functional/test_conditional_exposure.py
@@ -65,7 +65,7 @@
class RoleBasedExposureTest(functional_base.FunctionalTestsBase):
- forbidden_resource_type = "OS::Nova::Flavor"
+
fl_tmpl = """
heat_template_version: 2015-10-15
@@ -77,21 +77,75 @@
vcpus: 10
"""
- def test_non_admin_forbidden_create_flavors(self):
- """Fail to create Flavor resource w/o admin role.
+ cvt_tmpl = """
+heat_template_version: 2015-10-15
+
+resources:
+ cvt:
+ type: OS::Cinder::VolumeType
+ properties:
+ name: cvt_test
+"""
+
+ host_aggr_tmpl = """
+heat_template_version: 2015-10-15
+parameters:
+ az:
+ type: string
+ default: nova
+resources:
+  host_aggregate:
+ type: OS::Nova::HostAggregate
+ properties:
+ name: aggregate_test
+ availability_zone: {get_param: az}
+"""
+
+ scenarios = [
+ ('r_nova_flavor', dict(
+ stack_name='s_nova_flavor',
+ template=fl_tmpl,
+ forbidden_r_type="OS::Nova::Flavor",
+ test_creation=True)),
+ ('r_nova_host_aggregate', dict(
+        stack_name='s_nova_host_aggregate',
+ template=host_aggr_tmpl,
+ forbidden_r_type="OS::Nova::HostAggregate",
+ test_creation=True)),
+ ('r_cinder_vtype', dict(
+ stack_name='s_cinder_vtype',
+ template=cvt_tmpl,
+ forbidden_r_type="OS::Cinder::VolumeType",
+ test_creation=True)),
+ ('r_cinder_vtype_encrypt', dict(
+ forbidden_r_type="OS::Cinder::EncryptedVolumeType",
+ test_creation=False)),
+ ('r_neutron_qos', dict(
+ forbidden_r_type="OS::Neutron::QoSPolicy",
+ test_creation=False)),
+ ('r_neutron_qos_bandwidth_limit', dict(
+ forbidden_r_type="OS::Neutron::QoSBandwidthLimitRule",
+ test_creation=False)),
+ ('r_manila_share_type', dict(
+ forbidden_r_type="OS::Manila::ShareType",
+ test_creation=False))
+ ]
+
+ def test_non_admin_forbidden_create_resources(self):
+ """Fail to create resource w/o admin role.
Integration tests job runs as normal OpenStack user,
- and OS::Nova:Flavor is configured to require
+ and the resources above are configured to require
admin role in default policy file of Heat.
"""
- stack_name = self._stack_rand_name()
- ex = self.assertRaises(exc.Forbidden,
- self.client.stacks.create,
- stack_name=stack_name,
- template=self.fl_tmpl)
- self.assertIn(self.forbidden_resource_type, ex.message)
+ if self.test_creation:
+ ex = self.assertRaises(exc.Forbidden,
+ self.client.stacks.create,
+ stack_name=self.stack_name,
+ template=self.template)
+ self.assertIn(self.forbidden_r_type, ex.message)
def test_forbidden_resource_not_listed(self):
resources = self.client.resource_types.list()
- self.assertNotIn(self.forbidden_resource_type,
+ self.assertNotIn(self.forbidden_r_type,
(r.resource_type for r in resources))
diff --git a/functional/test_conditions.py b/functional/test_conditions.py
new file mode 100644
index 0000000..ebb3c08
--- /dev/null
+++ b/functional/test_conditions.py
@@ -0,0 +1,619 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.functional import functional_base
+
+
+cfn_template = '''
+AWSTemplateFormatVersion: 2010-09-09
+Parameters:
+ env_type:
+ Default: test
+ Type: String
+ AllowedValues: [prod, test]
+ zone:
+ Type: String
+ Default: beijing
+Conditions:
+ Prod: {"Fn::Equals" : [{Ref: env_type}, "prod"]}
+ Test:
+ Fn::Not:
+ - Fn::Equals:
+ - Ref: env_type
+ - prod
+ Beijing_Prod:
+ Fn::And:
+ - Fn::Equals:
+ - Ref: env_type
+ - prod
+ - Fn::Equals:
+ - Ref: zone
+ - beijing
+ Xian_Zone:
+ Fn::Equals:
+ - Ref: zone
+ - xian
+ Xianyang_Zone:
+ Fn::Equals:
+ - Ref: zone
+ - xianyang
+ Fujian_Zone:
+ Fn::Or:
+ - Fn::Equals:
+ - Ref: zone
+ - fuzhou
+ - Fn::Equals:
+ - Ref: zone
+ - xiamen
+ Fujian_Prod:
+ Fn::And:
+ - Fujian_Zone
+ - Prod
+ Shannxi_Provice:
+ Fn::Or:
+ - Xian_Zone
+ - Xianyang_Zone
+ Not_Shannxi:
+ Fn::Not: [Shannxi_Provice]
+Resources:
+ test_res:
+ Type: OS::Heat::TestResource
+ Properties:
+ value: {"Fn::If": ["Prod", "env_is_prod", "env_is_test"]}
+ prod_res:
+ Type: OS::Heat::TestResource
+ Properties:
+ value: prod_res
+ Condition: Prod
+ test_res1:
+ Type: OS::Heat::TestResource
+ Properties:
+ value: just in test env
+ Condition: Test
+ beijing_prod_res:
+ Type: OS::Heat::TestResource
+ Properties:
+ value: beijing_prod_res
+ Condition: Beijing_Prod
+ fujian_res:
+ Type: OS::Heat::TestResource
+ Condition: Fujian_Zone
+ Properties:
+ value: fujian_res
+ fujian_prod_res:
+ Type: OS::Heat::TestResource
+ Condition: Fujian_Prod
+ Properties:
+ value: fujian_prod_res
+ shannxi_res:
+ Type: OS::Heat::TestResource
+ Condition: Shannxi_Provice
+ Properties:
+ value: shannxi_res
+ not_shannxi_res:
+ Type: OS::Heat::TestResource
+ Condition: Not_Shannxi
+ Properties:
+ value: not_shannxi_res
+Outputs:
+ res_value:
+ Value: {"Fn::GetAtt": [prod_res, output]}
+ Condition: Prod
+ test_res_value:
+ Value: {"Fn::GetAtt": [test_res, output]}
+ prod_resource:
+ Value: {"Fn::If": [Prod, {Ref: prod_res}, 'no_prod_res']}
+ test_res1_value:
+ Value: {"Fn::If": [Test, {"Fn::GetAtt": [test_res1, output]},
+ 'no_test_res1']}
+ beijing_prod_res:
+ Value: {"Fn::If": [Beijing_Prod, {Ref: beijing_prod_res}, 'no_prod_res']}
+'''
+
+hot_template = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ default: test
+ type: string
+ constraints:
+ - allowed_values: [prod, test]
+ zone:
+ type: string
+ default: beijing
+conditions:
+ prod: {equals : [{get_param: env_type}, "prod"]}
+ test:
+ not:
+ equals:
+ - get_param: env_type
+ - prod
+ beijing_prod:
+ and:
+ - equals:
+ - get_param: zone
+ - beijing
+ - equals:
+ - get_param: env_type
+ - prod
+ xian_zone:
+ equals:
+ - get_param: zone
+ - xian
+ xianyang_zone:
+ equals:
+ - get_param: zone
+ - xianyang
+ fujian_zone:
+ or:
+ - equals:
+ - get_param: zone
+ - fuzhou
+ - equals:
+ - get_param: zone
+ - xiamen
+ fujian_prod:
+ and:
+ - fujian_zone
+ - prod
+ shannxi_provice:
+ or:
+ - xian_zone
+ - xianyang_zone
+ not_shannxi:
+ not: shannxi_provice
+resources:
+ test_res:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: ["prod", "env_is_prod", "env_is_test"]}
+ prod_res:
+ type: OS::Heat::TestResource
+ properties:
+ value: prod_res
+ condition: prod
+ test_res1:
+ type: OS::Heat::TestResource
+ properties:
+ value: just in test env
+ condition: test
+ beijing_prod_res:
+ type: OS::Heat::TestResource
+ properties:
+ value: beijing_prod_res
+ condition: beijing_prod
+ fujian_res:
+ type: OS::Heat::TestResource
+ condition: fujian_zone
+ properties:
+ value: fujian_res
+ fujian_prod_res:
+ type: OS::Heat::TestResource
+ condition: fujian_prod
+ properties:
+ value: fujian_prod_res
+ shannxi_res:
+ type: OS::Heat::TestResource
+ condition: shannxi_provice
+ properties:
+ value: shannxi_res
+ not_shannxi_res:
+ type: OS::Heat::TestResource
+ condition: not_shannxi
+ properties:
+ value: not_shannxi_res
+outputs:
+ res_value:
+ value: {get_attr: [prod_res, output]}
+ condition: prod
+ test_res_value:
+ value: {get_attr: [test_res, output]}
+ prod_resource:
+ value: {if: [prod, {get_resource: prod_res}, 'no_prod_res']}
+ test_res1_value:
+ value: {if: [test, {get_attr: [test_res1, output]}, 'no_test_res1']}
+ beijing_prod_res:
+ value: {if: [beijing_prod, {get_resource: beijing_prod_res},
+ 'no_prod_res']}
+'''
+
+before_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ default: test
+ type: string
+conditions:
+ cd1: {equals : [{get_param: env_type}, "prod"]}
+resources:
+ test:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: [cd1, 'prod', 'test']}
+'''
+
+after_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ default: prod
+ type: string
+conditions:
+ cd2: {equals : [{get_param: env_type}, "prod"]}
+resources:
+ test:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: [cd2, 'prod', 'test']}
+ test2:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: [cd2, 'prod', 'test']}
+'''
+
+fail_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ default: prod
+ type: string
+conditions:
+ cd3: {equals : [{get_param: env_type}, "prod"]}
+resources:
+ test:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: [cd3, 'prod', 'test']}
+ test2:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: [cd3, 'prod', 'test']}
+ test_fail:
+ type: OS::Heat::TestResource
+ properties:
+ fail: True
+ depends_on: [test, test2]
+'''
+
+recover_rename_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ default: prod
+ type: string
+conditions:
+ cd3: {equals : [{get_param: env_type}, "prod"]}
+resources:
+ test2:
+ type: OS::Heat::TestResource
+ properties:
+ value: {if: [cd3, 'prod', 'test']}
+ test_fail:
+ type: OS::Heat::TestResource
+ properties:
+ fail: False
+ depends_on: [test2]
+'''
+
+
+class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
+
+ def res_assert_for_prod(self, resources, bj_prod=True, fj_zone=False,
+ shannxi_provice=False):
+ res_names = [res.resource_name for res in resources]
+ if bj_prod:
+ self.assertEqual(4, len(resources))
+ self.assertIn('beijing_prod_res', res_names)
+ self.assertIn('not_shannxi_res', res_names)
+ elif fj_zone:
+ self.assertEqual(5, len(resources))
+ self.assertIn('fujian_res', res_names)
+ self.assertNotIn('beijing_prod_res', res_names)
+ self.assertIn('not_shannxi_res', res_names)
+ self.assertIn('fujian_prod_res', res_names)
+ elif shannxi_provice:
+ self.assertEqual(3, len(resources))
+ self.assertIn('shannxi_res', res_names)
+ else:
+ self.assertEqual(3, len(resources))
+ self.assertIn('not_shannxi_res', res_names)
+ self.assertIn('prod_res', res_names)
+ self.assertIn('test_res', res_names)
+
+ def res_assert_for_test(self, resources, fj_zone=False,
+ shannxi_provice=False):
+ res_names = [res.resource_name for res in resources]
+
+ if fj_zone:
+ self.assertEqual(4, len(resources))
+ self.assertIn('fujian_res', res_names)
+ self.assertIn('not_shannxi_res', res_names)
+ elif shannxi_provice:
+ self.assertEqual(3, len(resources))
+ self.assertNotIn('fujian_res', res_names)
+ self.assertIn('shannxi_res', res_names)
+ else:
+ self.assertEqual(3, len(resources))
+ self.assertIn('not_shannxi_res', res_names)
+ self.assertIn('test_res', res_names)
+ self.assertIn('test_res1', res_names)
+ self.assertNotIn('prod_res', res_names)
+
+ def output_assert_for_prod(self, stack_id, bj_prod=True):
+ output = self.client.stacks.output_show(stack_id,
+ 'res_value')['output']
+ self.assertEqual('prod_res', output['output_value'])
+
+ test_res_value = self.client.stacks.output_show(
+ stack_id, 'test_res_value')['output']
+ self.assertEqual('env_is_prod', test_res_value['output_value'])
+
+ prod_resource = self.client.stacks.output_show(
+ stack_id, 'prod_resource')['output']
+ self.assertNotEqual('no_prod_res', prod_resource['output_value'])
+
+ test_res_output = self.client.stacks.output_show(
+ stack_id, 'test_res1_value')['output']
+ self.assertEqual('no_test_res1', test_res_output['output_value'])
+
+ beijing_prod_res = self.client.stacks.output_show(
+ stack_id, 'beijing_prod_res')['output']
+ if bj_prod:
+ self.assertNotEqual('no_prod_res',
+ beijing_prod_res['output_value'])
+ else:
+ self.assertEqual('no_prod_res', beijing_prod_res['output_value'])
+
+ def output_assert_for_test(self, stack_id):
+ output = self.client.stacks.output_show(stack_id,
+ 'res_value')['output']
+ self.assertIsNone(output['output_value'])
+
+ test_res_value = self.client.stacks.output_show(
+ stack_id, 'test_res_value')['output']
+ self.assertEqual('env_is_test', test_res_value['output_value'])
+
+ prod_resource = self.client.stacks.output_show(
+ stack_id, 'prod_resource')['output']
+ self.assertEqual('no_prod_res', prod_resource['output_value'])
+
+ test_res_output = self.client.stacks.output_show(
+ stack_id, 'test_res1_value')['output']
+ self.assertEqual('just in test env',
+ test_res_output['output_value'])
+
+ beijing_prod_res = self.client.stacks.output_show(
+ stack_id, 'beijing_prod_res')['output']
+ self.assertEqual('no_prod_res', beijing_prod_res['output_value'])
+
+ def test_stack_create_update_cfn_template_test_to_prod(self):
+ stack_identifier = self.stack_create(template=cfn_template)
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'zone': 'fuzhou'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources, fj_zone=True)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources, shannxi_provice=True)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'env_type': 'prod'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources)
+ self.output_assert_for_prod(stack_identifier)
+
+ parms = {'env_type': 'prod',
+ 'zone': 'shanghai'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, False)
+ self.output_assert_for_prod(stack_identifier, False)
+
+ parms = {'env_type': 'prod',
+ 'zone': 'xiamen'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, bj_prod=False, fj_zone=True)
+ self.output_assert_for_prod(stack_identifier, False)
+
+ parms = {'env_type': 'prod',
+ 'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
+ shannxi_provice=True)
+ self.output_assert_for_prod(stack_identifier, False)
+
+ def test_stack_create_update_cfn_template_prod_to_test(self):
+ parms = {'env_type': 'prod'}
+ stack_identifier = self.stack_create(template=cfn_template,
+ parameters=parms)
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources)
+ self.output_assert_for_prod(stack_identifier)
+
+ parms = {'zone': 'xiamen',
+ 'env_type': 'prod'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, bj_prod=False, fj_zone=True)
+ self.output_assert_for_prod(stack_identifier, bj_prod=False)
+
+ parms = {'zone': 'xianyang',
+ 'env_type': 'prod'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
+ shannxi_provice=True)
+ self.output_assert_for_prod(stack_identifier, bj_prod=False)
+
+ parms = {'zone': 'shanghai',
+ 'env_type': 'prod'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
+ shannxi_provice=False)
+ self.output_assert_for_prod(stack_identifier, bj_prod=False)
+
+ parms = {'env_type': 'test'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'env_type': 'test',
+ 'zone': 'fuzhou'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources, fj_zone=True)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'env_type': 'test',
+ 'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=cfn_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources, fj_zone=False,
+ shannxi_provice=True)
+ self.output_assert_for_test(stack_identifier)
+
+ def test_stack_create_update_hot_template_test_to_prod(self):
+ stack_identifier = self.stack_create(template=hot_template)
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources, shannxi_provice=True)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'env_type': 'prod'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources)
+ self.output_assert_for_prod(stack_identifier)
+
+ parms = {'env_type': 'prod',
+ 'zone': 'shanghai'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, False)
+ self.output_assert_for_prod(stack_identifier, False)
+
+ parms = {'env_type': 'prod',
+ 'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, False, shannxi_provice=True)
+ self.output_assert_for_prod(stack_identifier, False)
+
+ def test_stack_create_update_hot_template_prod_to_test(self):
+ parms = {'env_type': 'prod'}
+ stack_identifier = self.stack_create(template=hot_template,
+ parameters=parms)
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources)
+ self.output_assert_for_prod(stack_identifier)
+
+ parms = {'env_type': 'prod',
+ 'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_prod(resources, False, shannxi_provice=True)
+ self.output_assert_for_prod(stack_identifier, False)
+
+ parms = {'env_type': 'test'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources)
+ self.output_assert_for_test(stack_identifier)
+
+ parms = {'env_type': 'test',
+ 'zone': 'xianyang'}
+ self.update_stack(stack_identifier,
+ template=hot_template,
+ parameters=parms)
+
+ resources = self.client.resources.list(stack_identifier)
+ self.res_assert_for_test(resources, fj_zone=False,
+ shannxi_provice=True)
+ self.output_assert_for_test(stack_identifier)
+
+ def test_condition_rename(self):
+ stack_identifier = self.stack_create(template=before_rename_tmpl)
+ self.update_stack(stack_identifier, template=after_rename_tmpl)
+ self.update_stack(stack_identifier, template=fail_rename_tmpl,
+ expected_status='UPDATE_FAILED')
+ self.update_stack(stack_identifier, template=recover_rename_tmpl)
diff --git a/functional/test_create_update.py b/functional/test_create_update.py
index 8c78951..771404a 100644
--- a/functional/test_create_update.py
+++ b/functional/test_create_update.py
@@ -26,7 +26,7 @@
'value': 'Test1',
'fail': False,
'update_replace': False,
- 'wait_secs': 0,
+ 'wait_secs': 1,
'action_wait_secs': {'create': 1},
'client_name': 'nova',
'entity_name': 'servers',
@@ -74,9 +74,6 @@
class CreateStackTest(functional_base.FunctionalTestsBase):
- def setUp(self):
- super(CreateStackTest, self).setUp()
-
def test_create_rollback(self):
values = {'fail': True, 'value': 'test_create_rollback'}
template = _change_rsrc_properties(test_template_one_resource,
@@ -150,11 +147,9 @@
properties:
value: Test
fail: {get_param: do_fail}
+ wait_secs: 1
'''
- def setUp(self):
- super(UpdateStackTest, self).setUp()
-
def test_stack_update_nochange(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
diff --git a/functional/test_create_update_neutron_port.py b/functional/test_create_update_neutron_port.py
index 575d21c..2109012 100644
--- a/functional/test_create_update_neutron_port.py
+++ b/functional/test_create_update_neutron_port.py
@@ -26,6 +26,7 @@
subnet:
type: OS::Neutron::Subnet
properties:
+ enable_dhcp: false
network: { get_resource: net }
cidr: 11.11.11.0/24
port:
@@ -50,9 +51,6 @@
class UpdatePortTest(functional_base.FunctionalTestsBase):
- def setUp(self):
- super(UpdatePortTest, self).setUp()
-
def get_port_id_and_ip(self, stack_identifier):
resources = self.client.resources.list(stack_identifier)
port_id = [res.physical_resource_id for res in resources
@@ -72,9 +70,8 @@
self.update_stack(stack_identifier, templ_no_ip,
parameters=parameters)
- new_id, new_ip = self.get_port_id_and_ip(stack_identifier)
- # port id and ip should be different
- self.assertNotEqual(_ip, new_ip)
+ new_id, _ = self.get_port_id_and_ip(stack_identifier)
+ # port id should be different
self.assertNotEqual(_id, new_id)
def test_stack_update_replace_with_ip(self):
@@ -161,6 +158,5 @@
self.update_stack(stack_identifier, templ_no_ip)
new_id, new_ip = self.get_port_id_and_ip(stack_identifier)
- # port should be updated with the same id, but different ip
- self.assertNotEqual(_ip, new_ip)
+ # port should be updated with the same id
self.assertEqual(_id, new_id)
diff --git a/functional/test_create_update_neutron_subnet.py b/functional/test_create_update_neutron_subnet.py
index ceb74a9..b745619 100644
--- a/functional/test_create_update_neutron_subnet.py
+++ b/functional/test_create_update_neutron_subnet.py
@@ -33,12 +33,29 @@
value: {get_attr: [subnet, gateway_ip]}
'''
+test_template_with_translation = '''
+heat_template_version: 2016-10-14
+description: Test template to create/update subnet with translation
+parameters:
+ net_cidr:
+ type: string
+resources:
+ net:
+ type: OS::Neutron::Net
+ net_value:
+ type: OS::Heat::Value
+ properties:
+ value: {get_resource: net}
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_attr: [net_value, value] }
+ cidr: {get_param: net_cidr}
+'''
+
class UpdateSubnetTest(functional_base.FunctionalTestsBase):
- def setUp(self):
- super(UpdateSubnetTest, self).setUp()
-
def get_outputs(self, stack_identifier, output_key):
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, output_key)
@@ -128,3 +145,14 @@
new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
# new gateway_ip should be None
self.assertIsNone(new_gw_ip)
+
+ def test_update_with_network_translation(self):
+ # Just create and update where network is translated properly.
+ env = {'parameters': {'net_cidr': '11.11.11.0/24'}}
+ stack_identifier = self.stack_create(
+ template=test_template_with_translation,
+ environment=env)
+ env = {'parameters': {'net_cidr': '11.11.12.0/24'}}
+ self.update_stack(stack_identifier,
+ template=test_template_with_translation,
+ environment=env)
diff --git a/functional/test_default_parameters.py b/functional/test_default_parameters.py
index 7201969..aa2ca35 100644
--- a/functional/test_default_parameters.py
+++ b/functional/test_default_parameters.py
@@ -63,9 +63,6 @@
expect1=12, expect2=12)),
]
- def setUp(self):
- super(DefaultParametersTest, self).setUp()
-
def test_defaults(self):
env = {'parameters': {}, 'parameter_defaults': {}}
if self.param:
diff --git a/functional/test_delete.py b/functional/test_delete.py
new file mode 100644
index 0000000..92b1c74
--- /dev/null
+++ b/functional/test_delete.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from heat_integrationtests.functional import functional_base
+
+
+class DeleteInProgressTest(functional_base.FunctionalTestsBase):
+
+ root_template = '''
+heat_template_version: 2013-05-23
+resources:
+ rg:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 125
+ resource_def:
+ type: empty.yaml
+'''
+
+ empty_template = '''
+heat_template_version: 2013-05-23
+resources:
+'''
+
+ def test_delete_nested_stacks_create_in_progress(self):
+ files = {'empty.yaml': self.empty_template}
+ identifier = self.stack_create(template=self.root_template,
+ files=files,
+ expected_status='CREATE_IN_PROGRESS')
+ time.sleep(20)
+ self._stack_delete(identifier)
diff --git a/functional/test_encrypted_parameter.py b/functional/test_encrypted_parameter.py
index 21dc925..5ec8a27 100644
--- a/functional/test_encrypted_parameter.py
+++ b/functional/test_encrypted_parameter.py
@@ -43,9 +43,6 @@
value: { get_param: foo }
'''
- def setUp(self):
- super(EncryptedParametersTest, self).setUp()
-
def test_db_encryption(self):
# Create a stack with the value of 'foo' to be encrypted
foo_param = 'my_encrypted_foo'
diff --git a/functional/test_encryption_vol_type.py b/functional/test_encryption_vol_type.py
index 2679990..b34b094 100644
--- a/functional/test_encryption_vol_type.py
+++ b/functional/test_encryption_vol_type.py
@@ -11,8 +11,6 @@
# under the License.
-from heat_integrationtests.common import clients
-from heat_integrationtests.common import config
from heat_integrationtests.functional import functional_base
test_encryption_vol_type = {
@@ -44,15 +42,10 @@
super(EncryptionVolTypeTest, self).setUp()
if not self.conf.admin_username or not self.conf.admin_password:
self.skipTest('No admin creds found, skipping')
- self.conf = config.init_conf()
# cinder security policy usage of volume type is limited
# to being used by administrators only.
- # Temporarily switch to admin
- self.conf.username = self.conf.admin_username
- self.conf.password = self.conf.admin_password
- self.manager = clients.ClientManager(self.conf)
- self.client = self.manager.orchestration_client
- self.volume_client = self.manager.volume_client
+ # Switch to admin
+ self.setup_clients_for_admin()
def check_stack(self, sid):
vt = 'my_volume_type'
diff --git a/functional/test_env_merge.py b/functional/test_env_merge.py
index 5e222b8..819770a 100644
--- a/functional/test_env_merge.py
+++ b/functional/test_env_merge.py
@@ -92,4 +92,4 @@
# by checking to see that it has a value
r3b = self.client.resources.get(stack_id, 'r3b')
r3b_attrs = r3b.attributes
- self.assertTrue('value' in r3b_attrs)
+ self.assertIn('value', r3b_attrs)
diff --git a/functional/test_event_sinks.py b/functional/test_event_sinks.py
index e4a23ff..ea66b7d 100644
--- a/functional/test_event_sinks.py
+++ b/functional/test_event_sinks.py
@@ -43,7 +43,11 @@
'os_username': self.conf.username,
'os_password': self.conf.password,
'os_project_name': self.conf.tenant_name,
- 'os_auth_url': self.conf.auth_url
+ 'os_auth_url': self.conf.auth_url,
+ 'os_user_domain_id': self.conf.user_domain_id,
+ 'os_project_domain_id': self.conf.project_domain_id,
+ 'os_user_domain_name': self.conf.user_domain_name,
+ 'os_project_domain_name': self.conf.project_domain_name
}
}
}
diff --git a/functional/test_external_ref.py b/functional/test_external_ref.py
new file mode 100644
index 0000000..2601ca7
--- /dev/null
+++ b/functional/test_external_ref.py
@@ -0,0 +1,83 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.functional import functional_base
+
+
+class ExternalReferencesTest(functional_base.FunctionalTestsBase):
+
+ TEMPLATE = '''
+heat_template_version: 2016-10-14
+resources:
+ test1:
+ type: OS::Heat::TestResource
+'''
+ TEMPLATE_WITH_EX_REF = '''
+heat_template_version: 2016-10-14
+resources:
+ test1:
+ type: OS::Heat::TestResource
+ external_id: foobar
+outputs:
+ str:
+ value: {get_resource: test1}
+'''
+
+ def test_create_with_external_ref(self):
+ stack_name = self._stack_rand_name()
+ stack_identifier = self.stack_create(
+ stack_name=stack_name,
+ template=self.TEMPLATE_WITH_EX_REF,
+ files={},
+ disable_rollback=True,
+ parameters={},
+ environment={}
+ )
+
+ stack = self.client.stacks.get(stack_identifier)
+
+ self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+ expected_resources = {'test1': 'OS::Heat::TestResource'}
+ self.assertEqual(expected_resources,
+ self.list_resources(stack_identifier))
+ stack = self.client.stacks.get(stack_identifier)
+ self.assertEqual(
+ [{'description': 'No description given',
+ 'output_key': 'str',
+ 'output_value': 'foobar'}], stack.outputs)
+
+ def test_update_with_external_ref(self):
+ stack_name = self._stack_rand_name()
+ stack_identifier = self.stack_create(
+ stack_name=stack_name,
+ template=self.TEMPLATE,
+ files={},
+ disable_rollback=True,
+ parameters={},
+ environment={}
+ )
+ stack = self.client.stacks.get(stack_identifier)
+
+ self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+ expected_resources = {'test1': 'OS::Heat::TestResource'}
+ self.assertEqual(expected_resources,
+ self.list_resources(stack_identifier))
+ stack = self.client.stacks.get(stack_identifier)
+ self.assertEqual([], stack.outputs)
+
+ stack_name = stack_identifier.split('/')[0]
+ kwargs = {'stack_id': stack_identifier, 'stack_name': stack_name,
+ 'template': self.TEMPLATE_WITH_EX_REF, 'files': {},
+ 'disable_rollback': True, 'parameters': {}, 'environment': {}
+ }
+ self.client.stacks.update(**kwargs)
+ self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')
diff --git a/functional/test_heat_autoscaling.py b/functional/test_heat_autoscaling.py
index 357612c..0fa9240 100644
--- a/functional/test_heat_autoscaling.py
+++ b/functional/test_heat_autoscaling.py
@@ -92,9 +92,6 @@
value: {get_attr: [random_str, value]}
'''
- def setUp(self):
- super(HeatAutoscalingTest, self).setUp()
-
def _assert_output_values(self, stack_id):
stack = self.client.stacks.get(stack_id)
all_values = self._stack_output(stack, 'all_values')
@@ -180,9 +177,6 @@
scaling_adjustment: 1
'''
- def setUp(self):
- super(AutoScalingGroupUpdateWithNoChanges, self).setUp()
-
def test_as_group_update_without_resource_changes(self):
stack_identifier = self.stack_create(template=self.template)
new_template = self.template.replace(
diff --git a/functional/test_instance_group.py b/functional/test_instance_group.py
index 02a2858..44b3aa9 100644
--- a/functional/test_instance_group.py
+++ b/functional/test_instance_group.py
@@ -27,7 +27,8 @@
"Parameters" : {"size": {"Type": "String", "Default": "1"},
"AZ": {"Type": "String", "Default": "nova"},
"image": {"Type": "String"},
- "flavor": {"Type": "String"}},
+ "flavor": {"Type": "String"},
+ "user_data": {"Type": "String", "Default": "jsconfig data"}},
"Resources": {
"JobServerGroup": {
"Type": "OS::Heat::InstanceGroup",
@@ -45,7 +46,7 @@
"ImageId" : {"Ref": "image"},
"InstanceType" : {"Ref": "flavor"},
"SecurityGroups" : [ "sg-1" ],
- "UserData" : "jsconfig data"
+ "UserData" : {"Ref": "user_data"}
}
}
},
@@ -71,7 +72,7 @@
random1:
type: OS::Heat::RandomString
properties:
- salt: {get_param: ImageId}
+ salt: {get_param: UserData}
outputs:
PublicIp:
value: {get_attr: [random1, value]}
@@ -105,8 +106,6 @@
def setUp(self):
super(InstanceGroupTest, self).setUp()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
if not self.conf.minimal_image_ref:
raise self.skipException("No minimal image configured to test")
if not self.conf.instance_type:
@@ -141,7 +140,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 4,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
files=files, environment=env)
@@ -158,7 +157,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
@@ -170,7 +169,7 @@
# Increase min size to 5
env2 = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 5,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
self.update_stack(stack_identifier, self.template,
environment=env2, files=files)
@@ -187,7 +186,7 @@
env = {'resource_registry':
{'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 1,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
@@ -200,8 +199,9 @@
{'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': '2',
'AZ': 'wibble',
- 'image': self.conf.image_ref,
- 'flavor': self.conf.instance_type}}
+ 'image': self.conf.minimal_image_ref,
+ 'flavor': self.conf.instance_type,
+ 'user_data': 'new data'}}
self.update_stack(stack_identifier, self.template,
environment=env2, files=files)
@@ -219,7 +219,7 @@
files = {'provider.yaml': self.bad_instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
self.client.stacks.create(
@@ -254,7 +254,7 @@
files = {'provider.yaml': self.instance_template}
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': 2,
- 'image': self.conf.image_ref,
+ 'image': self.conf.minimal_image_ref,
'flavor': self.conf.instance_type}}
stack_identifier = self.stack_create(template=self.template,
@@ -323,8 +323,8 @@
size = 5
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': size,
- 'image': self.conf.image_ref,
- 'flavor': self.conf.instance_type}}
+ 'image': self.conf.minimal_image_ref,
+ 'flavor': self.conf.minimal_instance_type}}
stack_name = self._stack_rand_name()
stack_identifier = self.stack_create(
stack_name=stack_name,
@@ -394,7 +394,7 @@
policy['MinInstancesInService'] = '1'
policy['MaxBatchSize'] = '3'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=5,
@@ -414,7 +414,7 @@
policy['MinInstancesInService'] = '4'
policy['MaxBatchSize'] = '4'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=2,
@@ -430,7 +430,7 @@
policy['MinInstancesInService'] = '0'
policy['MaxBatchSize'] = '20'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=5,
@@ -447,7 +447,7 @@
policy['MaxBatchSize'] = '2'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['ImageId'] = self.conf.minimal_image_ref
+ config['Properties']['UserData'] = 'new data'
self.update_instance_group(updt_template,
num_updates_expected_on_updt=3,
@@ -469,7 +469,7 @@
policy['MaxBatchSize'] = '3'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['InstanceType'] = 'm1.tiny'
+ config['Properties']['InstanceType'] = self.conf.instance_type
self.update_instance_group(updt_template,
num_updates_expected_on_updt=5,
@@ -491,7 +491,7 @@
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
- config['Properties']['InstanceType'] = 'm1.tiny'
+ config['Properties']['InstanceType'] = self.conf.instance_type
self.update_instance_group(updt_template,
num_updates_expected_on_updt=2,
diff --git a/functional/test_lbaasv2.py b/functional/test_lbaasv2.py
index 983c48a..e7f56ef 100644
--- a/functional/test_lbaasv2.py
+++ b/functional/test_lbaasv2.py
@@ -10,23 +10,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from heat_integrationtests.functional import functional_base
-LOG = logging.getLogger(__name__)
-
class LoadBalancerv2Test(functional_base.FunctionalTestsBase):
create_template = '''
heat_template_version: 2016-04-08
+parameters:
+ subnet:
+ type: string
resources:
loadbalancer:
type: OS::Neutron::LBaaS::LoadBalancer
properties:
description: aLoadBalancer
- vip_subnet: private-subnet
+ vip_subnet: { get_param: subnet }
listener:
type: OS::Neutron::LBaaS::Listener
properties:
@@ -48,7 +48,7 @@
address: 1.1.1.1
pool: { get_resource: pool }
protocol_port: 1111
- subnet: private-subnet
+ subnet: { get_param: subnet }
weight: 255
# pm2
healthmonitor:
@@ -79,7 +79,7 @@
address: 2.2.2.2
pool: { get_resource: pool }
protocol_port: 2222
- subnet: private-subnet
+ subnet: { get_param: subnet }
weight: 222
'''
@@ -89,7 +89,11 @@
self.skipTest('LBaasv2 extension not available, skipping')
def test_create_update_loadbalancer(self):
- stack_identifier = self.stack_create(template=self.create_template)
+ parameters = {
+ 'subnet': self.conf.fixed_subnet_name,
+ }
+ stack_identifier = self.stack_create(template=self.create_template,
+ parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
@@ -101,7 +105,8 @@
template = template.replace('aLoadBalancer', 'updatedLoadBalancer')
template = template.replace('aPool', 'updatedPool')
template = template.replace('aListener', 'updatedListener')
- self.update_stack(stack_identifier, template=template)
+ self.update_stack(stack_identifier, template=template,
+ parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
@@ -121,7 +126,11 @@
self.assertEqual('updatedListener', output['description'])
def test_add_delete_poolmember(self):
- stack_identifier = self.stack_create(template=self.create_template)
+ parameters = {
+ 'subnet': self.conf.fixed_subnet_name,
+ }
+ stack_identifier = self.stack_create(template=self.create_template,
+ parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
@@ -129,14 +138,16 @@
self.assertEqual(1, len(output['members']))
# add pool member
template = self.create_template.replace('# pm2', self.add_member)
- self.update_stack(stack_identifier, template=template)
+ self.update_stack(stack_identifier, template=template,
+ parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(2, len(output['members']))
# delete pool member
- self.update_stack(stack_identifier, template=self.create_template)
+ self.update_stack(stack_identifier, template=self.create_template,
+ parameters=parameters)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
diff --git a/functional/test_nova_server_networks.py b/functional/test_nova_server_networks.py
index ae550b2..e631e3e 100644
--- a/functional/test_nova_server_networks.py
+++ b/functional/test_nova_server_networks.py
@@ -31,6 +31,16 @@
properties:
network: {get_resource: net}
cidr: 11.11.11.0/24
+ security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: the_sg
+ description: Ping and SSH
+ rules:
+ - protocol: icmp
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
server:
type: OS::Nova::Server
properties:
@@ -39,23 +49,55 @@
networks:
- subnet: {get_resource: subnet}
fixed_ip: 11.11.11.11
+ security_groups:
+ - {get_resource: security_group}
outputs:
networks:
value: {get_attr: [server, networks]}
'''
+server_with_port_template = '''
+heat_template_version: 2016-04-08
+description: Test template to test nova server with port.
+parameters:
+ flavor:
+ type: string
+ image:
+ type: string
+resources:
+ net:
+ type: OS::Neutron::Net
+ properties:
+ name: my_net
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: net}
+ cidr: 11.11.11.0/24
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: net}
+ fixed_ips:
+ - subnet: {get_resource: subnet}
+ ip_address: 11.11.11.11
+ server:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: image}
+ flavor: {get_param: flavor}
+ networks:
+ - port: {get_resource: port}
+'''
+
class CreateServerTest(functional_base.FunctionalTestsBase):
- def setUp(self):
- super(CreateServerTest, self).setUp()
-
def get_outputs(self, stack_identifier, output_key):
stack = self.client.stacks.get(stack_identifier)
- output = self._stack_output(stack, output_key)
- return output
+ return self._stack_output(stack, output_key)
- def test_create_server_with_subnet_fixed_ip(self):
+ def test_create_server_with_subnet_fixed_ip_sec_group(self):
parms = {'flavor': self.conf.minimal_instance_type,
'image': self.conf.minimal_image_ref}
stack_identifier = self.stack_create(
@@ -65,6 +107,12 @@
networks = self.get_outputs(stack_identifier, 'networks')
self.assertEqual(['11.11.11.11'], networks['my_net'])
+ server_resource = self.client.resources.get(
+ stack_identifier, 'server')
+ server_id = server_resource.physical_resource_id
+ server = self.compute_client.servers.get(server_id)
+ self.assertEqual([{"name": "the_sg"}], server.security_groups)
+
def test_create_update_server_with_subnet(self):
parms = {'flavor': self.conf.minimal_instance_type,
'image': self.conf.minimal_image_ref}
@@ -86,3 +134,13 @@
parameters=parms)
new_networks = self.get_outputs(stack_identifier, 'networks')
self.assertNotEqual(['11.11.11.22'], new_networks['my_net'])
+
+ def test_create_server_with_port(self):
+ parms = {'flavor': self.conf.minimal_instance_type,
+ 'image': self.conf.minimal_image_ref}
+ # We just want to make sure we can create the server, no need to assert
+ # anything
+ self.stack_create(
+ template=server_with_port_template,
+ stack_name='server_with_port',
+ parameters=parms)
diff --git a/functional/test_purge.py b/functional/test_purge.py
index 42feee3..fd652a9 100644
--- a/functional/test_purge.py
+++ b/functional/test_purge.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from oslo_concurrency import processutils
from heat_integrationtests.functional import functional_base
@@ -30,6 +32,7 @@
stacks = dict((stack.id, stack) for stack in
self.client.stacks.list(show_deleted=True))
self.assertIn(stack_identifier.split('/')[1], stacks)
+ time.sleep(1)
cmd = "heat-manage purge_deleted 0"
processutils.execute(cmd, shell=True)
stacks = dict((stack.id, stack) for stack in
@@ -40,6 +43,7 @@
stack_identifier = self.stack_create(template=self.template,
tags="foo,bar")
self._stack_delete(stack_identifier)
+ time.sleep(1)
cmd = "heat-manage purge_deleted 0"
processutils.execute(cmd, shell=True)
stacks = dict((stack.id, stack) for stack in
diff --git a/functional/test_replace_deprecated.py b/functional/test_replace_deprecated.py
new file mode 100644
index 0000000..5e7fdc6
--- /dev/null
+++ b/functional/test_replace_deprecated.py
@@ -0,0 +1,92 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import yaml
+
+from heat_integrationtests.functional import functional_base
+
+
+class ReplaceDeprecatedResourceTest(functional_base.FunctionalTestsBase):
+ template = '''
+heat_template_version: "2013-05-23"
+parameters:
+ flavor:
+ type: string
+ image:
+ type: string
+ network:
+ type: string
+
+resources:
+ config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config: xxxx
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: image}
+ flavor: {get_param: flavor}
+ networks: [{network: {get_param: network} }]
+ user_data_format: SOFTWARE_CONFIG
+ dep:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ config: {get_resource: config}
+ servers: {'0': {get_resource: server}}
+ signal_transport: NO_SIGNAL
+outputs:
+ server:
+ value: {get_resource: server}
+'''
+
+ deployment_group_snippet = '''
+type: OS::Heat::SoftwareDeploymentGroup
+properties:
+ config: {get_resource: config}
+ servers: {'0': {get_resource: server}}
+ signal_transport: NO_SIGNAL
+'''
+ enable_cleanup = True
+
+ def test_replace_software_deployments(self):
+ parms = {'flavor': self.conf.minimal_instance_type,
+ 'network': self.conf.fixed_network_name,
+ 'image': self.conf.minimal_image_ref
+ }
+ deployments_template = yaml.safe_load(self.template)
+ stack_identifier = self.stack_create(
+ parameters=parms,
+ template=deployments_template,
+ enable_cleanup=self.enable_cleanup)
+ expected_resources = {'config': 'OS::Heat::SoftwareConfig',
+ 'dep': 'OS::Heat::SoftwareDeployments',
+ 'server': 'OS::Nova::Server'}
+ resource = self.client.resources.get(stack_identifier, 'server')
+ self.assertEqual(expected_resources,
+ self.list_resources(stack_identifier))
+ initial_phy_id = resource.physical_resource_id
+ resources = deployments_template['resources']
+ resources['dep'] = yaml.safe_load(self.deployment_group_snippet)
+ self.update_stack(
+ stack_identifier,
+ deployments_template,
+ parameters=parms)
+ resource = self.client.resources.get(stack_identifier, 'server')
+ self.assertEqual(initial_phy_id,
+ resource.physical_resource_id)
+ expected_new_resources = {'config': 'OS::Heat::SoftwareConfig',
+ 'dep': 'OS::Heat::SoftwareDeploymentGroup',
+ 'server': 'OS::Nova::Server'}
+ self.assertEqual(expected_new_resources,
+ self.list_resources(stack_identifier))
diff --git a/functional/test_resource_chain.py b/functional/test_resource_chain.py
index 1086e74..2898ebe 100644
--- a/functional/test_resource_chain.py
+++ b/functional/test_resource_chain.py
@@ -58,7 +58,7 @@
# Verify
stack = self.client.stacks.get(stack_id)
- self.assertTrue(stack is not None)
+ self.assertIsNotNone(stack)
# Top-level resource for chain
expected = {'my-chain': 'OS::Heat::ResourceChain'}
@@ -74,15 +74,15 @@
# Outputs
resource_ids = self._stack_output(stack, 'resource-ids')
- self.assertTrue(resource_ids is not None)
+ self.assertIsNotNone(resource_ids)
self.assertEqual(2, len(resource_ids))
resource_value = self._stack_output(stack, 'resource-0-value')
- self.assertTrue(resource_value is not None)
+ self.assertIsNotNone(resource_value)
self.assertEqual(8, len(resource_value)) # from parameter
resource_attrs = self._stack_output(stack, 'all-resource-attrs')
- self.assertTrue(resource_attrs is not None)
+ self.assertIsNotNone(resource_attrs)
self.assertIsInstance(resource_attrs, dict)
self.assertEqual(2, len(resource_attrs))
self.assertEqual(8, len(resource_attrs['0']))
diff --git a/functional/test_resource_group.py b/functional/test_resource_group.py
index 1e9edd5..8d8cc89 100644
--- a/functional/test_resource_group.py
+++ b/functional/test_resource_group.py
@@ -42,13 +42,10 @@
value: {get_attr: [random_group, value]}
'''
- def setUp(self):
- super(ResourceGroupTest, self).setUp()
-
def test_resource_group_zero_novalidate(self):
# Nested resources should be validated only when size > 0
# This allows features to be disabled via size=0 without
- # triggering validation of nested resource custom contraints
+ # triggering validation of nested resource custom constraints
# e.g images etc in the nested schema.
nested_template_fail = '''
heat_template_version: 2013-05-23
@@ -299,6 +296,7 @@
files=files1)
self.assertEqual({u'random_group': u'OS::Heat::ResourceGroup'},
self.list_resources(stack_identifier))
+ self.assertEqual(files1, self.client.stacks.files(stack_identifier))
initial_nested_ident = self.group_nested_identifier(stack_identifier,
'random_group')
@@ -315,6 +313,7 @@
updated_nested_ident = self.group_nested_identifier(stack_identifier,
'random_group')
self.assertEqual(initial_nested_ident, updated_nested_ident)
+ self.assertEqual(files2, self.client.stacks.files(stack_identifier))
# compare the output, we expect a change.
stack1 = self.client.stacks.get(stack_identifier)
@@ -375,9 +374,6 @@
)),
]
- def setUp(self):
- super(ResourceGroupTestNullParams, self).setUp()
-
def test_create_pass_zero_parameter(self):
templ = self.template.replace('type: empty',
'type: %s' % self.p_type)
@@ -415,9 +411,6 @@
value: {get_attr: [group1, resource.1.value]}
'''
- def setUp(self):
- super(ResourceGroupAdoptTest, self).setUp()
-
def _yaml_to_json(self, yaml_templ):
return yaml.safe_load(yaml_templ)
@@ -487,9 +480,6 @@
wait_secs: 2
'''
- def setUp(self):
- super(ResourceGroupErrorResourceTest, self).setUp()
-
def test_fail(self):
stack_identifier = self.stack_create(
template=self.template,
diff --git a/functional/test_resources_list.py b/functional/test_resources_list.py
new file mode 100644
index 0000000..257afc5
--- /dev/null
+++ b/functional/test_resources_list.py
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.functional import functional_base
+
+
+test_template_depend = {
+ 'heat_template_version': '2013-05-23',
+ 'resources': {
+ 'test1': {
+ 'type': 'OS::Heat::TestResource',
+ 'properties': {
+ 'value': 'Test1',
+ }
+ },
+ 'test2': {
+ 'type': 'OS::Heat::TestResource',
+ 'depends_on': ['test1'],
+ 'properties': {
+ 'value': 'Test2',
+ }
+ }
+ }
+}
+
+
+class ResourcesList(functional_base.FunctionalTestsBase):
+
+ def test_filtering_with_depend(self):
+ stack_identifier = self.stack_create(template=test_template_depend)
+ [test2] = self.client.resources.list(stack_identifier,
+ filters={'name': 'test2'})
+
+ self.assertEqual('CREATE_COMPLETE', test2.resource_status)
diff --git a/functional/test_software_deployment_group.py b/functional/test_software_deployment_group.py
new file mode 100644
index 0000000..4e8b868
--- /dev/null
+++ b/functional/test_software_deployment_group.py
@@ -0,0 +1,142 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.functional import functional_base
+
+
+class SoftwareDeploymentGroupTest(functional_base.FunctionalTestsBase):
+ sd_template = '''
+heat_template_version: 2016-10-14
+
+parameters:
+ input:
+ type: string
+ default: foo_input
+
+resources:
+ config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: foo
+
+ deployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: config}
+ input_values:
+ foo: {get_param: input}
+ servers:
+ '0': dummy0
+ '1': dummy1
+ '2': dummy2
+ '3': dummy3
+'''
+
+ sd_template_with_upd_policy = '''
+heat_template_version: 2016-10-14
+
+parameters:
+ input:
+ type: string
+ default: foo_input
+
+resources:
+ config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: foo
+
+ deployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ update_policy:
+ rolling_update:
+ max_batch_size: 2
+ pause_time: 1
+ properties:
+ config: {get_resource: config}
+ input_values:
+ foo: {get_param: input}
+ servers:
+ '0': dummy0
+ '1': dummy1
+ '2': dummy2
+ '3': dummy3
+'''
+ enable_cleanup = True
+
+ def deployment_crud(self, template):
+ stack_identifier = self.stack_create(
+ template=template,
+ enable_cleanup=self.enable_cleanup,
+ expected_status='CREATE_IN_PROGRESS')
+ self._wait_for_resource_status(
+ stack_identifier, 'deployment', 'CREATE_IN_PROGRESS')
+ nested_identifier = self.assert_resource_is_a_stack(
+ stack_identifier, 'deployment')
+ group_resources = self.list_group_resources(
+ stack_identifier, 'deployment', minimal=False)
+
+ self.assertEqual(4, len(group_resources))
+ self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE',
+ signal_required=True,
+ resources_to_signal=group_resources)
+
+ self.check_input_values(group_resources, 'foo', 'foo_input')
+
+ self.update_stack(stack_identifier,
+ template=template,
+ environment={'parameters': {'input': 'input2'}},
+ expected_status='UPDATE_IN_PROGRESS')
+ nested_identifier = self.assert_resource_is_a_stack(
+ stack_identifier, 'deployment')
+ self.assertEqual(4, len(group_resources))
+ self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE',
+ signal_required=True,
+ resources_to_signal=group_resources)
+
+ self.check_input_values(group_resources, 'foo', 'input2')
+
+        # We explicitly test delete here, rather than just via cleanup,
+        # and check that the nested stack is gone
+ self._stack_delete(stack_identifier)
+ self._wait_for_stack_status(
+ nested_identifier, 'DELETE_COMPLETE',
+ success_on_not_found=True)
+
+ def test_deployment_crud(self):
+ self.deployment_crud(self.sd_template)
+
+ def test_deployment_crud_with_rolling_update(self):
+ self.deployment_crud(self.sd_template_with_upd_policy)
+
+ def test_deployments_create_delete_in_progress(self):
+ stack_identifier = self.stack_create(
+ template=self.sd_template,
+ enable_cleanup=self.enable_cleanup,
+ expected_status='CREATE_IN_PROGRESS')
+ self._wait_for_resource_status(
+ stack_identifier, 'deployment', 'CREATE_IN_PROGRESS')
+ nested_identifier = self.assert_resource_is_a_stack(
+ stack_identifier, 'deployment')
+ group_resources = self.list_group_resources(
+ stack_identifier, 'deployment', minimal=False)
+
+ self.assertEqual(4, len(group_resources))
+ # Now test delete while the stacks are still IN_PROGRESS
+ self._stack_delete(stack_identifier)
+ self._wait_for_stack_status(
+ nested_identifier, 'DELETE_COMPLETE',
+ success_on_not_found=True)
diff --git a/functional/test_stack_events.py b/functional/test_stack_events.py
index b1b2339..d5a7fad 100644
--- a/functional/test_stack_events.py
+++ b/functional/test_stack_events.py
@@ -32,9 +32,6 @@
value: { get_resource: test_resource }
'''
- def setUp(self):
- super(StackEventsTest, self).setUp()
-
def _verify_event_fields(self, event, event_characteristics):
self.assertIsNotNone(event_characteristics)
self.assertIsNotNone(event.event_time)
@@ -77,7 +74,7 @@
stack_event.resource_name)
# Resource events are a subset of the original stack event list
- self.assertTrue(len(resource_events) < len(stack_events))
+ self.assertLess(len(resource_events), len(stack_events))
# Get the event details for each resource event
for resource_event in resource_events:
diff --git a/functional/test_swiftsignal_update.py b/functional/test_swiftsignal_update.py
index 5321014..0cf2f81 100644
--- a/functional/test_swiftsignal_update.py
+++ b/functional/test_swiftsignal_update.py
@@ -33,9 +33,6 @@
class SwiftSignalHandleUpdateTest(functional_base.FunctionalTestsBase):
- def setUp(self):
- super(SwiftSignalHandleUpdateTest, self).setUp()
-
def test_stack_update_same_template_replace_no_url(self):
stack_identifier = self.stack_create(template=test_template)
stack = self.client.stacks.get(stack_identifier)
diff --git a/functional/test_template_resource.py b/functional/test_template_resource.py
index ebfd73e..ef2f4f5 100644
--- a/functional/test_template_resource.py
+++ b/functional/test_template_resource.py
@@ -47,9 +47,6 @@
"OS::Heat::RandomString": nested.yaml
'''
- def setUp(self):
- super(TemplateResourceTest, self).setUp()
-
def test_nested_env(self):
main_templ = '''
heat_template_version: 2013-05-23
@@ -172,9 +169,6 @@
"My::NestedSecret": nested.yaml
'''
- def setUp(self):
- super(NestedAttributesTest, self).setUp()
-
def test_stack_ref(self):
nested_templ = '''
heat_template_version: 2014-10-16
@@ -496,9 +490,6 @@
expect=NOCHANGE)),
]
- def setUp(self):
- super(TemplateResourceUpdateTest, self).setUp()
-
def test_template_resource_update_template_schema(self):
stack_identifier = self.stack_create(
template=self.main_template,
@@ -598,9 +589,6 @@
Value: {'Fn::GetAtt': [RealRandom, value]}
'''
- def setUp(self):
- super(TemplateResourceAdoptTest, self).setUp()
-
def _yaml_to_json(self, yaml_templ):
return yaml.safe_load(yaml_templ)
@@ -680,9 +668,6 @@
Value: {'Fn::GetAtt': [RealRandom, value]}
'''
- def setUp(self):
- super(TemplateResourceCheckTest, self).setUp()
-
def test_check(self):
stack_identifier = self.stack_create(
template=self.main_template,
@@ -712,9 +697,6 @@
wait_secs: 2
'''
- def setUp(self):
- super(TemplateResourceErrorMessageTest, self).setUp()
-
def test_fail(self):
stack_identifier = self.stack_create(
template=self.template,
@@ -747,9 +729,6 @@
type: OS::Heat::RandomString
'''
- def setUp(self):
- super(TemplateResourceSuspendResumeTest, self).setUp()
-
def test_suspend_resume(self):
"""Basic test for template resource suspend resume."""
stack_identifier = self.stack_create(
diff --git a/functional/test_templates.py b/functional/test_templates.py
index 82a1af4..8464d19 100644
--- a/functional/test_templates.py
+++ b/functional/test_templates.py
@@ -39,22 +39,20 @@
}
}
- def setUp(self):
- super(TemplateAPITest, self).setUp()
-
def test_get_stack_template(self):
stack_identifier = self.stack_create(
template=self.template
)
template_from_client = self.client.stacks.template(stack_identifier)
- self.assertDictEqual(self.template, template_from_client)
+ self.assertEqual(self.template, template_from_client)
def test_template_version(self):
template_versions = self.client.template_versions.list()
supported_template_versions = ["2013-05-23", "2014-10-16",
"2015-04-30", "2015-10-15",
"2012-12-12", "2010-09-09",
- "2016-04-08", "2016-10-14"]
+ "2016-04-08", "2016-10-14", "newton",
+ "2017-02-24", "ocata"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
diff --git a/functional/test_unicode_template.py b/functional/test_unicode_template.py
index 924c110..32c02a5 100644
--- a/functional/test_unicode_template.py
+++ b/functional/test_unicode_template.py
@@ -35,9 +35,6 @@
value: {get_attr: [\u8d44\u6e90, value]}
'''
- def setUp(self):
- super(StackUnicodeTemplateTest, self).setUp()
-
def _assert_results(self, result):
self.assertTrue(result['disable_rollback'])
self.assertIsNone(result['parent'])
diff --git a/functional/test_update_restricted.py b/functional/test_update_restricted.py
index ae1907b..7087c0c 100644
--- a/functional/test_update_restricted.py
+++ b/functional/test_update_restricted.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from heat_integrationtests.functional import functional_base
test_template = {
@@ -71,6 +73,9 @@
self._check_for_restriction_reason(resource_events,
reason_update_restrict))
+ # Ensure the timestamp changes, since this will be very quick
+ time.sleep(1)
+
# check update succeeds - with only 'replace' restricted
self.update_stack(stack_identifier, update_template,
env_replace_restrict,
@@ -95,7 +100,7 @@
# check replace fails - with 'both' restricted
self.update_stack(stack_identifier, update_template,
- env_replace_restrict,
+ env_both_restrict,
expected_status='UPDATE_FAILED')
self.assertTrue(self.verify_resource_status(stack_identifier, 'bar',
@@ -105,6 +110,9 @@
self._check_for_restriction_reason(resource_events,
reason_replace_restrict))
+ # Ensure the timestamp changes, since this will be very quick
+ time.sleep(1)
+
# check replace fails - with only 'replace' restricted
self.update_stack(stack_identifier, update_template,
env_replace_restrict,
@@ -139,6 +147,9 @@
self._check_for_restriction_reason(resource_events,
reason_replace_restrict))
+ # Ensure the timestamp changes, since this will be very quick
+ time.sleep(1)
+
# check replace fails - with only 'replace' restricted
self.update_stack(stack_identifier, update_template,
env_replace_restrict,
diff --git a/scenario/scenario_base.py b/scenario/scenario_base.py
index d41c9a1..c48d64d 100644
--- a/scenario/scenario_base.py
+++ b/scenario/scenario_base.py
@@ -21,8 +21,6 @@
def setUp(self):
super(ScenarioTestsBase, self).setUp()
self.check_skip()
-
- self.client = self.orchestration_client
self.sub_dir = 'templates'
self.assign_keypair()
@@ -30,11 +28,6 @@
raise self.skipException("No default network configured to test")
self.net = self._get_network()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
- if not self.conf.instance_type:
- raise self.skipException("No flavor configured to test")
-
if not self.conf.minimal_image_ref:
raise self.skipException("No minimal image configured to test")
if not self.conf.minimal_instance_type:
diff --git a/scenario/templates/test_ceilometer_alarm.yaml b/scenario/templates/test_aodh_alarm.yaml
similarity index 96%
rename from scenario/templates/test_ceilometer_alarm.yaml
rename to scenario/templates/test_aodh_alarm.yaml
index 01bc790..9218f56 100644
--- a/scenario/templates/test_ceilometer_alarm.yaml
+++ b/scenario/templates/test_aodh_alarm.yaml
@@ -15,7 +15,7 @@
cooldown: 0
scaling_adjustment: 1
alarm:
- type: OS::Ceilometer::Alarm
+ type: OS::Aodh::Alarm
properties:
description: Scale-up if the average CPU > 50% for 1 minute
meter_name: test_meter
diff --git a/scenario/templates/test_server_signal.yaml b/scenario/templates/test_server_signal.yaml
new file mode 100644
index 0000000..dfb1155
--- /dev/null
+++ b/scenario/templates/test_server_signal.yaml
@@ -0,0 +1,104 @@
+heat_template_version: 2013-05-23
+description: |
+  Template which uses a wait condition to confirm that minimal
+ signalling works in a created network
+parameters:
+ key_name:
+ type: string
+ flavor:
+ type: string
+ image:
+ type: string
+ subnet_cidr:
+ type: string
+ default: 10.100.0.0/16
+ timeout:
+ type: number
+ public_net:
+ type: string
+ default: public
+ private_net:
+ type: string
+ default: heat-net
+ dns_servers:
+ type: comma_delimited_list
+ default: ["8.8.8.8", "8.8.4.4"]
+resources:
+ sg:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: the_sg
+ description: Ping and SSH
+ rules:
+ - protocol: icmp
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: public_net}
+
+ network:
+ type: OS::Neutron::Net
+
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: network}
+ ip_version: 4
+ cidr: {get_param: subnet_cidr}
+ dns_nameservers: {get_param: dns_servers}
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: {get_param: public_net}
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router: {get_resource: router}
+ subnet: {get_resource: subnet}
+
+ wait_handle:
+ type: OS::Heat::WaitConditionHandle
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: image}
+ flavor: {get_param: flavor}
+ key_name: {get_param: key_name}
+ networks:
+ - subnet: {get_resource: subnet}
+ security_groups:
+ - {get_resource: sg}
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ wc_notify --data-binary '{"status": "SUCCESS", "data": "test complete"}'
+ params:
+ wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
+
+ server_floating_ip_assoc:
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: {get_resource: floating_ip}
+ port_id: {get_attr: [server, addresses, {get_resource: network}, 0, port]}
+
+ wait_condition:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: {get_resource: wait_handle}
+ timeout: {get_param: timeout}
+
+outputs:
+ server_ip:
+ value: {get_attr: [floating_ip, floating_ip_address]}
+ wc_data:
+ value: {get_attr: [wait_condition, data]}
diff --git a/scenario/test_ceilometer_alarm.py b/scenario/test_aodh_alarm.py
similarity index 89%
rename from scenario/test_ceilometer_alarm.py
rename to scenario/test_aodh_alarm.py
index aa29861..90288a2 100644
--- a/scenario/test_ceilometer_alarm.py
+++ b/scenario/test_aodh_alarm.py
@@ -18,12 +18,12 @@
LOG = logging.getLogger(__name__)
-class CeilometerAlarmTest(scenario_base.ScenarioTestsBase):
- """Class is responsible for testing of ceilometer usage."""
+class AodhAlarmTest(scenario_base.ScenarioTestsBase):
+ """Class is responsible for testing of aodh usage."""
def setUp(self):
- super(CeilometerAlarmTest, self).setUp()
+ super(AodhAlarmTest, self).setUp()
self.template = self._load_template(__file__,
- 'test_ceilometer_alarm.yaml',
+ 'test_aodh_alarm.yaml',
'templates')
def check_instance_count(self, stack_identifier, expected):
diff --git a/scenario/test_autoscaling_lbv2.py b/scenario/test_autoscaling_lbv2.py
index b3a1842..78cde53 100644
--- a/scenario/test_autoscaling_lbv2.py
+++ b/scenario/test_autoscaling_lbv2.py
@@ -46,6 +46,8 @@
# skip unsuccessful requests
if r.status_code == 200:
resp.add(r.text)
+ if len(resp) == expected_num:
+ break
self.assertEqual(expected_num, len(resp))
def test_autoscaling_loadbalancer_neutron(self):
diff --git a/scenario/test_server_cfn_init.py b/scenario/test_server_cfn_init.py
index d3ee7ee..c7d84e3 100644
--- a/scenario/test_server_cfn_init.py
+++ b/scenario/test_server_cfn_init.py
@@ -21,6 +21,10 @@
def setUp(self):
super(CfnInitIntegrationTest, self).setUp()
+ if not self.conf.image_ref:
+ raise self.skipException("No image configured to test")
+ if not self.conf.instance_type:
+ raise self.skipException("No flavor configured to test")
def check_stack(self, sid):
# Check status of all resources
@@ -36,9 +40,6 @@
try:
self._wait_for_resource_status(
sid, 'WaitCondition', 'CREATE_COMPLETE')
- except (exceptions.StackResourceBuildErrorException,
- exceptions.TimeoutException) as e:
- raise e
finally:
# attempt to log the server console regardless of WaitCondition
# going to complete. This allows successful and failed cloud-init
@@ -87,9 +88,9 @@
server_ip, username='ec2-user')
linux_client.validate_authentication()
except (exceptions.ServerUnreachable,
- exceptions.SSHTimeout) as e:
+ exceptions.SSHTimeout):
self._log_console_output(servers=[server])
- raise e
+ raise
def test_server_cfn_init(self):
"""Check cfn-init and cfn-signal availability on the created server.
diff --git a/scenario/test_server_signal.py b/scenario/test_server_signal.py
new file mode 100644
index 0000000..4567e43
--- /dev/null
+++ b/scenario/test_server_signal.py
@@ -0,0 +1,74 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from heat_integrationtests.common import exceptions
+from heat_integrationtests.scenario import scenario_base
+
+
+class ServerSignalIntegrationTest(scenario_base.ScenarioTestsBase):
+ """Test a server in a created network can signal to heat."""
+
+ def test_server_signal(self):
+ """Check a server in a created network can signal to heat."""
+ parameters = {
+ 'key_name': self.keypair_name,
+ 'flavor': self.conf.minimal_instance_type,
+ 'image': self.conf.minimal_image_ref,
+ 'timeout': self.conf.build_timeout,
+ }
+
+ # Launch stack
+ sid = self.launch_stack(
+ template_name="test_server_signal.yaml",
+ parameters=parameters,
+ expected_status=None
+ )
+
+ # Check status of all resources
+ for res in ('sg', 'floating_ip', 'network', 'router', 'subnet',
+ 'router_interface', 'wait_handle', 'server',
+ 'server_floating_ip_assoc'):
+ self._wait_for_resource_status(
+ sid, res, 'CREATE_COMPLETE')
+
+ server_resource = self.client.resources.get(sid, 'server')
+ server_id = server_resource.physical_resource_id
+ server = self.compute_client.servers.get(server_id)
+
+ try:
+ self._wait_for_resource_status(
+ sid, 'wait_condition', 'CREATE_COMPLETE')
+ except (exceptions.StackResourceBuildErrorException,
+ exceptions.TimeoutException):
+ raise
+ finally:
+            # Attempt to log the server console regardless of whether the
+            # WaitCondition completed. This allows successful and failed
+            # cloud-init logs to be compared
+ self._log_console_output(servers=[server])
+
+ stack = self.client.stacks.get(sid)
+
+ wc_data = json.loads(
+ self._stack_output(stack, 'wc_data'))
+ self.assertEqual({'1': 'test complete'}, wc_data)
+
+ server_ip = self._stack_output(stack, 'server_ip')
+
+ # Check that created server is reachable
+ if not self._ping_ip_address(server_ip):
+ self._log_console_output(servers=[server])
+ self.fail(
+ "Timed out waiting for %s to become reachable" % server_ip)
diff --git a/scenario/test_server_software_config.py b/scenario/test_server_software_config.py
index 75de02e..f4c7da5 100644
--- a/scenario/test_server_software_config.py
+++ b/scenario/test_server_software_config.py
@@ -13,7 +13,6 @@
from heatclient.common import template_utils
import six
-from heat_integrationtests.common import exceptions
from heat_integrationtests.scenario import scenario_base
CFG1_SH = '''#!/bin/sh
@@ -43,6 +42,13 @@
class SoftwareConfigIntegrationTest(scenario_base.ScenarioTestsBase):
+ def setUp(self):
+ super(SoftwareConfigIntegrationTest, self).setUp()
+ if not self.conf.image_ref:
+ raise self.skipException("No image configured to test")
+ if not self.conf.instance_type:
+ raise self.skipException("No flavor configured to test")
+
def check_stack(self):
sid = self.stack_identifier
# Check that all stack resources were created
@@ -69,9 +75,6 @@
for res in ('dep2a', 'dep2b', 'dep1', 'dep3'):
self._wait_for_resource_status(
sid, res, 'CREATE_COMPLETE')
- except (exceptions.StackResourceBuildErrorException,
- exceptions.TimeoutException) as e:
- raise e
finally:
# attempt to log the server console regardless of deployments
# going to complete. This allows successful and failed boot
@@ -97,7 +100,7 @@
res1['result'])
self.assertEqual(0, res1['status_code'])
self.assertEqual('Output to stderr\n', res1['stderr'])
- self.assertTrue(len(res1['stdout']) > 0)
+ self.assertGreater(len(res1['stdout']), 0)
res2 = self._stack_output(stack, 'res2')
self.assertEqual(
@@ -115,7 +118,7 @@
res3['result'])
self.assertEqual(0, res3['status_code'])
self.assertEqual('', res3['stderr'])
- self.assertTrue(len(res1['stdout']) > 0)
+ self.assertGreater(len(res1['stdout']), 0)
dep1_resource = self.client.resources.get(sid, 'dep1')
dep1_id = dep1_resource.physical_resource_id
diff --git a/scenario/test_volumes.py b/scenario/test_volumes.py
index 7980d81..603c8f2 100644
--- a/scenario/test_volumes.py
+++ b/scenario/test_volumes.py
@@ -77,9 +77,8 @@
template_name='test_volumes_create_from_backup.yaml',
add_parameters={'backup_id': backup.id})
stack2 = self.client.stacks.get(stack_identifier2)
- except exceptions.StackBuildErrorException as e:
- LOG.error("Halting test due to bug: #1382300")
- LOG.exception(e)
+ except exceptions.StackBuildErrorException:
+ LOG.exception("Halting test due to bug: #1382300")
return
# Verify with cinder that the volume exists, with matching details