Merge "Move test_notifications to functional"
diff --git a/common/clients.py b/common/clients.py
index 5a4dd5a..1ba3a21 100644
--- a/common/clients.py
+++ b/common/clients.py
@@ -20,10 +20,6 @@
import novaclient.client
import swiftclient
-import logging
-
-LOG = logging.getLogger(__name__)
-
class ClientManager(object):
"""
diff --git a/common/config.py b/common/config.py
index d4d41b0..158d087 100644
--- a/common/config.py
+++ b/common/config.py
@@ -68,6 +68,12 @@
cfg.StrOpt('fixed_network_name',
default='private',
help="Visible fixed network name "),
+ cfg.StrOpt('boot_config_env',
+ default='heat_integrationtests/scenario/templates'
+ '/boot_config_none_env.yaml',
+ help="Path to environment file which defines the "
+ "resource type Heat::InstallConfigAgent. Needs to "
+ "be appropriate for the image_ref."),
cfg.StrOpt('fixed_subnet_name',
default='private-subnet',
help="Visible fixed sub-network name "),
@@ -97,6 +103,10 @@
cfg.BoolOpt('skip_stack_abandon_tests',
default=False,
help="Skip Stack Abandon Integration tests"),
+ cfg.IntOpt('connectivity_timeout',
+ default=120,
+ help="Timeout in seconds to wait for connectivity to "
+                    "the server."),
]
diff --git a/common/remote_client.py b/common/remote_client.py
index 5365ceb..2955418 100644
--- a/common/remote_client.py
+++ b/common/remote_client.py
@@ -11,12 +11,12 @@
# under the License.
import cStringIO
-import logging
import re
import select
import socket
import time
+from oslo_log import log as logging
import paramiko
import six
diff --git a/common/test.py b/common/test.py
index 8ffbea3..30b10a8 100644
--- a/common/test.py
+++ b/common/test.py
@@ -10,15 +10,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
import os
import random
import re
import subprocess
import time
+import urllib
import fixtures
from heatclient import exc as heat_exceptions
+from oslo_log import log as logging
from oslo_utils import timeutils
import six
import testscenarios
@@ -107,12 +108,28 @@
return linux_client
+ def check_connectivity(self, check_ip):
+ def try_connect(ip):
+ try:
+ urllib.urlopen('http://%s/' % ip)
+ return True
+ except IOError:
+ return False
+
+ timeout = self.conf.connectivity_timeout
+ elapsed_time = 0
+ while not try_connect(check_ip):
+ time.sleep(10)
+ elapsed_time += 10
+ if elapsed_time > timeout:
+ raise exceptions.TimeoutException()
+
def _log_console_output(self, servers=None):
if not servers:
servers = self.compute_client.servers.list()
for server in servers:
- LOG.debug('Console output for %s', server.id)
- LOG.debug(server.get_console_output())
+ LOG.info('Console output for %s', server.id)
+ LOG.info(server.get_console_output())
def _load_template(self, base_file, file_name, sub_dir=None):
sub_dir = sub_dir or ''
@@ -147,10 +164,12 @@
def _stack_rand_name(cls):
return rand_name(cls.__name__)
- def _get_default_network(self):
+ def _get_network(self, net_name=None):
+ if net_name is None:
+ net_name = self.conf.fixed_network_name
networks = self.network_client.list_networks()
for net in networks['networks']:
- if net['name'] == self.conf.fixed_network_name:
+ if net['name'] == net_name:
return net
@staticmethod
@@ -204,7 +223,7 @@
message = ('Resource %s failed to reach %s status within '
'the required time (%s s).' %
- (res.resource_name, status, build_timeout))
+ (resource_name, status, build_timeout))
raise exceptions.TimeoutException(message)
def _wait_for_stack_status(self, stack_identifier, status,
@@ -243,7 +262,7 @@
message = ('Stack %s failed to reach %s status within '
'the required time (%s s).' %
- (stack.stack_name, status, build_timeout))
+ (stack_identifier, status, build_timeout))
raise exceptions.TimeoutException(message)
def _stack_delete(self, stack_identifier):
diff --git a/functional/test_autoscaling.py b/functional/test_autoscaling.py
index 60fa66f..d640915 100644
--- a/functional/test_autoscaling.py
+++ b/functional/test_autoscaling.py
@@ -12,8 +12,8 @@
import copy
import json
-import logging
+from oslo_log import log as logging
from testtools import matchers
from heat_integrationtests.common import test
diff --git a/functional/test_aws_stack.py b/functional/test_aws_stack.py
index 2e2cd9d..5aabe95 100644
--- a/functional/test_aws_stack.py
+++ b/functional/test_aws_stack.py
@@ -12,10 +12,10 @@
import hashlib
import json
-import logging
import random
import urlparse
+from oslo_log import log as logging
from swiftclient import utils as swiftclient_utils
import yaml
diff --git a/functional/test_heat_autoscaling.py b/functional/test_heat_autoscaling.py
index 340038c..0e6e0cb 100644
--- a/functional/test_heat_autoscaling.py
+++ b/functional/test_heat_autoscaling.py
@@ -97,3 +97,39 @@
expected_resources = {'random_group': 'OS::Heat::AutoScalingGroup'}
self.assertEqual(expected_resources, self.list_resources(stack_id))
self._assert_output_values(stack_id)
+
+
+class AutoScalingGroupUpdateWithNoChanges(test.HeatIntegrationTest):
+
+ template = '''
+heat_template_version: 2013-05-23
+
+resources:
+ test_group:
+ type: OS::Heat::AutoScalingGroup
+ properties:
+ desired_capacity: 0
+ max_size: 0
+ min_size: 0
+ resource:
+ type: OS::Heat::RandomString
+ test_policy:
+ type: OS::Heat::ScalingPolicy
+ properties:
+ adjustment_type: change_in_capacity
+ auto_scaling_group_id: { get_resource: test_group }
+ scaling_adjustment: 1
+'''
+
+ def setUp(self):
+ super(AutoScalingGroupUpdateWithNoChanges, self).setUp()
+ self.client = self.orchestration_client
+
+ def test_as_group_update_without_resource_changes(self):
+ stack_identifier = self.stack_create(template=self.template)
+ new_template = self.template.replace(
+ 'scaling_adjustment: 1',
+ 'scaling_adjustment: 2')
+
+ self.update_stack(stack_identifier, template=new_template)
+ self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
diff --git a/functional/test_instance_group.py b/functional/test_instance_group.py
index 84c63cd..5c88bed 100644
--- a/functional/test_instance_group.py
+++ b/functional/test_instance_group.py
@@ -12,16 +12,12 @@
import copy
import json
-import logging
from testtools import matchers
from heat_integrationtests.common import test
-LOG = logging.getLogger(__name__)
-
-
class InstanceGroupTest(test.HeatIntegrationTest):
template = '''
@@ -319,7 +315,7 @@
# setup stack from the initial template
files = {'provider.yaml': self.instance_template}
- size = 10
+ size = 5
env = {'resource_registry': {'AWS::EC2::Instance': 'provider.yaml'},
'parameters': {'size': size,
'image': self.conf.image_ref,
@@ -396,7 +392,7 @@
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
- num_updates_expected_on_updt=10,
+ num_updates_expected_on_updt=5,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
update_replace=True)
@@ -409,15 +405,15 @@
updt_template = self.ig_tmpl_with_updt_policy()
grp = updt_template['Resources']['JobServerGroup']
policy = grp['UpdatePolicy']['RollingUpdate']
- policy['MinInstancesInService'] = '8'
+ policy['MinInstancesInService'] = '4'
policy['MaxBatchSize'] = '4'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
- num_updates_expected_on_updt=8,
- num_creates_expected_on_updt=2,
- num_deletes_expected_on_updt=2,
+ num_updates_expected_on_updt=2,
+ num_creates_expected_on_updt=3,
+ num_deletes_expected_on_updt=3,
update_replace=True)
def test_instance_group_update_replace_huge_batch_size(self):
@@ -433,7 +429,7 @@
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
- num_updates_expected_on_updt=10,
+ num_updates_expected_on_updt=5,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
update_replace=True)
@@ -446,15 +442,15 @@
group = updt_template['Resources']['JobServerGroup']
policy = group['UpdatePolicy']['RollingUpdate']
policy['MinInstancesInService'] = '20'
- policy['MaxBatchSize'] = '1'
+ policy['MaxBatchSize'] = '2'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['ImageId'] = self.conf.minimal_image_ref
self.update_instance_group(updt_template,
- num_updates_expected_on_updt=9,
- num_creates_expected_on_updt=1,
- num_deletes_expected_on_updt=1,
+ num_updates_expected_on_updt=3,
+ num_creates_expected_on_updt=2,
+ num_deletes_expected_on_updt=2,
update_replace=True)
def test_instance_group_update_no_replace(self):
@@ -473,7 +469,7 @@
config['Properties']['InstanceType'] = 'm1.tiny'
self.update_instance_group(updt_template,
- num_updates_expected_on_updt=10,
+ num_updates_expected_on_updt=5,
num_creates_expected_on_updt=0,
num_deletes_expected_on_updt=0,
update_replace=False)
@@ -487,14 +483,14 @@
updt_template = self.ig_tmpl_with_updt_policy()
group = updt_template['Resources']['JobServerGroup']
policy = group['UpdatePolicy']['RollingUpdate']
- policy['MinInstancesInService'] = '8'
+ policy['MinInstancesInService'] = '4'
policy['MaxBatchSize'] = '4'
policy['PauseTime'] = 'PT0S'
config = updt_template['Resources']['JobServerConfig']
config['Properties']['InstanceType'] = 'm1.tiny'
self.update_instance_group(updt_template,
- num_updates_expected_on_updt=8,
- num_creates_expected_on_updt=2,
- num_deletes_expected_on_updt=2,
+ num_updates_expected_on_updt=2,
+ num_creates_expected_on_updt=3,
+ num_deletes_expected_on_updt=3,
update_replace=False)
diff --git a/functional/test_remote_stack.py b/functional/test_remote_stack.py
index 1b6f961..7579eb0 100644
--- a/functional/test_remote_stack.py
+++ b/functional/test_remote_stack.py
@@ -10,15 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from heatclient import exc
import six
from heat_integrationtests.common import test
-LOG = logging.getLogger(__name__)
-
class RemoteStackTest(test.HeatIntegrationTest):
template = '''
diff --git a/functional/test_template_resource.py b/functional/test_template_resource.py
index 85f5799..5893679 100644
--- a/functional/test_template_resource.py
+++ b/functional/test_template_resource.py
@@ -11,16 +11,12 @@
# under the License.
import json
-import logging
import yaml
from heat_integrationtests.common import test
-LOG = logging.getLogger(__name__)
-
-
class TemplateResourceTest(test.HeatIntegrationTest):
"""Prove that we can use the registry in a nested provider."""
diff --git a/functional/test_update.py b/functional/test_update.py
index ea436ad..3904311 100644
--- a/functional/test_update.py
+++ b/functional/test_update.py
@@ -10,14 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from heat_integrationtests.common import test
-LOG = logging.getLogger(__name__)
-
-
class UpdateStackTest(test.HeatIntegrationTest):
template = '''
diff --git a/functional/test_validation.py b/functional/test_validation.py
index 2d9669d..2df6356 100644
--- a/functional/test_validation.py
+++ b/functional/test_validation.py
@@ -10,14 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from heat_integrationtests.common import test
-LOG = logging.getLogger(__name__)
-
-
class StackValidationTest(test.HeatIntegrationTest):
def setUp(self):
diff --git a/scenario/scenario_base.py b/scenario/scenario_base.py
new file mode 100644
index 0000000..77c3624
--- /dev/null
+++ b/scenario/scenario_base.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.common import test
+
+
+class ScenarioTestsBase(test.HeatIntegrationTest):
+    """This class defines common parameters for scenario tests."""
+
+ def setUp(self):
+ super(ScenarioTestsBase, self).setUp()
+ self.client = self.orchestration_client
+ self.sub_dir = 'templates'
+ self.assign_keypair()
+
+ if not self.conf.fixed_network_name:
+ raise self.skipException("No default network configured to test")
+ self.net = self._get_network()
+
+ if not self.conf.image_ref:
+ raise self.skipException("No image configured to test")
+ if not self.conf.instance_type:
+ raise self.skipException("No flavor configured to test")
+
+ def launch_stack(self, template_name, expected_status='CREATE_COMPLETE',
+ parameters=None, **kwargs):
+ template = self._load_template(__file__, template_name, self.sub_dir)
+
+ parameters = parameters or {}
+
+ if kwargs.get('add_parameters'):
+ parameters.update(kwargs['add_parameters'])
+
+ stack_id = self.stack_create(
+ stack_name=kwargs.get('stack_name'),
+ template=template,
+ files=kwargs.get('files'),
+ parameters=parameters,
+ environment=kwargs.get('environment'),
+ expected_status=expected_status
+ )
+
+ return stack_id
diff --git a/scenario/templates/boot_config_none_env.yaml b/scenario/templates/boot_config_none_env.yaml
new file mode 100644
index 0000000..91d130c
--- /dev/null
+++ b/scenario/templates/boot_config_none_env.yaml
@@ -0,0 +1,5 @@
+# Defines a Heat::InstallConfigAgent config resource which performs no config.
+# This environment can be used when the image already has the required agents
+# installed and configured.
+resource_registry:
+ "Heat::InstallConfigAgent": "OS::Heat::SoftwareConfig"
\ No newline at end of file
diff --git a/scenario/templates/test_neutron_loadbalancer.yaml b/scenario/templates/test_neutron_loadbalancer.yaml
new file mode 100644
index 0000000..fad7db8
--- /dev/null
+++ b/scenario/templates/test_neutron_loadbalancer.yaml
@@ -0,0 +1,108 @@
+heat_template_version: 2014-10-16
+
+description: |
+ Template which tests neutron load balancing resources
+
+parameters:
+ key_name:
+ type: string
+ flavor:
+ type: string
+ image:
+ type: string
+ private_subnet_id:
+ type: string
+ external_network_id:
+ type: string
+ port:
+ type: string
+ default: '80'
+
+resources:
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for servers
+ name: security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: { get_param: port }
+ port_range_max: { get_param: port }
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+  server1:
+ type: OS::Nova::Server
+ properties:
+ name: Server1
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: key_name }
+ security_groups: [{ get_resource: sec_group }]
+ user_data:
+ list_join:
+ - ''
+ - - '#!/bin/bash -v
+
+ '
+ - 'echo $(hostname) > index.html
+
+ '
+ - 'python -m SimpleHTTPServer '
+ - { get_param: port }
+
+  server2:
+ type: OS::Nova::Server
+ properties:
+ name: Server2
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ key_name: { get_param: key_name }
+ security_groups: [{ get_resource: sec_group }]
+ user_data:
+ list_join:
+ - ''
+ - - '#!/bin/bash -v
+
+ '
+ - 'echo $(hostname) > index.html
+
+ '
+ - 'python -m SimpleHTTPServer '
+ - { get_param: port }
+
+ health_monitor:
+ type: OS::Neutron::HealthMonitor
+ properties:
+ delay: 3
+ type: HTTP
+ timeout: 3
+ max_retries: 3
+
+ test_pool:
+ type: OS::Neutron::Pool
+ properties:
+ lb_method: ROUND_ROBIN
+ protocol: HTTP
+ subnet: { get_param: private_subnet_id }
+ monitors:
+ - { get_resource: health_monitor }
+ vip:
+ protocol_port: { get_param: port }
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: external_network_id }
+ port_id:
+ { get_attr: [test_pool, vip, 'port_id'] }
+
+ LBaaS:
+ type: OS::Neutron::LoadBalancer
+ properties:
+ pool_id: { get_resource: test_pool }
+ protocol_port: { get_param: port }
+ members:
+ - { get_resource: server1 }
+
diff --git a/scenario/templates/test_server_cfn_init.yaml b/scenario/templates/test_server_cfn_init.yaml
index 681e539..ffb8e9b 100644
--- a/scenario/templates/test_server_cfn_init.yaml
+++ b/scenario/templates/test_server_cfn_init.yaml
@@ -27,6 +27,12 @@
Type: AWS::IAM::AccessKey
Properties:
UserName: {Ref: CfnUser}
+
+ IPAddress:
+ Type: AWS::EC2::EIP
+ Properties:
+ InstanceId: {Ref: SmokeServer}
+
SmokeServer:
Type: AWS::EC2::Instance
Metadata:
@@ -78,4 +84,4 @@
SmokeServerIp:
Description: IP address of server
Value:
- Fn::GetAtt: [SmokeServer, PublicIp]
+ Ref: IPAddress
diff --git a/scenario/templates/test_server_software_config.yaml b/scenario/templates/test_server_software_config.yaml
index e6ecae4..bf8fa9b 100644
--- a/scenario/templates/test_server_software_config.yaml
+++ b/scenario/templates/test_server_software_config.yaml
@@ -11,6 +11,9 @@
signal_transport:
type: string
default: CFN_SIGNAL
+ software_config_transport:
+ type: string
+ default: POLL_SERVER_CFN
dep1_foo:
default: fooooo
type: string
@@ -133,11 +136,7 @@
signal_transport: {get_param: signal_transport}
cfg_user_data:
- type: OS::Heat::SoftwareConfig
- properties:
- config: |
- #!/bin/sh
- echo "user data script"
+ type: Heat::InstallConfigAgent
server:
type: OS::Nova::Server
@@ -150,8 +149,8 @@
networks:
- network: {get_param: network}
user_data_format: SOFTWARE_CONFIG
- software_config_transport: POLL_TEMP_URL
- user_data: {get_resource: cfg_user_data}
+ software_config_transport: {get_param: software_config_transport}
+ user_data: {get_attr: [cfg_user_data, config]}
outputs:
res1:
diff --git a/scenario/test_neutron_autoscaling.py b/scenario/test_neutron_autoscaling.py
index 0e3c404..2ba085b 100644
--- a/scenario/test_neutron_autoscaling.py
+++ b/scenario/test_neutron_autoscaling.py
@@ -10,59 +10,62 @@
# License for the specific language governing permissions and limitations
# under the License.
-from heat_integrationtests.common import test
+from heat_integrationtests.scenario import scenario_base
-class NeutronAutoscalingTest(test.HeatIntegrationTest):
+class NeutronAutoscalingTest(scenario_base.ScenarioTestsBase):
"""
The class is responsible for testing of neutron resources autoscaling.
"""
def setUp(self):
super(NeutronAutoscalingTest, self).setUp()
- self.client = self.orchestration_client
if not self.conf.minimal_image_ref:
raise self.skipException("No minimal image configured to test")
- if not self.conf.instance_type:
- raise self.skipException("No flavor configured to test")
if not self.conf.fixed_subnet_name:
raise self.skipException("No sub-network configured to test")
+ self.template_name = 'test_neutron_autoscaling.yaml'
def test_neutron_autoscaling(self):
"""
- Check autoscaling of load balancer members in heat.
+ Check autoscaling of load balancer members in Heat.
The alternative scenario is the following:
- 1. Initialize environment variables.
- 2. Create a stack with a load balancer.
- 3. Check that the load balancer created
+ 1. Launch a stack with a load balancer.
+ 2. Check that the load balancer created
one load balancer member for stack.
- 4. Update stack definition: increase desired capacity of stack.
- 5. Check that number of members in load balancer was increased.
+ 3. Update stack definition: increase desired capacity of stack.
+ 4. Check that number of members in load balancer was increased.
"""
- # Init env variables
- env = {'parameters': {"image_id": self.conf.minimal_image_ref,
- "capacity": "1",
- "instance_type": self.conf.instance_type,
- "fixed_subnet_name": self.conf.fixed_subnet_name,
- }}
+ parameters = {
+ "image_id": self.conf.minimal_image_ref,
+ "capacity": "1",
+ "instance_type": self.conf.instance_type,
+ "fixed_subnet_name": self.conf.fixed_subnet_name,
+ }
- template = self._load_template(__file__,
- 'test_neutron_autoscaling.yaml',
- 'templates')
- # Create stack
- stack_id = self.stack_create(template=template,
- environment=env)
+ # Launch stack
+ stack_id = self.launch_stack(
+ template_name=self.template_name,
+ parameters=parameters
+ )
+ # Check number of members
members = self.network_client.list_members()
self.assertEqual(1, len(members["members"]))
# Increase desired capacity and update the stack
- env["parameters"]["capacity"] = "2"
- self.update_stack(stack_id,
- template=template,
- environment=env)
+ template = self._load_template(
+ __file__, self.template_name, self.sub_dir
+ )
+ parameters["capacity"] = "2"
+ self.update_stack(
+ stack_id,
+ template=template,
+ parameters=parameters
+ )
+ # Check number of members
upd_members = self.network_client.list_members()
self.assertEqual(2, len(upd_members["members"]))
diff --git a/scenario/test_neutron_loadbalancer.py b/scenario/test_neutron_loadbalancer.py
new file mode 100644
index 0000000..bc2d77c
--- /dev/null
+++ b/scenario/test_neutron_loadbalancer.py
@@ -0,0 +1,118 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+import urllib
+
+from heat_integrationtests.scenario import scenario_base
+
+
+class NeutronLoadBalancerTest(scenario_base.ScenarioTestsBase):
+ """
+    This class is responsible for testing Neutron load balancer resources.
+ """
+
+ def setUp(self):
+ super(NeutronLoadBalancerTest, self).setUp()
+ self.public_net = self._get_network('public')
+ self.template_name = 'test_neutron_loadbalancer.yaml'
+
+ def collect_responses(self, ip, expected_resp):
+ resp = set()
+ for count in range(10):
+ time.sleep(1)
+ resp.add(urllib.urlopen('http://%s/' % ip).read())
+
+ self.assertEqual(expected_resp, resp)
+
+ def test_neutron_loadbalancer(self):
+ """
+        Check the operation of Neutron LBaaS resources in Heat.
+
+ The alternative scenario is the following:
+        1. Launch a stack with a load balancer and two servers,
+           but use only one of them as an LB member.
+        2. Check connectivity to the servers and the LB.
+        3. Collect the responses received by the LB from its
+           members (responses have to come only from 'server1').
+        4. Update the stack definition: add 'server2' to the LB.
+        5. Check that the number of members in the LB was increased and
+           responses were received from both 'server1' and 'server2'.
+ """
+
+ parameters = {
+ 'key_name': self.keypair_name,
+ 'flavor': self.conf.instance_type,
+ 'image': self.conf.image_ref,
+ 'private_subnet_id': self.net['subnets'][0],
+ 'external_network_id': self.public_net['id']
+ }
+
+ # Launch stack
+ sid = self.launch_stack(
+ template_name=self.template_name,
+ parameters=parameters
+ )
+
+ server1_id = self.client.resources.get(
+ sid, 'server1').physical_resource_id
+ server2_id = self.client.resources.get(
+ sid, 'server2').physical_resource_id
+ floating_ip_id = self.client.resources.get(
+ sid, 'floating_ip').physical_resource_id
+ floating_ip = self.network_client.show_floatingip(
+ floating_ip_id)['floatingip']['floating_ip_address']
+ pool_id = self.client.resources.get(
+ sid, 'test_pool').physical_resource_id
+ vip_id = self.network_client.show_pool(pool_id)['pool']['vip_id']
+
+ vip = self.network_client.show_vip(vip_id)['vip']['address']
+ server1_ip = self.compute_client.servers.get(
+ server1_id).networks['private'][0]
+ server2_ip = self.compute_client.servers.get(
+ server2_id).networks['private'][0]
+
+        # Check connectivity and the responses received
+ self.check_connectivity(server1_ip)
+ self.collect_responses(server1_ip, {'server1\n'})
+
+ self.check_connectivity(server2_ip)
+ self.collect_responses(server2_ip, {'server2\n'})
+
+ self.check_connectivity(vip)
+ self.collect_responses(vip, {'server1\n'})
+
+ self.check_connectivity(floating_ip)
+ self.collect_responses(floating_ip, {'server1\n'})
+
+        # Add 'server2' to the LB and update the stack
+ template = self._load_template(
+ __file__, self.template_name, self.sub_dir
+ )
+
+ template = template.replace(
+ '- { get_resource: server1 }',
+ '- { get_resource: server1 }\n - { get_resource: server2 }\n'
+ )
+
+ self.update_stack(
+ sid,
+ template=template,
+ parameters=parameters
+ )
+
+ self.check_connectivity(vip)
+ self.collect_responses(vip, {'server1\n', 'server2\n'})
+
+ self.check_connectivity(floating_ip)
+ self.collect_responses(floating_ip, {'server1\n', 'server2\n'})
diff --git a/scenario/test_server_cfn_init.py b/scenario/test_server_cfn_init.py
index cc5d9d2..a49606c 100644
--- a/scenario/test_server_cfn_init.py
+++ b/scenario/test_server_cfn_init.py
@@ -11,62 +11,29 @@
# under the License.
import json
-import logging
from heat_integrationtests.common import exceptions
-from heat_integrationtests.common import test
-
-LOG = logging.getLogger(__name__)
+from heat_integrationtests.scenario import scenario_base
-class CfnInitIntegrationTest(test.HeatIntegrationTest):
+class CfnInitIntegrationTest(scenario_base.ScenarioTestsBase):
+ """
+    This class is responsible for testing cfn-init and cfn-signal.
+ """
def setUp(self):
super(CfnInitIntegrationTest, self).setUp()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
- self.assign_keypair()
- self.client = self.orchestration_client
- self.template_name = 'test_server_cfn_init.yaml'
- self.sub_dir = 'templates'
-
- def launch_stack(self):
- net = self._get_default_network()
- parameters = {
- 'key_name': self.keypair_name,
- 'flavor': self.conf.instance_type,
- 'image': self.conf.image_ref,
- 'timeout': self.conf.build_timeout,
- 'subnet': net['subnets'][0],
- }
-
- # create the stack
- template = self._load_template(__file__, self.template_name,
- self.sub_dir)
- return self.stack_create(template=template,
- parameters=parameters)
def check_stack(self, sid):
- self._wait_for_resource_status(
- sid, 'WaitHandle', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
- sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
- sid, 'SmokeKeys', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
- sid, 'CfnUser', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
- sid, 'SmokeServer', 'CREATE_COMPLETE')
+ # Check status of all resources
+ for res in ('WaitHandle', 'SmokeSecurityGroup', 'SmokeKeys',
+ 'CfnUser', 'SmokeServer', 'IPAddress'):
+ self._wait_for_resource_status(
+ sid, res, 'CREATE_COMPLETE')
server_resource = self.client.resources.get(sid, 'SmokeServer')
server_id = server_resource.physical_resource_id
server = self.compute_client.servers.get(server_id)
- server_ip = server.networks[self.conf.network_for_ssh][0]
-
- if not self._ping_ip_address(server_ip):
- self._log_console_output(servers=[server])
- self.fail(
- "Timed out waiting for %s to become reachable" % server_ip)
try:
self._wait_for_resource_status(
@@ -80,6 +47,7 @@
# logs to be compared
self._log_console_output(servers=[server])
+ # Check stack status
self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
stack = self.client.stacks.get(sid)
@@ -94,9 +62,16 @@
self._stack_output(stack, 'WaitConditionStatus'))
self.assertEqual('smoke test complete', wait_status['smoke_status'])
+ server_ip = self._stack_output(stack, 'SmokeServerIp')
+
+ # Check that created server is reachable
+ if not self._ping_ip_address(server_ip):
+ self._log_console_output(servers=[server])
+ self.fail(
+ "Timed out waiting for %s to become reachable" % server_ip)
+
+ # Check that the user can authenticate with the generated keypair
if self.keypair:
- # Check that the user can authenticate with the generated
- # keypair
try:
linux_client = self.get_remote_client(
server_ip, username='ec2-user')
@@ -107,5 +82,31 @@
raise e
def test_server_cfn_init(self):
- sid = self.launch_stack()
- self.check_stack(sid)
+ """
+ Check cfn-init and cfn-signal availability on the created server.
+
+ The alternative scenario is the following:
+        1. Create a stack with a server and a configured security group.
+        2. Check that all stack resources were created.
+        3. Check that the created server is reachable.
+        4. Check that the stack was created successfully.
+        5. Check that it is possible to connect to the server
+           via the generated keypair.
+ """
+ parameters = {
+ "key_name": self.keypair_name,
+ "flavor": self.conf.instance_type,
+ "image": self.conf.image_ref,
+ "timeout": self.conf.build_timeout,
+ "subnet": self.net["subnets"][0],
+ }
+
+ # Launch stack
+ stack_id = self.launch_stack(
+ template_name="test_server_cfn_init.yaml",
+ parameters=parameters,
+ expected_status=None
+ )
+
+ # Check stack
+ self.check_stack(stack_id)
diff --git a/scenario/test_server_software_config.py b/scenario/test_server_software_config.py
index bd5d18b..2df815b 100644
--- a/scenario/test_server_software_config.py
+++ b/scenario/test_server_software_config.py
@@ -10,10 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from heatclient.common import template_utils
import six
from heat_integrationtests.common import exceptions
-from heat_integrationtests.common import test
+from heat_integrationtests.scenario import scenario_base
CFG1_SH = '''#!/bin/sh
echo "Writing to /tmp/$bar"
@@ -39,46 +40,18 @@
}'''
-class SoftwareConfigIntegrationTest(test.HeatIntegrationTest):
+class SoftwareConfigIntegrationTest(scenario_base.ScenarioTestsBase):
def setUp(self):
super(SoftwareConfigIntegrationTest, self).setUp()
if self.conf.skip_software_config_tests:
self.skipTest('Testing software config disabled in conf, '
'skipping')
- self.client = self.orchestration_client
- self.template_name = 'test_server_software_config.yaml'
- self.sub_dir = 'templates'
self.stack_name = self._stack_rand_name()
- self.maxDiff = None
-
- def launch_stack(self):
- net = self._get_default_network()
- self.parameters = {
- 'key_name': self.keypair_name,
- 'flavor': self.conf.instance_type,
- 'image': self.conf.image_ref,
- 'network': net['id']
- }
-
- # create the stack
- self.template = self._load_template(__file__, self.template_name,
- self.sub_dir)
- self.stack_create(
- stack_name=self.stack_name,
- template=self.template,
- parameters=self.parameters,
- files={
- 'cfg1.sh': CFG1_SH,
- 'cfg3.pp': CFG3_PP
- },
- expected_status=None)
-
- self.stack = self.client.stacks.get(self.stack_name)
- self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
def check_stack(self):
sid = self.stack_identifier
+ # Check that all stack resources were created
for res in ('cfg2a', 'cfg2b', 'cfg1', 'cfg3', 'server'):
self._wait_for_resource_status(
sid, res, 'CREATE_COMPLETE')
@@ -87,14 +60,15 @@
server_id = server_resource.physical_resource_id
server = self.compute_client.servers.get(server_id)
+        # Wait for each deployment to contribute its
+        # config to the resource
try:
- # wait for each deployment to contribute their
- # config to resource
for res in ('dep2b', 'dep1', 'dep3'):
self._wait_for_resource_status(
sid, res, 'CREATE_IN_PROGRESS')
- server_metadata = self.client.resources.metadata(sid, 'server')
+ server_metadata = self.client.resources.metadata(
+ sid, 'server')
deployments = dict((d['name'], d) for d in
server_metadata['deployments'])
@@ -106,11 +80,13 @@
self._log_console_output(servers=[server])
raise e
+ # Check that stack was fully created
self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
complete_server_metadata = self.client.resources.metadata(
sid, 'server')
- # ensure any previously available deployments haven't changed so
+
+ # Ensure any previously available deployments haven't changed so
# config isn't re-triggered
complete_deployments = dict((d['name'], d) for d in
complete_server_metadata['deployments'])
@@ -149,10 +125,51 @@
dep1_resource = self.client.resources.get(sid, 'dep1')
dep1_id = dep1_resource.physical_resource_id
dep1_dep = self.client.software_deployments.get(dep1_id)
- self.assertIsNotNone(dep1_dep.updated_time)
- self.assertNotEqual(dep1_dep.updated_time, dep1_dep.creation_time)
+ if hasattr(dep1_dep, 'updated_time'):
+ # Only check updated_time if the attribute exists.
+ # This allows latest heat agent code to be tested with
+ # Juno heat (which doesn't expose updated_time)
+ self.assertIsNotNone(dep1_dep.updated_time)
+ self.assertNotEqual(
+ dep1_dep.updated_time,
+ dep1_dep.creation_time)
def test_server_software_config(self):
- self.assign_keypair()
- self.launch_stack()
+ """
+        Check that the script files passed in are executed on the server.
+
+ The alternative scenario is the following:
+ 1. Create a stack and pass files with scripts.
+ 2. Check that all stack resources are created successfully.
+ 3. Wait for all deployments.
+ 4. Check that stack was created.
+ 5. Check stack outputs.
+ """
+
+ parameters = {
+ 'key_name': self.keypair_name,
+ 'flavor': self.conf.instance_type,
+ 'image': self.conf.image_ref,
+ 'network': self.net['id']
+ }
+
+ files = {
+ 'cfg1.sh': CFG1_SH,
+ 'cfg3.pp': CFG3_PP
+ }
+
+ env_files, env = template_utils.process_environment_and_files(
+ self.conf.boot_config_env)
+
+ # Launch stack
+ self.stack_identifier = self.launch_stack(
+ stack_name=self.stack_name,
+ template_name='test_server_software_config.yaml',
+ parameters=parameters,
+ files=dict(list(files.items()) + list(env_files.items())),
+ expected_status=None,
+ environment=env
+ )
+
+ # Check stack
self.check_stack()
diff --git a/scenario/test_volumes.py b/scenario/test_volumes.py
index a60ffb0..9b12a9c 100644
--- a/scenario/test_volumes.py
+++ b/scenario/test_volumes.py
@@ -10,24 +10,25 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
from cinderclient import exceptions as cinder_exceptions
+from oslo_log import log as logging
import six
from testtools import testcase
from heat_integrationtests.common import exceptions
-from heat_integrationtests.common import test
+from heat_integrationtests.scenario import scenario_base
LOG = logging.getLogger(__name__)
-class VolumeBackupRestoreIntegrationTest(test.HeatIntegrationTest):
+class VolumeBackupRestoreIntegrationTest(scenario_base.ScenarioTestsBase):
+ """
+    This class is responsible for testing volume backup and restore.
+ """
def setUp(self):
super(VolumeBackupRestoreIntegrationTest, self).setUp()
- self.client = self.orchestration_client
- self.assign_keypair()
self.volume_description = 'A test volume description 123'
self.volume_size = self.conf.volume_size
@@ -48,37 +49,8 @@
self.assertEqual(self.volume_description,
self._stack_output(stack, 'display_description'))
- def launch_stack(self, template_name, add_parameters={}):
- net = self._get_default_network()
- template = self._load_template(__file__, template_name, 'templates')
- parameters = {'key_name': self.keypair_name,
- 'instance_type': self.conf.instance_type,
- 'image_id': self.conf.minimal_image_ref,
- 'volume_description': self.volume_description,
- 'timeout': self.conf.build_timeout,
- 'network': net['id']}
- parameters.update(add_parameters)
- return self.stack_create(template=template,
- parameters=parameters)
-
- @testcase.skip('Skipped until failure rate '
- 'can be reduced ref bug #1382300')
- def test_cinder_volume_create_backup_restore(self):
- """Ensure the 'Snapshot' deletion policy works.
-
- This requires a more complex test, but it tests several aspects
- of the heat cinder resources:
- 1. Create a volume, attach it to an instance, write some data to it
- 2. Delete the stack, with 'Snapshot' specified, creates a backup
- 3. Check the snapshot has created a volume backup
- 4. Create a new stack, where the volume is created from the backup
- 5. Verify the test data written in (1) is present in the new volume
- """
- stack_identifier = self.launch_stack(
- template_name='test_volumes_delete_snapshot.yaml',
- add_parameters={'volume_size': self.volume_size})
-
- stack = self.client.stacks.get(stack_identifier)
+ def check_stack(self, stack_id):
+ stack = self.client.stacks.get(stack_id)
# Verify with cinder that the volume exists, with matching details
volume_id = self._stack_output(stack, 'volume_id')
@@ -89,8 +61,8 @@
# Delete the stack and ensure a backup is created for volume_id
# but the volume itself is gone
- self.client.stacks.delete(stack_identifier)
- self._wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self.client.stacks.delete(stack_id)
+ self._wait_for_stack_status(stack_id, 'DELETE_COMPLETE')
self.assertRaises(cinder_exceptions.NotFound,
self.volume_client.volumes.get,
volume_id)
@@ -130,3 +102,36 @@
self.assertRaises(cinder_exceptions.NotFound,
self.volume_client.volumes.get,
volume_id2)
+
+ @testcase.skip('Skipped until failure rate '
+ 'can be reduced ref bug #1382300')
+ def test_cinder_volume_create_backup_restore(self):
+ """
+ Ensure the 'Snapshot' deletion policy works.
+
+ This requires a more complex test, but it tests several aspects
+ of the heat cinder resources:
+ 1. Create a volume, attach it to an instance, write some data to it
+ 2. Delete the stack, with 'Snapshot' specified, creates a backup
+ 3. Check the snapshot has created a volume backup
+ 4. Create a new stack, where the volume is created from the backup
+ 5. Verify the test data written in (1) is present in the new volume
+ """
+ parameters = {
+ 'key_name': self.keypair_name,
+ 'instance_type': self.conf.instance_type,
+ 'image_id': self.conf.minimal_image_ref,
+ 'volume_description': self.volume_description,
+ 'timeout': self.conf.build_timeout,
+ 'network': self.net['id']
+ }
+
+ # Launch stack
+ stack_id = self.launch_stack(
+ template_name='test_volumes_delete_snapshot.yaml',
+ parameters=parameters,
+ add_parameters={'volume_size': self.volume_size}
+ )
+
+ # Check stack
+ self.check_stack(stack_id)