Initial change to support Heat-based environments

Moved the patch from the mcp/mcp-qa repo (ref #1170) so that it is
not lost after cleanup.

1. Added envmanager_heat.py to create an environment in OpenStack
and use the created Heat stack as the metadata source.
Current conventions for Heat stack metadata:
- OS::Nova::Server must use the 'metadata' property to specify the
  list of node roles, for example:
    cfg01_node:
      type: OS::Nova::Server
      ...
      properties:
        ...
        metadata:
          roles:
          - salt_master
- OS::Neutron::Subnet must use the 'tags' property to specify the
  address pool name (L3 network roles), for example:
    control_subnet:
      type: OS::Neutron::Subnet
      properties:
        ...
        tags:
        - private_pool01
2. Change underlay.yaml to use the user data file 'as is': the user
data file no longer carries extra indentation and jinja blocks, and
underlay.yaml wraps it back via a macro (see the sketch below the
list). This allows using the same user data file for fuel-devops
envs and Heat stack envs.
3. Add an example microcloud-8116.env file with some defaults.
For other stacks, other .env files can be created with different
access keys, networks, images, and so on (a hypothetical example
is shown below the list).
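
Sketch of how underlay.yaml wraps the plain user data file back into
a YAML block scalar (this mirrors the underlay.yaml change in the
diff below):

    {%- macro user_data() %}{{ CLOUDINIT_USER_DATA_1604_SWP }}{% endmacro %}
    ---
    aliases:
      - &cloudinit_user_data_1604_swp |
    {{ user_data()|indent(4, first=True) }}

A hypothetical .env file for another stack (all values below are
placeholders, not taken from a real environment):

    parameter_defaults:
      flavor_medium: m1.medium
      image_ubuntu_cloud_xenial: ubuntu-16.04-cloudimg
      keypair: my-keypair
      net_public: public
      management_physical_network: mgmt-net
      management_subnet_cidr: 10.20.0.0/24
      management_subnet_pool_start: 10.20.0.20
      management_subnet_pool_end: 10.20.0.60
      management_subnet_gateway_ip: 10.20.0.1
      dns_nameservers: 8.8.8.8
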
Related-Bug: PROD-27687
Change-Id: Iaa9e97447bd1b41e5930a1ffbb7312945ba139f4
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index a3bcea4..b1772e8 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -20,6 +20,7 @@
from tcp_tests import settings
from tcp_tests.managers import envmanager_devops
from tcp_tests.managers import envmanager_empty
+from tcp_tests.managers import envmanager_heat
from tcp_tests.managers import underlay_ssh_manager
LOG = logger.logger
@@ -65,6 +66,12 @@
# config.underlay.ssh settings can be empty or witn SSH to existing env
# config.underlay.current_snapshot
env = envmanager_devops.EnvironmentManager(config=config)
+
+ elif manager == 'heat':
+ # heat environment manager is used.
+        # config.underlay.ssh settings can be empty or with SSH to existing env
+ # config.underlay.current_snapshot
+ env = envmanager_heat.EnvironmentManagerHeat(config=config)
else:
raise Exception("Unknown hardware manager: '{}'".format(manager))
diff --git a/tcp_tests/helpers/exceptions.py b/tcp_tests/helpers/exceptions.py
index 7bc4abc..64b9db9 100644
--- a/tcp_tests/helpers/exceptions.py
+++ b/tcp_tests/helpers/exceptions.py
@@ -101,6 +101,39 @@
)
+class EnvironmentWrongStatus(BaseException):
+ def __init__(self, env_name, env_expected_status, env_actual_status):
+ super(EnvironmentWrongStatus, self).__init__()
+ self.env_name = env_name
+ self.env_expected_status = env_expected_status
+ self.env_actual_status = env_actual_status
+
+ def __str__(self):
+ return ("Environment '{0}' has wrong status: "
+ "expected '{1}', got: '{2}'"
+ .format(self.env_name,
+ self.env_expected_status,
+ self.env_actual_status))
+
+
+class EnvironmentBadStatus(BaseException):
+ def __init__(self, env_name, env_expected_status,
+ env_actual_status, wrong_resources):
+ super(EnvironmentBadStatus, self).__init__()
+ self.env_name = env_name
+ self.env_expected_status = env_expected_status
+ self.env_actual_status = env_actual_status
+ self.wrong_resources = wrong_resources
+
+ def __str__(self):
+ return ("Environment '{0}' has bad status: "
+ "expected '{1}', got: '{2}'\n{3}"
+ .format(self.env_name,
+ self.env_expected_status,
+ self.env_actual_status,
+ self.wrong_resources))
+
+
class EnvironmentSnapshotMissing(BaseException):
def __init__(self, env_name, snapshot_name):
super(EnvironmentSnapshotMissing, self).__init__()
@@ -144,3 +177,14 @@
def __str__(self):
return ("Cloud-init failed on node {0} with error: \n{1}"
.format(self.node_name, self.message))
+
+
+class EnvironmentNodeAccessError(BaseException):
+ def __init__(self, node_name, message=''):
+ super(EnvironmentNodeAccessError, self).__init__()
+ self.node_name = node_name
+ self.message = message
+
+ def __str__(self):
+ return ("Unable to reach the node {0}: \n{1}"
+ .format(self.node_name, self.message))
diff --git a/tcp_tests/managers/envmanager_heat.py b/tcp_tests/managers/envmanager_heat.py
new file mode 100644
index 0000000..2d52e00
--- /dev/null
+++ b/tcp_tests/managers/envmanager_heat.py
@@ -0,0 +1,549 @@
+# Copyright 2019 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import netaddr
+import yaml
+
+from devops.helpers import helpers
+from devops.helpers.helpers import ssh_client
+from retry import retry
+
+from cached_property import cached_property
+
+from heatclient import client as heatclient
+from heatclient import exc as heat_exceptions
+from heatclient.common import template_utils
+from keystoneauth1.identity import v3 as keystone_v3
+from keystoneauth1 import session as keystone_session
+
+import requests
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+
+from oslo_config import cfg
+from paramiko.ssh_exception import (
+ AuthenticationException,
+ BadAuthenticationType)
+
+from tcp_tests import settings
+from tcp_tests import settings_oslo
+from tcp_tests.helpers import exceptions
+from tcp_tests import logger
+
+LOG = logger.logger
+
+EXPECTED_STACK_STATUS = "CREATE_COMPLETE"
+BAD_STACK_STATUSES = ["CREATE_FAILED"]
+
+# Disable multiple notifications like:
+# "InsecureRequestWarning: Unverified HTTPS request is being made."
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+
+class EnvironmentManagerHeat(object):
+    """Class-helper for creating VMs via OpenStack Heat stacks"""
+
+ __config = None
+
+ # Do not use self.__heatclient directly! Use properties
+ # for necessary resources with catching HTTPUnauthorized exception
+ __heatclient = None
+
+ def __init__(self, config=None):
+ """Create/connect to the Heat stack with test environment
+
+        :param config: oslo.config object
+        :param config.hardware.heat_stack_name: Heat stack name
+
+        Heat/Keystone access parameters are taken from settings:
+        OS_HEAT_VERSION, OS_AUTH_URL, OS_USERNAME, OS_PASSWORD,
+        OS_PROJECT_NAME
+ """
+ self.__config = config
+
+ if not self.__config.hardware.heat_stack_name:
+ self.__config.hardware.heat_stack_name = settings.ENV_NAME
+
+ self.__init_heatclient()
+
+ try:
+ stack_status = self._current_stack.stack_status
+ if stack_status != EXPECTED_STACK_STATUS:
+ raise exceptions.EnvironmentWrongStatus(
+ self.__config.hardware.heat_stack_name,
+ EXPECTED_STACK_STATUS,
+ stack_status
+ )
+ LOG.info("Heat stack '{0}' already exists".format(
+ self.__config.hardware.heat_stack_name))
+ except heat_exceptions.HTTPNotFound:
+ self._create_environment()
+ LOG.info("Heat stack '{0}' created".format(
+ self.__config.hardware.heat_stack_name))
+
+ self.set_address_pools_config()
+ self.set_dhcp_ranges_config()
+
+ @cached_property
+ def _keystone_session(self):
+ keystone_auth = keystone_v3.Password(
+ auth_url=settings.OS_AUTH_URL,
+ username=settings.OS_USERNAME,
+ password=settings.OS_PASSWORD,
+ project_name=settings.OS_PROJECT_NAME,
+ user_domain_name='Default',
+ project_domain_name='Default')
+ return keystone_session.Session(auth=keystone_auth, verify=False)
+
+ def __init_heatclient(self):
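+        # The heat client is created with a plain token instead of a
+        # keystone session, so it has to be re-created when the token
+        # expires (see the __stacks/__resources properties below)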
+ token = self._keystone_session.get_token()
+ endpoint_url = self._keystone_session.get_endpoint(
+ service_type='orchestration', endpoint_type='publicURL')
+ self.__heatclient = heatclient.Client(
+ version=settings.OS_HEAT_VERSION, endpoint=endpoint_url,
+ token=token, insecure=True)
+
+ @property
+ def _current_stack(self):
+ return self.__stacks.get(
+ self.__config.hardware.heat_stack_name)
+
+ @property
+ def __stacks(self):
+ try:
+ return self.__heatclient.stacks
+ except heat_exceptions.HTTPUnauthorized:
+ LOG.warning("Authorization token outdated, refreshing")
+ self.__init_heatclient()
+ return self.__heatclient.stacks
+
+ @property
+ def __resources(self):
+ try:
+ return self.__heatclient.resources
+ except heat_exceptions.HTTPUnauthorized:
+ LOG.warning("Authorization token outdated, refreshing")
+ self.__init_heatclient()
+ return self.__heatclient.resources
+
+ def _get_resources_by_type(self, resource_type):
+ res = []
+ for item in self.__resources.list(
+ self.__config.hardware.heat_stack_name):
+ if item.resource_type == resource_type:
+ resource = self.__resources.get(
+ self.__config.hardware.heat_stack_name,
+ item.resource_name)
+ res.append(resource)
+ return res
+
+ @cached_property
+ def _nodes(self):
+        """Get the list of nodes from the Heat stack
+
+ Returns list of dicts.
+ Example:
+ - name: cfg01
+ roles:
+ - salt_master
+ addresses: # Optional. May be an empty dict
+ admin-pool01: p.p.p.202
+ - name: ctl01
+ roles:
+ - salt_minion
+ - openstack_controller
+ - openstack_messaging
+ - openstack_database
+ addresses: {} # Optional. May be an empty dict
+
+ 'name': taken from heat template resource's ['name'] parameter
+ 'roles': a list taken from resource's ['metadata']['roles'] parameter
+ """
+ address_pools = self._address_pools
+ nodes = []
+ for heat_node in self._get_resources_by_type("OS::Nova::Server"):
+ # addresses will have the following dict structure:
+ # {'admin-pool01': <floating_ip1>,
+ # 'private-pool01': <floating_ip2>,
+ # 'external-pool01': <floating_ip3>
+ # }
+ # , where key is one of roles from OS::Neutron::Subnet,
+            # and value is the floating IP associated with the fixed IP
+            # in this subnet (if it exists).
+            # If no floating IPs are associated with the server,
+            # then 'addresses' will be an empty dict.
+ addresses = {}
+ for network in heat_node.attributes['addresses']:
+ fixed = None
+ floating = None
+ for address in heat_node.attributes['addresses'][network]:
+ addr_type = address['OS-EXT-IPS:type']
+ if addr_type == 'fixed':
+ fixed = address['addr']
+ elif addr_type == 'floating':
+ floating = address['addr']
+ else:
+ LOG.error("Unexpected OS-EXT-IPS:type={0} "
+ "in node '{1}' for network '{2}'"
+ .format(addr_type,
+ heat_node.attributes['name'],
+ network))
+ if fixed is None or floating is None:
+ LOG.error("Unable to determine the correct IP address "
+ "in node '{0}' for network '{1}'"
+ .format(heat_node.attributes['name'], network))
+ continue
+ # Check which address pool has the fixed address, and set
+ # the floating address as the access to this address pool.
+ for address_pool in address_pools:
+ pool_net = netaddr.IPNetwork(address_pool['cidr'])
+ if fixed in pool_net:
+ for role in address_pool['roles']:
+ addresses[role] = floating
+
+ nodes.append({
+ 'name': heat_node.attributes['name'],
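+                # Nova stores server metadata values as strings, so the
+                # 'roles' list arrives as YAML text and is parsed here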
+ 'roles': yaml.load(heat_node.attributes['metadata']['roles']),
+ 'addresses': addresses,
+ })
+ return nodes
+
+ @cached_property
+ def _address_pools(self):
+ """Get address pools from subnets OS::Neutron::Subnet
+
+ Returns list of dicts.
+ Example:
+ - roles:
+ - admin-pool01
+ cidr: x.x.x.x/y
+ start: x.x.x.2
+ end: x.x.x.254
+ gateway: x.x.x.1 # or None
+ """
+ pools = []
+ for heat_subnet in self._get_resources_by_type("OS::Neutron::Subnet"):
+ pools.append({
+ 'roles': heat_subnet.attributes['tags'],
+ 'cidr': heat_subnet.attributes['cidr'],
+ 'gateway': heat_subnet.attributes['gateway_ip'],
+ 'start': heat_subnet.attributes[
+ 'allocation_pools'][0]['start'],
+ 'end': heat_subnet.attributes['allocation_pools'][0]['end'],
+ })
+ return pools
+
+ def _get_nodes_by_roles(self, roles=None):
+ nodes = []
+ if roles is None:
+ return self._nodes
+
+ for node in self._nodes:
+ if set(node['roles']).intersection(set(roles)):
+ nodes.append(node)
+ return nodes
+
+ def get_ssh_data(self, roles=None):
+ """Generate ssh config for Underlay
+
+ :param roles: list of strings
+ """
+ if roles is None:
+ raise Exception("No roles specified for the environment!")
+
+ config_ssh = []
+ for d_node in self._get_nodes_by_roles(roles=roles):
+ for pool_name in d_node['addresses']:
+ ssh_data = {
+ 'node_name': d_node['name'],
+ 'minion_id': d_node['name'],
+ 'roles': d_node['roles'],
+ 'address_pool': pool_name,
+ 'host': d_node['addresses'][pool_name],
+ 'login': settings.SSH_NODE_CREDENTIALS['login'],
+ 'password': settings.SSH_NODE_CREDENTIALS['password'],
+ 'keys': [k['private']
+ for k in self.__config.underlay.ssh_keys]
+ }
+ config_ssh.append(ssh_data)
+ return config_ssh
+
+ def _get_resources_with_wrong_status(self):
+ res = []
+ for item in self.__resources.list(
+ self.__config.hardware.heat_stack_name):
+ if item.resource_status in BAD_STACK_STATUSES:
+ res.append({
+ 'resource_name': item.resource_name,
+ 'resource_status': item.resource_status,
+ 'resource_status_reason': item.resource_status_reason,
+ 'resource_type': item.resource_type
+ })
+ wrong_resources = '\n'.join([
+ "*** Heat stack resource '{0}' ({1}) has wrong status '{2}': {3}"
+ .format(item['resource_name'],
+ item['resource_type'],
+ item['resource_status'],
+ item['resource_status_reason'])
+ for item in res
+ ])
+ return wrong_resources
+
+ def wait_of_stack_status(self, status, delay=30, tries=60):
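+        """Wait for the Heat stack to reach the given status
+
+        Retries on EnvironmentWrongStatus every <delay> seconds, up to
+        <tries> times; raises EnvironmentBadStatus immediately if the
+        stack goes into one of BAD_STACK_STATUSES.
+        """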
+
+ @retry(exceptions.EnvironmentWrongStatus, delay=delay, tries=tries)
+ def wait():
+ st = self._current_stack.stack_status
+ if st == status:
+ return
+ elif st in BAD_STACK_STATUSES:
+ wrong_resources = self._get_resources_with_wrong_status()
+ raise exceptions.EnvironmentBadStatus(
+ self.__config.hardware.heat_stack_name,
+ status,
+ st,
+ wrong_resources
+ )
+ else:
+ LOG.info("Stack {0} status: {1}".format(
+ self.__config.hardware.heat_stack_name, st))
+ raise exceptions.EnvironmentWrongStatus(
+ self.__config.hardware.heat_stack_name,
+ status,
+ st
+ )
+ LOG.info("Waiting for stack '{0}' status <{1}>".format(
+ self.__config.hardware.heat_stack_name, status))
+ wait()
+
+ def revert_snapshot(self, name):
+        """'Revert' to the named config snapshot
+
+        Heat doesn't support reverting to snapshots, so the environment
+        itself is left as is; only the test configuration is reloaded:
+        - Try to reload 'config' object from a file 'config_<name>.ini'
+          If the file is not found, then pass with defaults.
+        - Set <name> as the current state of the environment after reload
+
+        :param name: string
+        """
+ LOG.info("Reading INI config (without reverting env to snapshot) "
+ "named '{0}'".format(name))
+
+ try:
+ test_config_path = self._get_snapshot_config_name(name)
+ settings_oslo.reload_snapshot_config(self.__config,
+ test_config_path)
+ except cfg.ConfigFilesNotFoundError as conf_err:
+ LOG.error("Config file(s) {0} not found!".format(
+ conf_err.config_files))
+
+ self.__config.hardware.current_snapshot = name
+
+ def create_snapshot(self, name, *args, **kwargs):
+        """Create a named config 'snapshot' of the current env
+
+        Heat doesn't support live snapshots, so only the 'config'
+        object is saved to a file '<stack_name>_<name>.ini'
+
+        :param name: string
+        """
+ LOG.info("Store INI config (without env snapshot) named '{0}'"
+ .format(name))
+ self.__config.hardware.current_snapshot = name
+ settings_oslo.save_config(self.__config,
+ name,
+ self.__config.hardware.heat_stack_name)
+
+ def _get_snapshot_config_name(self, snapshot_name):
+ """Get config name for the environment"""
+ env_name = self.__config.hardware.heat_stack_name
+ if env_name is None:
+ env_name = 'config'
+ test_config_path = os.path.join(
+ settings.LOGS_DIR, '{0}_{1}.ini'.format(env_name, snapshot_name))
+ return test_config_path
+
+ def has_snapshot(self, name):
+ # Heat doesn't support live snapshots, so just
+ # check if an INI file was created for this environment,
+ # assuming that the environment has the configuration
+ # described in this INI.
+ return self.has_snapshot_config(name)
+
+ def has_snapshot_config(self, name):
+ test_config_path = self._get_snapshot_config_name(name)
+ return os.path.isfile(test_config_path)
+
+ def start(self, underlay_node_roles, timeout=480):
+ """Start environment"""
+        LOG.warning("HEAT Manager doesn't support the start environment "
+                    "feature. Waiting for the bootstrap process to finish "
+                    "on the nodes with accessible SSH")
+
+ check_cloudinit_started = '[ -f /is_cloud_init_started ]'
+ check_cloudinit_finished = ('[ -f /is_cloud_init_finished ] || '
+ '[ -f /var/log/mcp/.bootstrap_done ]')
+ check_cloudinit_failed = 'cat /is_cloud_init_failed'
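+        # 'passed' counts successful SSH/cloud-init checks per node IP;
+        # a node is treated as ready after two successful checks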
+ passed = {}
+ for node in self._get_nodes_by_roles(roles=underlay_node_roles):
+
+ try:
+ node_ip = self.node_ip(node)
+ except exceptions.EnvironmentNodeAccessError:
+                LOG.warning("Node {0} doesn't have an accessible IP "
+                            "address, skipping".format(node['name']))
+ continue
+
+ LOG.info("Waiting for SSH on node '{0}' / {1} ...".format(
+ node['name'], node_ip))
+
+ def _ssh_check(host,
+ port,
+ username=settings.SSH_NODE_CREDENTIALS['login'],
+ password=settings.SSH_NODE_CREDENTIALS['password'],
+ timeout=0):
+ try:
+ ssh = ssh_client.SSHClient(
+ host=host, port=port,
+ auth=ssh_client.SSHAuth(
+ username=username,
+ password=password))
+
+ # If '/is_cloud_init_started' exists, then wait for
+ # the flag /is_cloud_init_finished
+ if ssh.execute(check_cloudinit_started)['exit_code'] == 0:
+ result = ssh.execute(check_cloudinit_failed)
+ if result['exit_code'] == 0:
+ raise exceptions.EnvironmentNodeIsNotStarted(
+ "{0}:{1}".format(host, port),
+ result.stdout_str)
+
+ status = ssh.execute(
+ check_cloudinit_finished)['exit_code'] == 0
+ # Else, just wait for SSH
+ else:
+ status = ssh.execute('echo ok')['exit_code'] == 0
+ return status
+
+ except (AuthenticationException, BadAuthenticationType):
+ return True
+ except Exception:
+ return False
+
+ def _ssh_wait(host,
+ port,
+ username=settings.SSH_NODE_CREDENTIALS['login'],
+ password=settings.SSH_NODE_CREDENTIALS['password'],
+ timeout=0):
+
+ if host in passed and passed[host] >= 2:
+ # host already passed the check
+ return True
+
+                for n in self._get_nodes_by_roles(
+                        roles=underlay_node_roles):
+                    # Resolve each node's own accessible IP instead of
+                    # re-using the outer 'node_ip' for every node
+                    try:
+                        ip = self.node_ip(n)
+                    except exceptions.EnvironmentNodeAccessError:
+                        continue
+                    if ip not in passed:
+                        passed[ip] = 0
+                    if _ssh_check(ip, port):
+                        passed[ip] += 1
+                    else:
+                        passed[ip] = 0
+                return passed.get(host, 0) >= 2
+
+ helpers.wait(
+ lambda: _ssh_wait(node_ip, 22),
+ timeout=timeout,
+ timeout_msg="Node '{}' didn't open SSH in {} sec".format(
+ node['name'], timeout
+ )
+ )
+ LOG.info('Heat stack "{0}" ready'
+ .format(self.__config.hardware.heat_stack_name))
+
+ def _create_environment(self):
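+        """Create a Heat stack for the test environment
+
+        Uses the HOT template from config.hardware.heat_conf_path and
+        the environment file from config.hardware.heat_env_path, then
+        waits until the stack reaches CREATE_COMPLETE.
+        """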
+ tpl_files, template = template_utils.get_template_contents(
+ self.__config.hardware.heat_conf_path)
+ env_files_list = []
+ env_files, env = (
+ template_utils.process_multiple_environments_and_files(
+ env_paths=[self.__config.hardware.heat_env_path],
+ env_list_tracker=env_files_list))
+
+ fields = {
+ 'stack_name': self.__config.hardware.heat_stack_name,
+ 'template': template,
+ 'files': dict(list(tpl_files.items()) + list(env_files.items())),
+ 'environment': env,
+ }
+
+ if env_files_list:
+ fields['environment_files'] = env_files_list
+
+ self.__stacks.create(**fields)
+ self.wait_of_stack_status(EXPECTED_STACK_STATUS)
+ LOG.info("Stack '{0}' created"
+ .format(self.__config.hardware.heat_stack_name))
+
+ def stop(self):
+ """Stop environment"""
+ LOG.warning("HEAT Manager doesn't support stop environment feature")
+ pass
+
+# TODO(ddmitriev): add all Environment methods
+ @staticmethod
+ def node_ip(node, address_pool_name='admin-pool01'):
+ """Determine node's IP
+
+ :param node: a dict element from the self._nodes
+ :return: string
+ """
+ if address_pool_name in node['addresses']:
+ addr = node['addresses'][address_pool_name]
+ LOG.debug('{0} IP= {1}'.format(node['name'], addr))
+ return addr
+ else:
+ raise exceptions.EnvironmentNodeAccessError(
+ node['name'],
+                "No addresses available for the address pool {0}"
+ .format(address_pool_name))
+
+ def set_address_pools_config(self):
+ """Store address pools CIDRs in config object"""
+ for ap in self._address_pools:
+ for role in ap['roles']:
+ self.__config.underlay.address_pools[role] = ap['cidr']
+
+ def set_dhcp_ranges_config(self):
+ """Store DHCP ranges in config object"""
+ for ap in self._address_pools:
+ for role in ap['roles']:
+ self.__config.underlay.dhcp_ranges[role] = {
+ "cidr": ap['cidr'],
+ "start": ap['start'],
+ "end": ap['end'],
+ "gateway": ap['gateway'],
+ }
+
+ def wait_for_node_state(self, node_name, state, timeout):
+ raise NotImplementedError()
+
+ def warm_shutdown_nodes(self, underlay, nodes_prefix, timeout=600):
+ raise NotImplementedError()
+
+ def warm_restart_nodes(self, underlay, nodes_prefix, timeout=600):
+ raise NotImplementedError()
+
+ @property
+ def slave_nodes(self):
+ raise NotImplementedError()
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 2c0f1d9..d9eae28 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -23,3 +23,7 @@
python-jenkins
cmd2<0.9
PyYAML!=5.1
+
+# For Queens: https://github.com/openstack/requirements/blob/stable/queens/global-requirements.txt
+python-heatclient>=1.10.0
+keystoneauth1>=3.3.0
\ No newline at end of file
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 6a4a885..346506f 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -99,3 +99,11 @@
STACK_INSTALL = os.environ.get('STACK_INSTALL', None)
SKIP_SYNC_TIME = get_var_as_bool("SKIP_SYNC_TIME", False)
+
+# OpenStack parameters to work with Heat stacks
+OS_STACK_NAME = os.environ.get("OS_STACK_NAME", ENV_NAME)
+OS_HEAT_VERSION = os.environ.get('OS_HEAT_VERSION', 1)
+OS_AUTH_URL = os.environ.get('OS_AUTH_URL', None)
+OS_USERNAME = os.environ.get('OS_USERNAME', None)
+OS_PASSWORD = os.environ.get('OS_PASSWORD', None)
+OS_PROJECT_NAME = os.environ.get('OS_PROJECT_NAME', None)
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 4b1f14a..5717875 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -27,6 +27,10 @@
_default_conf = pkg_resources.resource_filename(
__name__, 'templates/{0}/underlay.yaml'.format(settings.LAB_CONFIG_NAME))
+_default_heat_conf = pkg_resources.resource_filename(
+ __name__, 'templates/{0}/underlay.hot'.format(settings.LAB_CONFIG_NAME))
+_default_heat_env = pkg_resources.resource_filename(
+ __name__, 'templates/_heat_environments/microcloud-8116.env')
_default_salt_steps = pkg_resources.resource_filename(
__name__, 'templates/{0}/salt.yaml'.format(settings.LAB_CONFIG_NAME))
@@ -89,6 +93,13 @@
ct.Cfg('current_snapshot', ct.String(),
help="Latest environment status name",
default=ext.SNAPSHOT.hardware),
+
+ ct.Cfg('heat_stack_name', ct.String(),
+ help="Heat stack name", default=''),
+ ct.Cfg('heat_conf_path', ct.String(),
+ help="Heat template file", default=_default_heat_conf),
+ ct.Cfg('heat_env_path', ct.String(),
+ help="Heat environment parameters file", default=_default_heat_env),
]
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh b/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh
new file mode 100755
index 0000000..ed0e190
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+
+#. /root/keystonercv3
+
+#heat -v --debug stack-create teststack \
+# --template-file ../cookied-cicd-queens-dvr-sl/underlay.hot \
+# --environment-file microcloud-8116.env \
+# --parameters keypair=baremetal
+
+set -ex
+
+cd $(pwd)/../../../
+export PYTHONIOENCODING=UTF-8
+export PYTHONPATH=$(pwd)
+
+export IMAGE_PATH1604=/home/jenkins/images/ubuntu-16-04-x64-mcp2019.2.0.qcow2
+export IMAGE_PATH_CFG01_DAY01=/home/jenkins/images/cfg01-day01.qcow2
+export REPOSITORY_SUITE=2019.2.0
+
+export MANAGER=heat
+
+export ENV_NAME=test_env_queens
+export LAB_CONFIG_NAME=cookied-cicd-queens-dvr-sl
+
+export OS_AUTH_URL=https://10.90.0.80:5000/v3
+export OS_USERNAME=admin
+export OS_PASSWORD=sacLMXAucxABoxT3sskVRHMbKuwa1ZIv
+export OS_PROJECT_NAME=admin
+
+#export TEST_GROUP=test_create_environment
+export TEST_GROUP=test_bootstrap_salt
+py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k ${TEST_GROUP}
+#dos.py start test-lab-for-ironic
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8116.env b/tcp_tests/templates/_heat_environments/microcloud-8116.env
new file mode 100644
index 0000000..a4cd821
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/microcloud-8116.env
@@ -0,0 +1,18 @@
+---
+
+parameter_defaults:
+ flavor_medium: baremetal
+ image_ubuntu_cloud_xenial: ironic_provision_image
+ #keypair: system-ci-keypair
+ keypair: baremetal
+
+ net_public: public
+
+ # ironic-specific parameters
+ management_physical_network: ironicnet1
+ management_subnet_cidr: 10.13.0.0/24
+ management_subnet_pool_start: 10.13.0.20
+ management_subnet_pool_end: 10.13.0.60
+ management_subnet_gateway_ip: 10.13.0.1
+ dns_nameservers: 172.18.208.44
+...
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
index 81936a4..006a798 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml
@@ -1,84 +1,79 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
- disable_root: false
- chpasswd:
- list: |
+disable_root: false
+chpasswd:
+ list: |
root:r00tme
- expire: False
+ expire: False
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
- runcmd:
- - if lvs vg0; then pvresize /dev/vda3; fi
- - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+runcmd:
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
- # Enable grub menu using updated config below
- - update-grub
+ # Enable grub menu using updated config below
+ - update-grub
- # Prepare network connection
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
- # Create swap
- - fallocate -l 16G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
- - path: /usr/share/growlvm/image-layout.yml
- content: |
- root:
- size: '30%VG'
- home:
- size: '1G'
- var_log:
- size: '11%VG'
- var_log_audit:
- size: '5G'
- var_tmp:
- size: '11%VG'
- tmp:
- size: '5G'
- owner: root:root
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '30%VG'
+ home:
+ size: '1G'
+ var_log:
+ size: '11%VG'
+ var_log_audit:
+ size: '5G'
+ var_tmp:
+ size: '11%VG'
+ tmp:
+ size: '5G'
+ owner: root:root
- growpart:
- mode: auto
- devices:
- - '/'
- - '/dev/vda3'
- ignore_growroot_disabled: false
+growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
index c529421..8f17648 100644
--- a/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-queens-dvr-sl/underlay.yaml
@@ -3,12 +3,14 @@
{% import 'cookied-cicd-queens-dvr-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-cicd-queens-dvr-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
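+{#- Wrap the plain (unindented) cloud-config file into a macro so it
+    can be re-indented below and embedded as a YAML block scalar #}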
+{%- macro user_data() %}{{ CLOUDINIT_USER_DATA_1604_SWP }}{% endmacro %}
---
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+ - &cloudinit_user_data_1604_swp |
+{{ user_data()|indent(4, first=True) }}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-queens-dvr-sl') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
diff --git a/tcp_tests/utils/gen_test_config.py b/tcp_tests/utils/gen_test_config.py
new file mode 100755
index 0000000..17360ab
--- /dev/null
+++ b/tcp_tests/utils/gen_test_config.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
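+# Generate a tcp-tests INI config ("<stack_name>_<snapshot>.ini") based on
+# the 'hosts' output of an existing Heat stack, intended for attaching the
+# tests to an already created environment (manager: heat)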
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
+import copy
+import ConfigParser
+
+import json
+
+from itertools import chain
+from itertools import ifilter
+from collections import namedtuple
+from collections import OrderedDict
+from heatclient import client as heatclient
+from keystoneauth1.identity import V2Password
+from keystoneauth1.session import Session as KeystoneSession
+
+SshHost = namedtuple("SshHost",
+ "roles node_name host login password keys")
+
+# host_tmpl = {
+# "roles": ["salt_master"],
+# "node_name": "cfg01.mk22-lab-dvr.local",
+# "host": "172.16.10.100",
+# "address_pool": "admin-pool01",
+# "login": "root",
+# "password": "r00tme"}
+
+CONFIG_TMPL = OrderedDict([
+ ('hardware', {
+ 'manager': 'heat',
+ 'current_snapshot': None
+ }),
+ ('underlay', {
+ 'ssh': None,
+ 'roles': None,
+ }),
+ ('salt', {
+ 'salt_master_host': None
+ })
+])
+
+
+def fill_hosts(hosts, ssh_key=None):
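+    # Convert entries of the stack 'hosts' output into SshHost tuples;
+    # the ubuntu/ubuntu credentials below are hardcoded defaults and may
+    # need to be adjusted for other images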
+ ret = []
+ for h in hosts:
+
+ ret.append(
+ SshHost(
+ roles=h['roles'],
+ node_name=h['hostname'],
+ host=h['public_ip'],
+ login='ubuntu',
+ password='ubuntu',
+ keys=[ssh_key] if ssh_key else []))
+ return ret
+
+
+def get_heat():
+ keystone_auth = V2Password(
+ auth_url=os.environ['OS_AUTH_URL'],
+ username=os.environ['OS_USERNAME'],
+ password=os.environ['OS_PASSWORD'],
+ tenant_name=os.environ['OS_TENANT_NAME'])
+ session = KeystoneSession(auth=keystone_auth, verify=False)
+ endpoint_url = session.get_endpoint(
+ service_type='orchestration',
+ endpoint_type='publicURL')
+ heat = heatclient.Client(
+ version='1',
+ endpoint=endpoint_url,
+ token=session.get_token())
+ return heat
+
+
+def fill_config(hosts, env_name, last_snapshot):
+ ini = copy.deepcopy(CONFIG_TMPL)
+
+ ini['hardware']['current_snapshot'] = last_snapshot
+ ini['underlay']['ssh'] = json.dumps([h.__dict__ for h in hosts])
+ ini['underlay']['roles'] = json.dumps(
+ list(set(chain(*[h.roles for h in hosts]))))
+ ini['salt']['salt_master_host'] = next(h.host for h in hosts
+ if 'salt-master' in h.roles)
+
+ return ini
+
+
+def save_ini_config(ini, filename):
+ config = ConfigParser.ConfigParser()
+
+ for s in ini:
+ config.add_section(s)
+ for k, v in ini[s].items():
+ config.set(s, k, v)
+
+ with open(filename, 'w') as f:
+ config.write(f)
+
+
+def print_help():
+ text = """
+    Usage: {command} HEAT_STACK_NAME HEAT_SNAPSHOT_NAME [SSH_KEY]
+ """.format(command=sys.argv[0])
+ print(text)
+ sys.exit(1)
+
+
+def main():
+ if len(sys.argv) < 3:
+ print_help()
+
+ heat = get_heat()
+ env_name = sys.argv[1]
+ snapshot = sys.argv[2]
+ ssh_key = next(iter(sys.argv[3:]), None)
+ stack = heat.stacks.get(env_name)
+ hosts = next(ifilter(
+ lambda v: v['output_key'] == 'hosts', stack.outputs))['output_value']
+ hosts = list(chain(*hosts))
+ hosts = fill_hosts(hosts, ssh_key=ssh_key)
+ ini = fill_config(hosts, env_name, snapshot)
+ save_ini_config(ini, "{name}_{snapshot}.ini".format(name=env_name,
+ snapshot=snapshot))
+
+
+if __name__ == '__main__':
+ main()