Use MaaS in system tests
Change-Id: Ibaa90ab8a5f290c2a076976243ec0d2511ce8295
diff --git a/tcp_tests/fixtures/day1_fixtures.py b/tcp_tests/fixtures/day1_fixtures.py
new file mode 100644
index 0000000..ff3a0b5
--- /dev/null
+++ b/tcp_tests/fixtures/day1_fixtures.py
@@ -0,0 +1,162 @@
+# Copyright 2018 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from collections import namedtuple
+import pytest
+
+from tcp_tests.helpers import ext
+from tcp_tests import logger
+from tcp_tests.managers import saltmanager
+from tcp_tests.managers import underlay_ssh_manager
+
+LOG = logger.logger
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.day1_underlay)
+@pytest.fixture(scope="function")
+def day1_underlay(revert_snapshot, config, hardware):
+ """Fixture that should provide SSH access to underlay objects.
+
+ - Starts the 'hardware' environment and creates 'underlay' with required
+ configuration.
+ - Fills the following object using the 'hardware' fixture:
+          config.day1_underlay.ssh = JSONList of SSH access credentials
+                                   for nodes. This list is used to initialize
+                                   the UnderlaySSHManager model; see it for
+                                   details.
+
+    :rtype UnderlaySSHManager: Object that encapsulates SSH credentials;
+ - provide list of underlay nodes;
+ - provide SSH access to underlay nodes using
+ node names or node IPs.
+ """
+ # Create Underlay
+ if not config.day1_underlay.ssh:
+        # If config.day1_underlay.ssh wasn't provided from an external
+        # config, try to get the necessary data from the hardware manager
+        # (fuel-devops)
+
+ # for devops manager: power on nodes and wait for SSH
+ # for empty manager: do nothing
+ # for maas manager: provision nodes and wait for SSH
+        # Note: only the salt_master node is started here; the remaining
+        # nodes are provisioned later by MaaS.
+ hardware.start(
+ underlay_node_roles=['salt_master'],
+ timeout=config.underlay.bootstrap_timeout)
+
+ config.day1_underlay.ssh = hardware.get_ssh_data(
+ roles=config.underlay.roles)
+
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+ LOG.info("Generate MACs for MaaS")
+ macs = {
+ n.name.split('.')[0]: {
+ "interface": {
+ "mac": n.get_interface_by_network_name('admin').mac_address}} # noqa
+ for n in hardware.slave_nodes}
+
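+    # Wrap the MACs into the reclass pillar structure expected by the MaaS
+    # formula; cfg01_configure.yaml later dumps it into infra/maas-machines.yml.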
+ config.day1_cfg_config.maas_machines_macs = {
+ "parameters": {
+ "maas": {
+ "region": {
+ "machines": macs}}}}
+
+ if not config.day1_underlay.lvm:
+ underlay.enable_lvm(hardware.lvm_storages())
+ config.day1_underlay.lvm = underlay.config_lvm
+
+ hardware.create_snapshot(ext.SNAPSHOT.day1_underlay)
+
+ else:
+ # 1. hardware environment created and powered on
+        # 2. config.day1_underlay.ssh contains SSH access to provisioned nodes
+ # (can be passed from external config with TESTS_CONFIGS variable)
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+ return underlay
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.cfg_configured)
+@pytest.fixture(scope='function')
+def day1_cfg_config(revert_snapshot, request, config, hardware, underlay,
+ salt_actions, snapshot, grab_versions):
+ """Fixture to get or install cfg node from day1 image on environment
+
+ :param revert_snapshot: fixture that reverts snapshot that is specified
+ in test with @pytest.mark.revert_snapshot(<name>)
+ :param request: fixture provides pytest data
+ :param config: fixture provides oslo.config
+    :param hardware: fixture provides environment manager
+    :param underlay: fixture provides underlay manager
+ :param salt_actions: fixture provides SaltManager instance
+ :rtype: SaltManager
+
+    If config.salt.salt_master_host is not set (default '0.0.0.0'), this
+    fixture assumes that salt is not installed yet and does the following:
+ - install salt master and salt minions
+ - make snapshot with name 'cfg_configured'
+ - return SaltManager
+
+    If config.salt.salt_master_host is set, this fixture assumes that salt
+    is already deployed and does the following:
+ - return SaltManager instance
+
+ If you want to revert 'cfg_configured' snapshot, please use mark:
+ @pytest.mark.revert_snapshot("cfg_configured")
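+
+    A minimal usage sketch (the test name is hypothetical):
+
+        @pytest.mark.revert_snapshot("cfg_configured")
+        def test_something(day1_cfg_config):
+            salt = day1_cfg_config.salt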
+ """
+ # Create Salt cluster
+ if config.salt.salt_master_host == '0.0.0.0':
+ # Temporary workaround. Underlay should be extended with roles
+ config.salt.salt_master_host = \
+ underlay.host_by_node_role(
+ node_role=ext.UNDERLAY_NODE_ROLES.salt_master)
+
+ commands = underlay.read_template(
+ config.day1_cfg_config.configure_steps_path)
+ LOG.info("############ Executing command ####### {0}".format(commands))
+ salt_actions.install(commands)
+
+ salt_nodes = salt_actions.get_ssh_data()
+ config.underlay.ssh = config.underlay.ssh + \
+ [node for node in salt_nodes
+ if not any(node['node_name'] == n['node_name']
+ for n in config.underlay.ssh)]
+
+ hardware.create_snapshot(ext.SNAPSHOT.cfg_configured)
+
+ else:
+ # 1. hardware environment created and powered on
+ # 2. config.underlay.ssh contains SSH access to provisioned nodes
+ # (can be passed from external config with TESTS_CONFIGS variable)
+ # 3. config.tcp.* options contain access credentials to the already
+ # installed TCP API endpoint
+ pass
+
+ salt_actions.sync_time()
+
+    Collection = namedtuple('Collection', ['salt', 'underlay', 'config'])
+
+ return Collection(salt_actions, underlay, config)
+
+
+@pytest.fixture(scope='function')
+def day1_salt_actions(config, day1_underlay):
+ """Fixture that provides various actions for salt
+
+ :param config: fixture provides oslo.config
+ :param day1_underlay: fixture provides underlay manager
+ :rtype: SaltManager
+ """
+ return saltmanager.SaltManager(config, day1_underlay)
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index d991548..9b4bed0 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -152,7 +152,7 @@
@pytest.mark.revert_snapshot(ext.SNAPSHOT.underlay)
@pytest.fixture(scope="function")
-def underlay(revert_snapshot, config, hardware):
+def underlay(request, revert_snapshot, config, hardware):
"""Fixture that should provide SSH access to underlay objects.
- Starts the 'hardware' environment and creates 'underlay' with required
@@ -168,7 +168,8 @@
node names or node IPs.
"""
# Create Underlay
- if not config.underlay.ssh:
+
+ def basic_underlay():
# If config.underlay.ssh wasn't provided from external config, then
# try to get necessary data from hardware manager (fuel-devops)
@@ -189,6 +190,53 @@
hardware.create_snapshot(ext.SNAPSHOT.underlay)
+ return underlay
+
+ def day1_underlay():
+ hardware.start(
+ underlay_node_roles=['salt_master'],
+ timeout=config.underlay.bootstrap_timeout)
+
+ config.underlay.ssh = hardware.get_ssh_data(
+ roles=config.underlay.roles)
+
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+ LOG.info("Generate MACs for MaaS")
+ macs = {
+ n.name.split('.')[0]: {
+ "interface": {
+ "mac": n.get_interface_by_network_name('admin').mac_address}, # noqa
+ "power_parameters": {
+ "power_address": "{}:{}".format(
+ n.get_interface_by_network_name('admin').l2_network_device.address_pool.get_ip('l2_network_device'), # noqa
+ n.bmc_port
+ )}} for n in hardware.slave_nodes}
+
+ config.day1_cfg_config.maas_machines_macs = {
+ "parameters": {
+ "maas": {
+ "region": {
+ "machines": macs}}}}
+
+ if not config.underlay.lvm:
+ underlay.enable_lvm(hardware.lvm_storages())
+ config.underlay.lvm = underlay.config_lvm
+
+ for node in hardware.slave_nodes:
+            # For correct commissioning by MaaS, nodes should be powered off
+ node.destroy()
+
+ hardware.create_snapshot(ext.SNAPSHOT.underlay)
+
+ return underlay
+
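+    # Choose the provisioning flow based on the test's 'day1_underlay' marker:
+    # MaaS-driven day1 provisioning, or the usual fuel-devops underlay.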
+ if not config.underlay.ssh:
+ if request.node.get_marker('day1_underlay'):
+ underlay = day1_underlay()
+ else:
+ underlay = basic_underlay()
+
else:
# 1. hardware environment created and powered on
# 2. config.underlay.ssh contains SSH access to provisioned nodes
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 7999201..800367d 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -53,6 +53,8 @@
'k8s_deployed',
'decapod_deployed',
'ceph_deployed',
+ 'day1_underlay',
+ 'cfg_configured',
)
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 1e7f66c..d17c6bd 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -25,12 +25,12 @@
AuthenticationException,
BadAuthenticationType)
-from tcp_tests.helpers import env_config
-from tcp_tests.helpers import exceptions
-from tcp_tests.helpers import ext
-from tcp_tests import logger
from tcp_tests import settings
from tcp_tests import settings_oslo
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import ext
+from tcp_tests.helpers import exceptions
+from tcp_tests import logger
LOG = logger.logger
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 2b06dc3..b5d5f04 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -19,8 +19,8 @@
from datetime import datetime
from pepper import libpepper
from tcp_tests.helpers import utils
-from tcp_tests import settings
from tcp_tests import logger
+from tcp_tests import settings
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
LOG = logger.logger
@@ -234,7 +234,7 @@
self.__api = None
self.run_state(
tgt,
- 'cmd.run', 'service ntp stop; ntpd -gq; service ntp start')
+ 'cmd.run', 'service ntp stop; if [ -x /usr/sbin/ntpdate ]; then ntpdate -s ntp.ubuntu.com; else ntpd -gq ; fi; service ntp start') # noqa
new_time_res = self.run_state(tgt, 'cmd.run', 'date')
for node_name, time in sorted(new_time_res[0]['return'][0].items()):
LOG.info("{0}: {1}".format(node_name, time))
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 2c9ed55..5f919ce 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -236,7 +236,8 @@
address_pool=address_pool)
return ssh_data['host']
- def remote(self, node_name=None, host=None, address_pool=None):
+ def remote(self, node_name=None, host=None, address_pool=None,
+ username=None):
"""Get SSHClient by a node name or hostname.
One of the following arguments should be specified:
@@ -249,7 +250,7 @@
ssh_data = self.__ssh_data(node_name=node_name, host=host,
address_pool=address_pool)
ssh_auth = ssh_client.SSHAuth(
- username=ssh_data['login'],
+ username=username or ssh_data['login'],
password=ssh_data['password'],
keys=[rsakey.RSAKey(file_obj=StringIO.StringIO(key))
for key in ssh_data['keys']])
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 64769ed..3d1b4d2 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -11,7 +11,7 @@
docker-compose==1.7.1
urllib3
junit-xml
-jinja2>=2.1
+jinja2>=2.9
jira
testrail<=0.3.8
functools32
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 062e421..8ba9f4d 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -67,7 +67,13 @@
__name__, 'templates/{0}/k8s.yaml'.format(
settings.LAB_CONFIG_NAME))
_default_net_mgm = os.environ.get("NET_MGMT", "admin-pool01")
-
+_default_configure_steps = pkg_resources.resource_filename(
+ __name__, 'templates/{0}/cfg01_configure.yaml'.format(
+ settings.LAB_CONFIG_NAME))
+# _default_cluster_maas_config = pkg_resources.resource_filename(
+# __name__, 'templates/{0}/cluster_infra_maas.yml'.format(
+# settings.LAB_CONFIG_NAME))
+_default_cluster_maas_config = 'cluster_infra_maas.yml'
hardware_opts = [
ct.Cfg('manager', ct.String(),
@@ -352,6 +358,38 @@
'kubernetes/k8s-conformance:v1.8.13-11')
]
+day1_cfg_config_opts = [
+ ct.Cfg('configure_steps_path', ct.String(),
+ help="Path to YAML with steps to config cfg01 node",
+ default=_default_configure_steps),
+ ct.Cfg('environment_template_dir', ct.String(),
+ help="Path to directory with Environment template and inventory",
+ default=_default_environment_template_dir),
+ ct.Cfg('templates_dir', ct.String(),
+ help="Path to directory with templates",
+ default=_default_templates_dir),
+ ct.Cfg('cluster_maas_config', ct.String(),
+ help="Path to maas class yaml file for cfg node",
+ default=_default_cluster_maas_config),
+ ct.Cfg('maas_machines_macs', ct.JSONDict(),
+ help="""MAC of machines interfaces for maas config:
+ 'parameters': {
+ 'maas' : {
+ 'region' : {
+ 'machines': {
+ 'ctl01': {
+ 'interface': {
+ 'mac': 'aa:bb:cc:dd:ee:ff'
+ }
+                    },
+ '...': {
+ 'interface': {
+ 'mac': 'aa:bb:cc:dd:ee:ff'
+ }
+ }
+ }}}}""", default={}),
+]
+
_group_opts = [
('hardware', hardware_opts),
('underlay', underlay_opts),
@@ -374,6 +412,8 @@
('ceph_deploy', ceph_deploy_opts),
('k8s_deploy', k8s_deploy_opts),
('k8s', k8s_opts),
+ ('day1_cfg_config', day1_cfg_config_opts),
+ ('day1_underlay', underlay_opts),
]
@@ -472,6 +512,14 @@
help=""))
config.register_opts(group='ceph_deploy', opts=ceph_deploy_opts)
+ config.register_group(cfg.OptGroup(name='day1_cfg_config',
+ title="Day01 config node configuration", help=""))
+ config.register_opts(group='day1_cfg_config', opts=day1_cfg_config_opts)
+
+ config.register_group(cfg.OptGroup(name='day1_underlay',
+ title="Day01 underlay configuration", help=""))
+ config.register_opts(group='day1_underlay', opts=underlay_opts)
+
return config
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index f3b8414..4c8e787 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -149,6 +149,68 @@
skip_fail: false
{%- endmacro %}
+
+{%- macro MACRO_CONFIG_DAY01_SALT_MASTER() %}
+{######################################}
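+{#- Prepares the cfg01 node built from the day01 image: static admin-network
+    config, base packages, a clean set of salt-minion keys, and a git
+    identity for committing model changes. -#}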
+
+- description: Remove /etc/update-motd.d/52-info
+ cmd: rm -vf /etc/update-motd.d/52-info
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Set up static interface config
+ cmd: |
+ kill $(pidof /sbin/dhclient) || /bin/true
+ cat << 'EOF' > /etc/network/interfaces
+ # This file describes the network interfaces available on your system
+ # and how to activate them. For more information, see interfaces(5).
+
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+
+ # The primary network interface
+ auto ens3
+ iface ens3 inet static
+ address {{ IPV4_NET_ADMIN_PREFIX }}.90
+ netmask 255.255.255.0
+ gateway {{ IPV4_NET_ADMIN_PREFIX }}.1
+ EOF
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install common packages on cfg01
+ cmd: eatmydata apt-get update && apt-get install -y python-pip git curl at tmux byobu iputils-ping traceroute htop tree wget jq ntpdate
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Remove any existing minion keys
+ cmd: salt-key -y -D || true
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure GIT settings and certificates
+ cmd: |
+ set -e;
+ #touch /root/.git_trusted_certs.pem;
+ #for server in github.com; do \
+ # openssl s_client -showcerts -connect $server:443 </dev/null \
+ # | openssl x509 -outform PEM \
+ # >> /root/.git_trusted_certs.pem;
+ #done;
+ #HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+ HOME=/root git config --global user.email "mcp-integration-qa@example.com";
+ HOME=/root git config --global user.name "MCP Integration QA";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endmacro %}
+
+
{%- macro MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=false) %}
{############################################################}
{# Creates a 'cluster' model from cookiecutter-templates and 'environment' model from uploaded template #}
@@ -223,6 +285,8 @@
[ -f ${CFG01_INVENTORY_FILE} ] || cat << 'EOF' > ${CFG01_INVENTORY_FILE}
classes:
- cluster.{{ CLUSTER_NAME }}.infra.config
+ - cluster.{{ CLUSTER_NAME }}.infra.maas
+ - cluster.{{ CLUSTER_NAME }}.infra.maas-machines
parameters:
_param:
linux_system_codename: xenial
@@ -565,6 +629,66 @@
{%- endmacro %}
+{%- macro MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='') %}
+{#######################################################}
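+{#- Installs the salt-formula-* packages listed in FORMULA_SERVICES and links
+    their reclass service metadata under /srv/salt/reclass/classes/service. -#}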
+- description: Configure reclass
+ cmd: |
+ set -e;
+ FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+ which wget > /dev/null || (apt-get update; apt-get install -y wget);
+ . /etc/lsb-release; # Get DISTRIB_CODENAME variable
+ # echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
+ # wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
+ apt-get clean; apt-get update;
+ [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+ declare -a formula_services=({{ FORMULA_SERVICES }});
+ echo -e "\nInstalling all required salt formulas\n";
+ eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
+ for formula_service in "${formula_services[@]}"; do
+ echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+ [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+ done;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Show reclass-salt --top for cfg01 node
+ cmd: reclass-salt --top
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart salt-master service
+ cmd: systemctl restart salt-master;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_CONFIG_DAY01_SALT_MINION() %}
+{#######################################}
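+{#- Registers cfg01's own salt-minion on the master and deletes any other
+    previously accepted minion keys. -#}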
+- description: Configure salt-minion on {{ HOSTNAME_CFG01 }}
+ cmd: |
+ export SALT_MASTER_MINION_ID={{ HOSTNAME_CFG01 }}
+ envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
+ service salt-minion restart
+
+ while true; do
+ salt-key | grep "$SALT_MASTER_MINION_ID" && break
+ sleep 5
+ done
+
+ sleep 5
+
+ for i in `salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"`; do
+ salt-key -d $i -y
+ done
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endmacro %}
+
+
{%- macro MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() %}
{##################################################}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
new file mode 100644
index 0000000..e2ba165
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -0,0 +1,154 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-mcp-pike-dvr.local
+ cluster_name: virtual-mcp-pike-dvr
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.70
+ stacklight_log_hostname: mon
+ stacklight_log_node01_address: 172.16.10.107
+ stacklight_log_node01_hostname: mon01
+ stacklight_log_node02_address: 172.16.10.108
+ stacklight_log_node02_hostname: mon02
+ stacklight_log_node03_address: 172.16.10.109
+ stacklight_log_node03_hostname: mon03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 172.16.10.70
+ stacklight_telemetry_hostname: mon
+ stacklight_telemetry_node01_address: 172.16.10.107
+ stacklight_telemetry_node01_hostname: mon01
+ stacklight_telemetry_node02_address: 172.16.10.108
+ stacklight_telemetry_node02_hostname: mon02
+ stacklight_telemetry_node03_address: 172.16.10.109
+ stacklight_telemetry_node03_hostname: mon03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml
new file mode 100644
index 0000000..0127547
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - features_designate_pool_manager_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ dns01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:openstack_dns_node01_address}
+
+ dns02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
new file mode 100644
index 0000000..6f11a9a
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cfg01_configure.yaml
@@ -0,0 +1,146 @@
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+{% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_CONFIG_DAY01_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: Import ssh key for jenkins user
+ cmd: |
+ mkdir -p /var/lib/jenkins/.ssh && \
+ ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && \
+ chown jenkins /var/lib/jenkins/.ssh/known_hosts
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Upload MaaS config
+ upload:
+ local_path: {{ config.day1_cfg_config.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: {{ config.day1_cfg_config.cluster_maas_config }}
+ remote_path: /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+- description: Rename MaaS config
+ cmd: mv -v /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/{{ config.day1_cfg_config.cluster_maas_config }} /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/maas.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
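+# Convert the JSON-encoded maas_machines_macs pillar from the test config into
+# YAML so that reclass can include it as infra/maas-machines.yml.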
+- description: Save machine MACs for MaaS
+ cmd: |
+ echo -n '{{ config.day1_cfg_config.maas_machines_macs | tojson }}' | \
+ python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)' > /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/maas-machines.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: False
+
+{#
+{{ SHARED.MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='"fluentd"') }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+#}
+
+{{ SHARED.MACRO_CONFIG_DAY01_SALT_MINION() }}
+
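+# Replace the salt master IP hardcoded in the Jenkins location config
+# (presumably baked into the day01 image) with the actual deploy IP.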
+- description: Fix config for Jenkins
+ cmd: |
+ export SALT_MASTER_MINION_ID={{ HOSTNAME_CFG01 }}
+ find /var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$SALT_MASTER_DEPLOY_IP'/g'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Setup pipeline libraries
+ cmd: |
+ export PIPELINE_REPO_URL=https://github.com/Mirantis
+ git clone --mirror $PIPELINE_REPO_URL/mk-pipelines.git /home/repo/mk/mk-pipelines/
+ git clone --mirror $PIPELINE_REPO_URL/pipeline-library.git /home/repo/mcp-ci/pipeline-library/
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Refresh pillars before generating nodes
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure network, linux, openssh and salt on cfg01 node
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls linux.network,linux,openssh,salt
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+#- description: Restart MaaS services before run state (need to prevent maas stucking)
+# cmd: systemctl restart maas-regiond && systemctl restart maas-rackd
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+- description: Configure MaaS (cluster) on cfg01 node
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls maas.cluster
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure MaaS (region) on cfg01 node
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls maas.region
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Configure reclass on cfg01 node
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls reclass
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure jenkins on cfg01 node
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls jenkins.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# - description: Hack gtw node
+# cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+# - description: Hack cmp01 node
+# cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+# - description: Hack cmp02 node
+# cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml
new file mode 100644
index 0000000..56394da
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/cluster_infra_maas.yml
@@ -0,0 +1,147 @@
+---
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.maas
+- system.maas.region.single
+- service.jenkins.client
+- system.jenkins.client.credential.salt
+- system.jenkins.client.job.deploy.openstack
+- cluster.virtual-mcp-pike-dvr.infra
+parameters:
+ _param:
+ maas_admin_username: mirantis
+ maas_admin_password: r00tme
+ maas_db_password: fRqC7NJrBR0x
+ dns_server01: 8.8.8.8
+ maas_region_port: 5240
+ maas_cluster_region_port: ${_param:maas_region_port}
+ infra_config_deploy_address: ${_param:reclass_config_master}
+    jenkins_git_url: 'git@cfg01:/home/repo'
+    jenkins_gerrit_url: ${_param:jenkins_git_url}
+    salt_api_password: hovno12345!
+ jenkins_salt_api_url: "http://${_param:reclass_config_master}:6969"
+ jenkins_pipeline_library_url: git@cfg01:/home/repo/mcp-ci/pipeline-library
+ jenkins_pipelines_branch: master
+ jenkins:
+ client:
+ lib:
+ pipeline-library:
+ url: ${_param:jenkins_pipeline_library_url}
+ branch: ${_param:jenkins_pipelines_branch}
+ master:
+ host: ${_param:reclass_config_master}
+ port: 8081
+ password: r00tme
+ maas:
+ region:
+ commissioning_scripts:
+ 00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
+ bind:
+ host: ${_param:reclass_config_master}:${_param:maas_region_port}
+ port: 5240
+ maas_config:
+ main_archive: http://mirror.mirantis.com/${_param:apt_mk_version}/ubuntu/
+ disk_erase_with_secure_erase: false
+ machines:
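+        # The interface MACs and power addresses below are left commented out;
+        # the test run generates the real values into infra/maas-machines.yml.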
+ ctl01: # cz7341-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "0c:c4:7a:33:1f:e4"
+ # ip: ${_param:infra_kvm_node01_deploy_address}
+ # subnet: "deploy_network" # create it manually... in UI
+ # gateway: ${_param:deploy_network_gateway}
+ power_parameters:
+ # power_address: "185.8.59.161"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+      ctl02: # cz7342-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "0c:c4:7a:33:20:fc"
+ # # ip: ${_param:infra_kvm_node02_deploy_address}
+ power_parameters:
+ # power_address: "185.8.59.162"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+      ctl03: # cz7343-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "0c:c4:7a:31:fb:b6"
+ # ip: ${_param:infra_kvm_node03_deploy_address}
+ power_parameters:
+ # power_address: "185.8.59.163"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+      gtw01: # cz7055-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "00:25:90:e3:3b:26"
+ # ip: ${_param:infra_kvm_node06_deploy_address}
+ power_parameters:
+ # power_address: "176.74.222.106"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+ cmp01: # cz7054-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "00:25:90:e3:37:2e"
+ power_parameters:
+ # power_address: "176.74.222.104"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+ cmp02: #cz7056-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "00:25:90:e3:3f:2a"
+ power_parameters:
+ # power_address: "176.74.222.108"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+ dns01: #cz7056-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "00:25:90:e3:3f:2a"
+ power_parameters:
+ # power_address: "176.74.222.108"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+ dns02: #cz7056-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "00:25:90:e3:3f:2a"
+ power_parameters:
+ # power_address: "176.74.222.108"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+ prx01: #cz7056-kvm.host-telecom.com
+ distro_series: "xenial"
+ # hwe_kernel: "hwe-16.04"
+ interface:
+ # mac: "00:25:90:e3:3f:2a"
+ power_parameters:
+ # power_address: "176.74.222.108"
+ power_password: "r00tme"
+ power_type: ipmi
+ power_user: admin
+
+ cluster:
+ region:
+ host: ${_param:reclass_config_master}:${_param:maas_cluster_region_port}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml
new file mode 100644
index 0000000..965d297
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
new file mode 100644
index 0000000..45ededb
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/openstack.yaml
@@ -0,0 +1,395 @@
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install Designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@powerdns:server' state.sls powerdns.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+ # Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Sync time on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
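+# The string piped to fdisk answers its interactive prompts in order:
+# 'n' (new partition; the doubled character in "nn" is harmless since fdisk
+# only honours the first character of an answer line), 'p' (primary), three
+# empty lines accepting the default partition number and first/last sectors,
+# and 'w' (write the table and exit), leaving /dev/vdb1 ready for pvcreate.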
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create cinder-volumes volume group on ctl nodes
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
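+# crudini edits INI-style files in place; each workaround step below is
+# equivalent to writing 'enabled_backends = lvm' into the [DEFAULT] section
+# of /etc/cinder/cinder.conf, pointing cinder-volume at the LVM backend
+# prepared on /dev/vdb1 above.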
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Copy keystonercv3 rc file from ctl01 to cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file to gtw01
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
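+
+# Each entry in this file follows the same step schema, presumably consumed
+# by the execute_commands() helper in tcp_tests: 'cmd' runs on 'node_name',
+# is retried 'retry.count' times with 'retry.delay' seconds between attempts,
+# and a non-zero exit status only aborts the run when 'skip_fail' is false.
+# A minimal step (illustrative only, not part of this deployment) would be:
+#
+#  - description: Ping the ctl01 minion
+#    cmd: salt --hard-crash --state-output=mixed 'ctl01*' test.ping
+#    node_name: {{ HOSTNAME_CFG01 }}
+#    retry: {count: 1, delay: 5}
+#    skip_fail: false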
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/overrides-policy.yml
@@ -0,0 +1,40 @@
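+# Keys with an explicit value override the rule in the service policy file;
+# keys left empty (e.g. 'compute:create:attach_network') are presumably
+# removed from the policy file by the corresponding salt formula, following
+# the usual convention for absent policy values in these formulas.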
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
new file mode 100644
index 0000000..b3818b7
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/sl.yaml
@@ -0,0 +1,177 @@
+{% from 'virtual-mcp-pike-dvr-maas/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
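+# Targeting note: '-C' turns on Salt compound matching, and 'I@<pillar:path>'
+# selects minions by pillar data instead of by minion id, e.g.
+#   salt -C 'I@docker:swarm:role:master' test.ping
+# addresses only the nodes whose pillar marks them as swarm masters.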
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure docker service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves to ensure proper token population
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure fluentd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager and remote collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run docker state on swarm masters
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List docker containers on swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..be74a88
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
@@ -0,0 +1,67 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
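+ # Note: single-brace fields such as {address}, {netmask} and {gateway} are
+ # not cloud-init syntax; they appear to be str.format placeholders that the
+ # underlay manager substitutes per-node before the config-drive ISO is
+ # built (Jinja constructs in this file use double braces).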
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sudo ifdown ens3
+ - rm /etc/network/interfaces
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3 || true
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.176.6" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet static
+ address {address}
+ netmask {netmask}
+ gateway {gateway}
+ dns-nameservers 172.18.176.6
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml
new file mode 100644
index 0000000..aa5cbb5
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay.yaml
@@ -0,0 +1,512 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-mcp-pike-dvr-maas/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-dvr-maas/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
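+ # Address notation: '+N' / '-N' values are offsets from the start / end of
+ # the pool network, so '+101' in a /24 resolves to the .101 host, and
+ # 'dhcp: [+90, -10]' reserves a DHCP range from the 90th address up to the
+ # 10th address before the broadcast address.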
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: true
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: true
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ # - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ # source_image: !os_env MCP_IMAGE_PATH1604
+ # format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ bmc_port: 41623
+ bmc_network: admin
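+ # bmc_port/bmc_network presumably expose a virtual BMC (IPMI) endpoint on
+ # the admin network for each node, letting MaaS power-manage the VMs during
+ # commissioning and deployment; the non-MaaS templates do not carry these
+ # keys, so this is inferred from the field names.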
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ bmc_port: 41624
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ bmc_port: 41625
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ bmc_port: 41626
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ bmc_port: 41630
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: cloudimage1604
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ bmc_port: 41631
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: cloudimage1604
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ bmc_port: 41632
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: cloudimage1604
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ bmc_port: 41633
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ bmc_port: 41634
+ bmc_network: admin
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ # cloud_init_volume_name: iso
+ # cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ # backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+ # capacity: 1
+ # format: raw
+ # device: cdrom
+ # bus: ide
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ bmc_port: 41635
+ bmc_network: admin
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 080cb4d..1214cd3 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -26,6 +26,8 @@
from tcp_tests.fixtures.stacklight_fixtures import * # noqa
from tcp_tests.fixtures.k8s_fixtures import * # noqa
from tcp_tests.fixtures.drivetrain_fixtures import * # noqa
+from tcp_tests.fixtures.day1_fixtures import * # noqa
+
__all__ = sorted([ # sort for documentation
# common_fixtures
@@ -69,5 +71,8 @@
'ceph_action',
# k8s fixtures
'k8s_actions',
- 'k8s_deployed'
+ 'k8s_deployed',
+ 'day1_underlay',
+ 'day1_cfg_config',
+ 'day1_salt_action'
])
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 51757fd..83f3766 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -11,7 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-# import pytest
+import pytest
import time
from collections import Counter
@@ -28,6 +28,139 @@
class TestOfflineDeployment(object):
"""docstring for TestOfflineDeployment"""
+ @pytest.mark.day1_underlay
+ def test_maas_provision(self, show_step, hardware, underlay,
+ day1_cfg_config):
+ """Test for deploying an mcp dvr environment and check it
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ """
+
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ cfg_node = 'cfg01.virtual-mcp-pike-dvr.local'
+ ssh_test_key = day1_cfg_config.config.underlay.ssh_keys[0]['public']
+ verbose = True
+
+ cfg_admin_iface = next(i for i in hardware.master_nodes[0].interfaces
+ if i.network.name == 'admin')
+ admin_net = cfg_admin_iface.network.address_pool.ip_network
+
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='maas logout mirantis && '
+ 'maas login mirantis '
+ 'http://localhost:5240/MAAS/api/2.0/ '
+ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN') # noqa
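+        # Re-authenticate the local 'maas' CLI against the region API; the
+        # 'mirantis' profile and the consumer:token:secret API key appear to
+        # be lab-local credentials provisioned with the day01 image.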
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="maas mirantis package-repository update main_archive "
+ "disabled_pockets=backports,security")
+
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="maas mirantis ipranges create "
+ "type=dynamic start_ip={start} end_ip={end} "
+ "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"{net}\") | .id')".format( # noqa
+ start=admin_net[191],
+ end=admin_net[253],
+ net=admin_net))
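+        # The jq filters here and below look up the admin subnet's MAAS id
+        # and fabric id by name; a dynamic IP range is then registered and
+        # DHCP is enabled on the untagged VLAN so MaaS can enlist the nodes.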
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd="maas mirantis vlan update "
+ "$(maas mirantis subnets read | jq '.[] | select(.name==\"{net}\") | .vlan.fabric_id') " # noqa
+ "0 dhcp_on=True primary_rack='cfg01'".format(net=admin_net))
+
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='maas mirantis sshkeys create '
+ 'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
+ r, f = day1_cfg_config.salt.enforce_state('cfg01*', 'maas.machines')
+ LOG.info(r)
+ LOG.info(f)
+
+ # show_step(8)
+ nodes_amount = len(hardware.slave_nodes)
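+        # Poll the MAAS API for up to 30 minutes until every slave machine
+        # has been commissioned and reports status 'Ready'.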
+ cmd = """ timeout 1800s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq {amount} ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """.format(amount=nodes_amount) # noqa
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-key')
+ # show_step(9)
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='salt-call state.sls maas.machines.deploy')
+ # show_step(10)
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='salt-call state.sls maas.machines.wait_for_deployed')
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-key')
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+ result = \
+ day1_cfg_config.salt.get_pillar(cfg_node,
+ '_param:jenkins_salt_api_url')
+ result = result[0].get(cfg_node)
+
+ jenkins = JenkinsClient(
+ host='http://{host}:8081'.format(
+ host=day1_cfg_config.config.salt.salt_master_host),
+ username='admin',
+ password='r00tme')
+ params = jenkins.make_defults_params('deploy_openstack')
+ params['SALT_MASTER_URL'] = result
+ params['STACK_INSTALL'] = "core,openstack,ovs"
+ build = jenkins.run_build('deploy_openstack', params)
+
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 2)
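+        # The OpenStack deployment itself is delegated to the
+        # 'deploy_openstack' Jenkins pipeline on the day01 node; the test
+        # waits up to two hours and then asserts on the build result.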
+
+ assert \
+ jenkins.build_info(
+ name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
+ "Deploy openstack was failed"
+
+ openstack = managers.openstack_manager.OpenstackManager(
+ day1_cfg_config.config, underlay, hardware,
+ day1_cfg_config.salt)
+
+ if settings.RUN_TEMPEST:
+ openstack.run_tempest(pattern=settings.PATTERN)
+ openstack.download_tempest_report()
+
+ LOG.info("*************** DONE **************")
+
def test_deploy_day1(self, show_step, config, underlay, hardware,
common_services_deployed, salt_deployed):
"""Test for deploying an mcp from day01 images