Merge "Offline deploy test"
diff --git a/tcp_tests/fixtures/openstack_fixtures.py b/tcp_tests/fixtures/openstack_fixtures.py
index a704ee6..98e367c 100644
--- a/tcp_tests/fixtures/openstack_fixtures.py
+++ b/tcp_tests/fixtures/openstack_fixtures.py
@@ -22,17 +22,19 @@
@pytest.fixture(scope='function')
-def openstack_actions(config, underlay, salt_deployed):
+def openstack_actions(config, hardware, underlay, salt_deployed):
"""Fixture that provides various actions for OpenStack
:param config: fixture provides oslo.config
+    :param hardware: fixture provides hardware (devops environment) manager
:param underlay: fixture provides underlay manager
:param salt_deployed: fixture provides salt manager
:rtype: OpenstackManager
For use in tests or fixtures to deploy a custom OpenStack
"""
- return openstack_manager.OpenstackManager(config, underlay, salt_deployed)
+ return openstack_manager.OpenstackManager(config, underlay,
+ hardware, salt_deployed)
@pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index c800cc2..8afb177 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -330,6 +330,39 @@
raise exceptions.EnvironmentIsNotSet()
self.__env.destroy()
+ def destroy_node(self, node_name):
+ """Destroy node"""
+ node = self.__env.get_node(name=node_name)
+ node.destroy()
+
+ def start_node(self, node_name):
+ """Start node"""
+ node = self.__env.get_node(name=node_name)
+ node.start()
+
+ def reboot_node(self, node_name):
+ """Reboot node"""
+ node = self.__env.get_node(name=node_name)
+ node.reboot()
+
+ def remove_node(self, node_name):
+ """Remove node"""
+ node = self.__env.get_node(name=node_name)
+ node.remove()
+
+ def wait_for_node_state(self, node_name, state, timeout):
+ node = self.__env.get_node(name=node_name)
+ if 'active' in state:
+ helpers.wait(lambda: node.is_active(),
+ timeout=timeout,
+ timeout_msg=('Node {0} failed '
+ 'to become active'.format(node)))
+ else:
+ helpers.wait(lambda: not node.is_active(),
+ timeout=timeout,
+ timeout_msg=('Node {0} failed '
+                                      'to become offline'.format(node)))
+
def has_snapshot(self, name):
return self.__env.has_snapshot(name)
@@ -353,6 +386,15 @@
LOG.debug('Trying to get nodes by role {0}'.format(node_role))
return self.__env.get_nodes(role=node_role)
+ def __get_nodes_by_name(self, node_name):
+        """Get nodes by given node name
+
+ :param node_name: string
+ :rtype: devops.models.Node
+ """
+        LOG.debug('Trying to get nodes by name {0}'.format(node_name))
+ return self.__env.get_nodes(name=node_name)
+
@property
def master_nodes(self):
"""Get all master nodes
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 6454bdd..ebcc574 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -24,10 +24,12 @@
__config = None
__underlay = None
+ __hardware = None
- def __init__(self, config, underlay, salt):
+ def __init__(self, config, underlay, hardware, salt):
self.__config = config
self.__underlay = underlay
+ self.__hardware = hardware
self._salt = salt
super(OpenstackManager, self).__init__(
config=config, underlay=underlay)
@@ -88,3 +90,34 @@
file_name = result['stdout'][0].rstrip()
LOG.debug("Found files {0}".format(file_name))
r.download(destination=file_name, target=os.getcwd())
+
+ def get_node_name_by_subname(self, node_sub_name):
+ return [node_name for node_name
+ in self.__underlay.node_names()
+ if node_sub_name in node_name]
+
+ def warm_shutdown_openstack_nodes(self, node_sub_name, timeout=10 * 60):
+ """Gracefully shutting down the node """
+ node_names = self.get_node_name_by_subname(node_sub_name)
+ LOG.info('Shutting down nodes {}'.format(node_names))
+ for node in node_names:
+ LOG.debug('Shutdown node {0}'.format(node))
+ self.__underlay.check_call(cmd="shutdown +1", node_name=node)
+ for node in node_names:
+ LOG.info('Destroy node {}'.format(node))
+ self.__hardware.destroy_node(node)
+ self.__hardware.wait_for_node_state(
+ node, state='offline', timeout=timeout)
+
+ def warm_start_nodes(self, node_sub_name, timeout=10 * 60):
+ node_names = self.get_node_name_by_subname(node_sub_name)
+ LOG.info('Starting nodes {}'.format(node_names))
+ for node in node_names:
+ self.__hardware.start_node(node)
+ self.__hardware.wait_for_node_state(
+ node, state='active', timeout=timeout)
+
+ def warm_restart_nodes(self, node_names, timeout=10 * 60):
+ LOG.info('Reboot (warm restart) nodes {0}'.format(node_names))
+ self.warm_shutdown_openstack_nodes(node_names, timeout=timeout)
+        self.warm_start_nodes(node_names, timeout=timeout)
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 4a934ef..ad3e4ad 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -422,13 +422,17 @@
minion_nodes = [ssh for ssh in self.config_ssh
if node_role not in ssh['roles']]
for node in minion_nodes:
- with self.remote(host=node['host']) as r_node:
- r_node.check_call(('tar '
- '--absolute-names '
- '--warning=no-file-changed '
- '-czf {t} {d}'.format(
- t='{0}.tar.gz'.format(node['node_name']), d='/var/log')),
- verbose=True, raise_on_err=False)
+ try:
+ with self.remote(host=node['host']) as r_node:
+ r_node.check_call(('tar '
+ '--absolute-names '
+ '--warning=no-file-changed '
+ '-czf {t} {d}'.format(
+ t='{0}.tar.gz'.format(node['node_name']),
+ d='/var/log')),
+ verbose=True, raise_on_err=False)
+            except Exception:
+                LOG.info("Cannot SSH to node {}".format(node))
with self.remote(master_node['node_name']) as r:
for node in minion_nodes:
packages_minion_cmd = ("salt '{0}*' cmd.run "
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index c7dce43..ffcf909 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -338,11 +338,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Create partitions 01
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
@@ -352,13 +352,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -392,11 +392,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Temporary WR set enabled backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
#- description: Install docker.io on gtw
# cmd: salt-call cmd.run 'apt-get install docker.io -y'
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index dfaac94..3c8f025 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -24,19 +24,6 @@
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas"') }}
-- description: "Fix salt-formula/salt VCP creation. Delete as fast as possible"
- cmd: |
- set -e;
- mkdir -p /tmp/fix_vcp;
- cd /tmp/fix_vcp;
- git clone https://gerrit.mcp.mirantis.net/salt-formulas/salt;
- cd salt;
- git fetch https://gerrit.mcp.mirantis.net/salt-formulas/salt refs/changes/10/11210/1 && git checkout FETCH_HEAD;
- cp _modules/virtng.py /usr/share/salt-formulas/env/_modules/;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
index 891fb2e..bfe5673 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -283,44 +283,14 @@
skip_fail: false
# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+- description: Create physical volumes on a second disk
+ cmd: salt 'ctl*' cmd.run 'pvcreate -y /dev/vdb'
+ node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -349,11 +319,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Temporary WR set enabled backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
index 5b9b09d..33ad49b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -53,6 +53,20 @@
retry: {count: 1, delay: 5}
skip_fail: false
+# Elasticsearch (system service)
+#-------------------------------
+- description: Setup Elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Setup Elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
# Setup Docker Swarm
#-------------------
@@ -255,23 +269,21 @@
retry: {count: 3, delay: 10}
skip_fail: false
-# Elasticsearch
+# Elasticsearch (in container, disabled until https://mirantis.jira.com/browse/PROD-15297 is not fixed)
#--------------
-
-- description: 'Waiting for Elasticsearch to come up in container...'
- cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
-- description: Setup Elasticsearch
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
-
+#- description: 'Waiting for Elasticsearch to come up in container...'
+# cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
+# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 3, delay: 10}
+# skip_fail: false
+#
+#- description: Setup Elasticsearch
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 3, delay: 10}
+# skip_fail: false
# Generate docs
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
index cd2180b..b169617 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
@@ -53,16 +53,16 @@
infra_deploy_nic: eth0
maas_deploy_address: 10.167.4.91
maas_hostname: mas01
- infra_kvm01_control_address: 10.167.4.91
+ infra_kvm01_control_address: ${_param:cicd_control_node01_address}
infra_kvm01_deploy_address: 10.167.5.91
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 10.167.4.92
+ infra_kvm01_hostname: ${_param:cicd_control_node01_hostname}
+ infra_kvm02_control_address: ${_param:cicd_control_node02_address}
infra_kvm02_deploy_address: 10.167.5.92
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 10.167.4.93
+ infra_kvm02_hostname: ${_param:cicd_control_node02_hostname}
+ infra_kvm03_control_address: ${_param:cicd_control_node03_address}
infra_kvm03_deploy_address: 10.167.5.93
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 10.167.4.90
+ infra_kvm03_hostname: ${_param:cicd_control_node03_hostname}
+ infra_kvm_vip_address: ${_param:cicd_control_address}
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
kubernetes_enabled: 'False'
@@ -82,14 +82,14 @@
openstack_control_node02_hostname: ctl02
openstack_control_node03_address: 10.167.4.13
openstack_control_node03_hostname: ctl03
- openstack_database_address: 10.167.4.10
- openstack_database_hostname: dbs
- openstack_database_node01_address: 10.167.4.11
- openstack_database_node01_hostname: dbs01
- openstack_database_node02_address: 10.167.4.12
- openstack_database_node02_hostname: dbs02
- openstack_database_node03_address: 10.167.4.13
- openstack_database_node03_hostname: dbs03
+ openstack_database_address: ${_param:openstack_control_address}
+ openstack_database_hostname: ${_param:openstack_control_hostname}
+ openstack_database_node01_address: ${_param:openstack_control_node01_address}
+ openstack_database_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_database_node02_address: ${_param:openstack_control_node02_address}
+ openstack_database_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_database_node03_address: ${_param:openstack_control_node03_address}
+ openstack_database_node03_hostname: ${_param:openstack_control_node03_hostname}
openstack_enabled: 'True'
openstack_gateway_node01_address: 10.167.4.224
openstack_gateway_node01_hostname: gtw01
@@ -100,35 +100,35 @@
openstack_gateway_node03_address: 10.167.4.226
openstack_gateway_node03_hostname: gtw03
openstack_gateway_node03_tenant_address: 10.167.6.8
- openstack_message_queue_address: 10.167.4.10
- openstack_message_queue_hostname: msg
- openstack_message_queue_node01_address: 10.167.4.11
- openstack_message_queue_node01_hostname: msg01
- openstack_message_queue_node02_address: 10.167.4.12
- openstack_message_queue_node02_hostname: msg02
- openstack_message_queue_node03_address: 10.167.4.13
- openstack_message_queue_node03_hostname: msg03
+ openstack_message_queue_address: ${_param:openstack_control_address}
+ openstack_message_queue_hostname: ${_param:openstack_control_hostname}
+ openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
+ openstack_message_queue_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
+ openstack_message_queue_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
+ openstack_message_queue_node03_hostname: ${_param:openstack_control_node03_hostname}
openstack_network_engine: ovs
openstack_nfv_dpdk_enabled: 'False'
openstack_nfv_sriov_enabled: 'False'
openstack_ovs_dvr_enabled: 'True'
openstack_neutron_qos: 'False'
openstack_ovs_encapsulation_type: vlan
- openstack_ovs_encapsulation_vlan_range: 2416:2420
+ openstack_ovs_encapsulation_vlan_range: 2418:2420
openstack_proxy_address: 10.167.4.80
openstack_proxy_hostname: prx
openstack_proxy_node01_address: 10.167.4.81
openstack_proxy_node01_hostname: prx01
openstack_proxy_node02_address: 10.167.4.82
openstack_proxy_node02_hostname: prx02
- openstack_telemetry_address: 10.167.4.10
- openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 10.167.4.11
- openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 10.167.4.12
- openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 10.167.4.13
- openstack_telemetry_node03_hostname: mdb03
+ openstack_telemetry_address: ${_param:openstack_control_address}
+ openstack_telemetry_hostname: ${_param:openstack_control_hostname}
+ openstack_telemetry_node01_address: ${_param:openstack_control_node01_address}
+ openstack_telemetry_node01_hostname: ${_param:openstack_control_node01_hostname}
+ openstack_telemetry_node02_address: ${_param:openstack_control_node02_address}
+ openstack_telemetry_node02_hostname: ${_param:openstack_control_node02_hostname}
+ openstack_telemetry_node03_address: ${_param:openstack_control_node03_address}
+ openstack_telemetry_node03_hostname: ${_param:openstack_control_node03_hostname}
openstack_benchmark_node01_hostname: bmk01
openstack_benchmark_node01_address: 10.167.4.85
openstack_version: ocata
@@ -139,14 +139,6 @@
salt_master_management_address: 10.167.5.15
stacklight_enabled: 'True'
stacklight_version: '2'
- stacklight_log_address: 10.167.4.70
- stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.4.71
- stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.4.72
- stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.4.73
- stacklight_log_node03_hostname: log03
stacklight_monitor_address: 10.167.4.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.4.71
@@ -155,14 +147,22 @@
stacklight_monitor_node02_hostname: mon02
stacklight_monitor_node03_address: 10.167.4.73
stacklight_monitor_node03_hostname: mon03
- stacklight_telemetry_address: 10.167.4.70
- stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.4.71
- stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.4.72
- stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.4.73
- stacklight_telemetry_node03_hostname: mtr03
+ stacklight_log_address: ${_param:stacklight_monitor_address}
+ stacklight_log_hostname: ${_param:stacklight_monitor_hostname}
+ stacklight_log_node01_address: ${_param:stacklight_monitor_node01_address}
+ stacklight_log_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
+ stacklight_log_node02_address: ${_param:stacklight_monitor_node02_address}
+ stacklight_log_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
+ stacklight_log_node03_address: ${_param:stacklight_monitor_node03_address}
+ stacklight_log_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
+ stacklight_telemetry_address: ${_param:stacklight_monitor_address}
+ stacklight_telemetry_hostname: ${_param:stacklight_monitor_hostname}
+ stacklight_telemetry_node01_address: ${_param:stacklight_monitor_node01_address}
+ stacklight_telemetry_node01_hostname: ${_param:stacklight_monitor_node01_hostname}
+ stacklight_telemetry_node02_address: ${_param:stacklight_monitor_node02_address}
+ stacklight_telemetry_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
+ stacklight_telemetry_node03_address: ${_param:stacklight_monitor_node03_address}
+ stacklight_telemetry_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
tenant_network_gateway: ''
tenant_network_netmask: 255.255.255.0
tenant_vlan: '20'
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index d15bdcf..bda6d51 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -25,6 +25,33 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_telemetry_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_telemetry_node02 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_telemetry_node03 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
index 9893151..ab31534 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -219,7 +219,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 2) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 3072) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -257,7 +257,7 @@
role: salt_minion
params:
vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -295,7 +295,7 @@
role: salt_minion
params:
vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -321,7 +321,7 @@
role: salt_minion
params:
vcpu: {{ os_env('CID_NODE_CPU', 3) }}
- memory: {{ os_env('CID_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -347,7 +347,7 @@
role: salt_minion
params:
vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -376,7 +376,7 @@
role: salt_minion
params:
vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -405,7 +405,7 @@
role: salt_minion
params:
vcpu: {{ os_env('CTL_NODE_CPU', 3) }}
- memory: {{ os_env('CTL_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('CTL_NODE_MEMORY', 14000) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -434,7 +434,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 16384) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -463,7 +463,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 16384) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -492,7 +492,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 2) }}
- memory: {{ os_env('MON_NODE_MEMORY', 12288) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 16384) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -521,7 +521,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 4096) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 2048) }}
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
index 79ef4da..1a4a5f5 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -295,11 +295,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Create partitions 01
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
@@ -309,13 +309,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -349,11 +349,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
+- description: Temporary WR set enabled backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
index c520c5c..332473c 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,7 +10,7 @@
ens4:
role: single_vlan_ctl
- kvm01.mcp11-ovs-dpdk.local:
+ kvm01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_kvm_node01
roles:
- infra_kvm
@@ -21,7 +21,7 @@
ens4:
role: single_vlan_ctl
- kvm02.mcp11-ovs-dpdk.local:
+ kvm02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_kvm_node02
roles:
- infra_kvm
@@ -32,7 +32,7 @@
ens4:
role: single_vlan_ctl
- kvm03.mcp11-ovs-dpdk.local:
+ kvm03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: infra_kvm_node03
roles:
- infra_kvm
@@ -43,7 +43,7 @@
ens4:
role: single_vlan_ctl
- cid01.mcp11-ovs-dpdk.local:
+ cid01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: cicd_control_node01
roles:
- cicd_control_leader
@@ -54,7 +54,7 @@
ens4:
role: single_vlan_ctl
- cid02.mcp11-ovs-dpdk.local:
+ cid02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: cicd_control_node02
roles:
- cicd_control_manager
@@ -65,7 +65,7 @@
ens4:
role: single_vlan_ctl
- cid03.mcp11-ovs-dpdk.local:
+ cid03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: cicd_control_node03
roles:
- cicd_control_manager
@@ -76,7 +76,7 @@
ens4:
role: single_vlan_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_control_node01
roles:
- openstack_control_leader
@@ -89,7 +89,7 @@
ens4:
role: single_vlan_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_control_node02
roles:
- openstack_control
@@ -101,7 +101,7 @@
ens4:
role: single_vlan_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_control_node03
roles:
- openstack_control
@@ -113,7 +113,7 @@
ens4:
role: single_vlan_ctl
- dbs01.mcp11-ovs-dpdk.local:
+ dbs01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_database_node01
roles:
- openstack_database_leader
@@ -125,7 +125,7 @@
ens4:
role: single_vlan_ctl
- dbs02.mcp11-ovs-dpdk.local:
+ dbs02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_database_node02
roles:
- openstack_database
@@ -137,7 +137,7 @@
ens4:
role: single_vlan_ctl
- dbs03.mcp11-ovs-dpdk.local:
+ dbs03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_database_node03
roles:
- openstack_database
@@ -149,7 +149,7 @@
ens4:
role: single_vlan_ctl
- msg01.mcp11-ovs-dpdk.local:
+ msg01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_message_queue_node01
roles:
- openstack_message_queue
@@ -160,7 +160,7 @@
ens4:
role: single_vlan_ctl
- msg02.mcp11-ovs-dpdk.local:
+ msg02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_message_queue_node02
roles:
- openstack_message_queue
@@ -171,7 +171,7 @@
ens4:
role: single_vlan_ctl
- msg03.mcp11-ovs-dpdk.local:
+ msg03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_message_queue_node03
roles:
- openstack_message_queue
@@ -182,7 +182,7 @@
ens4:
role: single_vlan_ctl
- mdb01.mcp11-ovs-dpdk.local:
+ mdb01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_telemetry_node01
roles:
- openstack_telemetry
@@ -193,7 +193,7 @@
ens4:
role: single_vlan_ctl
- mdb02.mcp11-ovs-dpdk.local:
+ mdb02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_telemetry_node02
roles:
- openstack_telemetry
@@ -204,7 +204,7 @@
ens4:
role: single_vlan_ctl
- mdb03.mcp11-ovs-dpdk.local:
+ mdb03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_telemetry_node03
roles:
- openstack_telemetry
@@ -215,7 +215,7 @@
ens4:
role: single_vlan_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
@@ -226,7 +226,7 @@
ens4:
role: single_vlan_ctl
- prx02.mcp11-ovs-dpdk.local:
+ prx02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_proxy_node02
roles:
- openstack_proxy
@@ -237,7 +237,40 @@
ens4:
role: single_vlan_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mon01.mcp-ocata-dvr-vxlan.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp-ocata-dvr-vxlan.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp-ocata-dvr-vxlan.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mtr01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -248,7 +281,7 @@
ens4:
role: single_vlan_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -259,7 +292,7 @@
ens4:
role: single_vlan_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -270,7 +303,7 @@
ens4:
role: single_vlan_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -281,7 +314,7 @@
ens4:
role: single_vlan_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -292,7 +325,7 @@
ens4:
role: single_vlan_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -304,7 +337,7 @@
role: single_vlan_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -319,7 +352,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -334,7 +367,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw02.mcp11-ovs-dpdk.local:
+ gtw02.mcp-ocata-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node02
roles:
- openstack_gateway
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml
deleted file mode 100644
index 3bc891d..0000000
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- - export TERM=linux
- - export LANG=C
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Enable grub menu using updated config below
- - update-grub
-
- # Prepare network connection
- # WARNING! On CID* nodes, admin network is connected to ens4, and control network to ens3 (as in the model)
- # On other nodes (cfg01 and openstack), admin network is connected to ens3, and control network to ens4
- - sudo ifup ens4
-
- # Create swap
- - fallocate -l 8G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
-
- write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet manual
- auto ens4
- iface ens4 inet dhcp
-
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
index 677c392..a5f6916 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
@@ -42,7 +42,7 @@
#- sudo ifup ens4
# Create swap
- - fallocate -l 4G /swapfile
+ - fallocate -l 16G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index 35309f0..823002f 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -4,21 +4,38 @@
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-ocata-dvr-vxlan') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -26,7 +43,6 @@
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cicd.yaml' as CLOUDINIT_USER_DATA_CICD with context %}
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
@@ -34,7 +50,6 @@
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- - &cloudinit_user_data_cicd {{ CLOUDINIT_USER_DATA_CICD }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
template:
@@ -84,6 +99,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -140,6 +159,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -197,6 +220,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -253,6 +280,10 @@
default_{{ HOSTNAME_MDB03 }}: +78
default_{{ HOSTNAME_BMK01 }}: +85
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
default_{{ HOSTNAME_LOG }}: +60
default_{{ HOSTNAME_LOG01 }}: +61
default_{{ HOSTNAME_LOG02 }}: +62
@@ -392,57 +423,57 @@
networks:
- private
-# - name: {{ HOSTNAME_CID02 }}
-# role: salt_minion
-# params:
-## vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 6144
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
-# - name: {{ HOSTNAME_CID03 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 6144
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
# KVM* nodes required for services like glusterfs.server
- name: {{ HOSTNAME_KVM01 }}
@@ -627,57 +658,57 @@
interfaces: *interfaces
network_config: *network_config
-# - name: {{ HOSTNAME_MSG02 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ - name: {{ HOSTNAME_MSG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
-# - name: {{ HOSTNAME_MSG03 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
- name: {{ HOSTNAME_CTL01 }}
role: salt_minion
@@ -737,34 +768,268 @@
interfaces: *interfaces
network_config: *network_config
-# - name: {{ HOSTNAME_CTL03 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 2
-# memory: !os_env SLAVE_NODE_MEMORY, 6144
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: cloudimage1604
-# format: qcow2
-# - name: cinder
-# capacity: 50
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *interfaces
-# network_config: *network_config
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index cb1c515..5b27e3a 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -134,6 +134,10 @@
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+ # Workaround for PROD-14756: all roles must use service.keepalived.cluster.single with the default 'VIP' instance"
+ find /srv/salt/reclass/classes/cluster/ -type f -exec sed -i 's/system.keepalived.*/service.keepalived.cluster.single/g' {} +
+ find /srv/salt/reclass/classes/system/ -type f -exec sed -i 's/system.keepalived.*/service.keepalived.cluster.single/g' {} +
+
{%- if IS_CONTRAIL_LAB %}
# vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
@@ -609,15 +613,15 @@
{%- macro MACRO_NETWORKING_WORKAROUNDS() %}
{#########################################}
-- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
- "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
- git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
- cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
- cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+#- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
+# cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
+# "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
+# git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
+# cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
+# cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
- description: '*Workaround: Load bonding module before call state.linux'
cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml
index fa6203e..4e7e234 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-cicd/openstack.yaml
@@ -294,12 +294,12 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index dad874b..6a5aa50 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -320,13 +320,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index c78e1b7..569ae2d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -302,13 +302,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
index 5b2d223..4a54200 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
@@ -270,12 +270,12 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
index d967df3..41ab7aa 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
@@ -288,13 +288,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -350,4 +350,4 @@
cmd: scp /root/keystonercv3 gtw01:/root
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
index c6dbe35..658ddd5 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
@@ -206,7 +206,7 @@
- description: Create net04
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
+ '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -220,7 +220,7 @@
- description: Create router
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
+ '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -280,12 +280,12 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
index 9971bd6..821e44e 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
@@ -288,13 +288,13 @@
- description: Create partitions 02
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Create partitions 03
cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -350,4 +350,4 @@
cmd: scp /root/keystonercv3 gtw01:/root
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/tests/system/test_failover.py b/tcp_tests/tests/system/test_failover.py
new file mode 100644
index 0000000..a8bb6b8
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover.py
@@ -0,0 +1,205 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestFailover(object):
+ """Test class for testing OpenStack nodes failover"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_ctl01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test warm shutdown ctl01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Shutdown ctl01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('ctl01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_ctl01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test restart ctl01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Restart ctl01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('ctl01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_cmp01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test warm shutdown cmp01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Shutdown cmp01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('cmp01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_cmp01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test restart cmp01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Restart cmp01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('cmp01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_mon01_node(self, underlay, openstack_deployed,
+ openstack_actions, sl_deployed,
+ show_step):
+ """Test restart mon01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Restart mon01
+ 5. Run LMA smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('mon01')
+ # STEP #5
+ show_step(5)
+ # Run SL component tests
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/test_smoke.py',
+ 'test_alerts.py')
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_mon01_node(self, underlay, openstack_deployed,
+ openstack_actions, sl_deployed,
+ show_step):
+ """Test warm shutdown mon01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Shutdown mon01
+ 5. Run LMA smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('mon01')
+ # STEP #5
+ show_step(5)
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/test_smoke.py',
+ 'test_alerts.py')
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_cookied_ocata.py b/tcp_tests/tests/system/test_install_cookied_ocata.py
index 3f3f208..22f4b93 100644
--- a/tcp_tests/tests/system/test_install_cookied_ocata.py
+++ b/tcp_tests/tests/system/test_install_cookied_ocata.py
@@ -51,8 +51,8 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- def test_cookied_ocata_cicd_oss_install(self, underlay, oss_deployed,
- openstack_deployed, sl_deployed,
+ def test_cookied_ocata_cicd_oss_install(self, underlay, openstack_deployed,
+ oss_deployed, sl_deployed,
show_step):
"""Test for deploying an mcp environment and check it
Scenario: