Merge "Add kibana in template"
diff --git a/tcp_tests/fixtures/drivetrain_fixtures.py b/tcp_tests/fixtures/drivetrain_fixtures.py
new file mode 100644
index 0000000..e0e709b
--- /dev/null
+++ b/tcp_tests/fixtures/drivetrain_fixtures.py
@@ -0,0 +1,83 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+from tcp_tests.managers import drivetrain_manager
+
+LOG = logger.logger
+
+
+@pytest.fixture(scope='function')
+def drivetrain_actions(config, underlay, salt_actions):
+ """Fixture that provides various actions for Drivetrain
+
+ :param config: fixture provides oslo.config
+    :param underlay: fixture provides underlay manager
+    :param salt_actions: fixture provides salt manager
+ :rtype: DrivetrainManager
+ """
+ return drivetrain_manager.DrivetrainManager(config, underlay, salt_actions)
+
+
+@pytest.mark.revert_snapshot(ext.SNAPSHOT.drivetrain_deployed)
+@pytest.fixture(scope='function')
+def drivetrain_deployed(revert_snapshot, request, config,
+ hardware, underlay, salt_deployed,
+ drivetrain_actions):
+ """Fixture to get or install Drivetrain on the environment
+
+ :param revert_snapshot: fixture that reverts snapshot that is specified
+ in test with @pytest.mark.revert_snapshot(<name>)
+ :param request: fixture provides pytest data
+ :param config: fixture provides oslo.config
+    :param hardware: fixture provides environment manager
+ :param underlay: fixture provides underlay manager
+    :param salt_deployed: fixture provides salt manager
+    :param drivetrain_actions: fixture provides DrivetrainManager instance
+ :rtype: DrivetrainManager
+
+    If config.drivetrain.drivetrain_installed is not set, this
+    fixture assumes that the Drivetrain was not installed,
+    and does the following:
+ - install Drivetrain
+ - make snapshot with name 'drivetrain_deployed'
+ - return DrivetrainManager
+
+    If config.drivetrain.drivetrain_installed is set, this fixture
+    assumes that the Drivetrain was already installed, and does
+    the following:
+ - return DrivetrainManager instance
+
+ If you want to revert 'drivetrain_deployed' snapshot, please use mark:
+ @pytest.mark.revert_snapshot("drivetrain_deployed")
+ """
+ if not config.drivetrain.drivetrain_installed:
+ steps_path = config.drivetrain_deploy.drivetrain_steps_path
+ commands = underlay.read_template(steps_path)
+ drivetrain_actions.install(commands)
+ hardware.create_snapshot(ext.SNAPSHOT.drivetrain_deployed)
+ salt_deployed.sync_time()
+
+ else:
+ # 1. hardware environment created and powered on
+ # 2. config.underlay.ssh contains SSH access to provisioned nodes
+ # (can be passed from external config with TESTS_CONFIGS variable)
+ # 3. config.tcp.* options contain access credentials to the already
+ # installed TCP API endpoint
+ pass
+
+ return drivetrain_actions
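Note: a minimal sketch of how a test can consume the new fixture. The
fixture and mark names come from this change; the test itself is
hypothetical:

    import pytest

    @pytest.mark.revert_snapshot("drivetrain_deployed")
    def test_drivetrain_smoke(drivetrain_deployed):
        # drivetrain_deployed is the DrivetrainManager instance returned
        # by the fixture (deployed on first run, reused afterwards).
        assert drivetrain_deployed is not None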
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index 58c8509..9bd24b3 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -35,7 +35,7 @@
@pytest.mark.revert_snapshot(ext.SNAPSHOT.salt_deployed)
@pytest.fixture(scope='function')
def salt_deployed(revert_snapshot, request, config,
- hardware, underlay, salt_actions, grab_versions, snapshot):
+ hardware, underlay, salt_actions, snapshot, grab_versions):
"""Fixture to get or install salt service on environment
:param revert_snapshot: fixture that reverts snapshot that is specified
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 7502df2..d991548 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -211,10 +211,17 @@
grab_version = request.keywords.get('grab_versions', None)
def test_fin():
- if hasattr(request.node, 'rep_call') and \
- (request.node.rep_call.passed or request.node.rep_call.failed)\
- and grab_version:
+ fixture_failed = (hasattr(request.node, 'rep_setup') and
+ request.node.rep_setup.failed)
+ test_passed = (hasattr(request.node, 'rep_call') and
+ request.node.rep_call.passed)
+ test_failed = (hasattr(request.node, 'rep_call') and
+ request.node.rep_call.failed)
+
+ if fixture_failed or test_passed or test_failed:
artifact_name = utils.extract_name_from_mark(grab_version) or \
"{}".format(func_name)
underlay.get_logs(artifact_name)
- request.addfinalizer(test_fin)
+
+ if grab_version:
+ request.addfinalizer(test_fin)
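Note: the rep_setup/rep_call attributes checked above are not pytest
built-ins; they are conventionally attached by a hookwrapper in
conftest.py. A sketch of that standard pytest pattern, assuming such a
hook exists in this repo:

    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        # Attach each phase report (setup/call/teardown) to the item so
        # that finalizers can inspect item.rep_setup / item.rep_call.
        outcome = yield
        rep = outcome.get_result()
        setattr(item, "rep_" + rep.when, rep)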
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 804d110..8dbf832 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -45,6 +45,7 @@
'salt_deployed',
'common_services_deployed',
'oss_deployed',
+    'drivetrain_deployed',
'openstack_deployed',
'sl_deployed',
'virtlet_deployed',
diff --git a/tcp_tests/managers/drivetrain_manager.py b/tcp_tests/managers/drivetrain_manager.py
new file mode 100644
index 0000000..6e2fbda
--- /dev/null
+++ b/tcp_tests/managers/drivetrain_manager.py
@@ -0,0 +1,34 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+
+
+class DrivetrainManager(ExecuteCommandsMixin):
+    """Manager for installing and interacting with Drivetrain tools"""
+
+ __config = None
+ __underlay = None
+
+ def __init__(self, config, underlay, salt=None):
+ self.__config = config
+ self.__underlay = underlay
+ self._salt = salt
+ super(DrivetrainManager, self).__init__(
+ config=config, underlay=underlay)
+
+ def install(self, commands):
+ self.execute_commands(commands,
+ label='Install Drivetrain Tools')
+ self.__config.drivetrain.drivetrain_installed = True
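Note: install() consumes the step list that underlay.read_template()
produces from the drivetrain.yaml template. A sketch of that structure,
using the step fields read by ExecuteCommandsMixin in this change; the
concrete command and node name are illustrative only:

    commands = [
        {
            'description': 'Install Jenkins for Drivetrain',
            'cmd': "salt --hard-crash --state-output=mixed "
                   "--state-verbose=False -C 'I@jenkins:client' "
                   "state.sls jenkins",
            'node_name': 'cfg01.example.local',
            'retry': {'count': 3, 'delay': 5},
            'skip_fail': False,
        },
    ]
    drivetrain_actions.install(commands)  # sets drivetrain_installed=True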
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 3c01867..a8cea3d 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -64,18 +64,20 @@
# skip_fail = step.get('skip_fail', False)
msg = "[ {0} #{1} ] {2}".format(label, n + 1, description)
- LOG.info("\n\n{0}\n{1}".format(msg, '=' * len(msg)))
+ log_msg = "\n\n{0}\n{1}".format(msg, '=' * len(msg))
if action_cmd:
- self.execute_command(step)
+ self.execute_command(step, msg)
elif action_do:
- self.command2(step)
+ self.command2(step, msg)
elif action_upload:
+ LOG.info(log_msg)
self.action_upload(step)
elif action_download:
+ LOG.info(log_msg)
self.action_download(step)
- def execute_command(self, step):
+ def execute_command(self, step, msg):
# Required fields
cmd = step.get('cmd')
node_name = step.get('node_name')
@@ -90,7 +92,16 @@
for x in range(retry_count, 0, -1):
time.sleep(3)
- result = remote.execute(cmd, verbose=True)
+
+ retry_msg = (' (try {0} of {1}, skip_fail={2}, node_name={3})'
+ .format(retry_count - x + 1,
+ retry_count,
+ skip_fail,
+ node_name))
+ LOG.info("\n\n{0}\n{1}".format(
+ msg + retry_msg, '=' * len(msg + retry_msg)))
+
+ result = remote.execute('set -ex; ' + cmd, verbose=True)
# Workaround of exit code 0 from salt in case of failures
failed = 0
@@ -106,16 +117,11 @@
if result.exit_code != 0:
time.sleep(retry_delay)
- LOG.info(
- " === RETRY ({0}/{1}) ========================="
- .format(x - 1, retry_count))
elif failed != 0:
LOG.error(
" === SALT returned exit code = 0 while "
"there are failed modules! ===")
- LOG.info(
- " === RETRY ({0}/{1}) ======================="
- .format(x - 1, retry_count))
+ time.sleep(retry_delay)
else:
break
@@ -124,7 +130,7 @@
raise Exception("Step '{0}' failed"
.format(description))
- def command2(self, step):
+ def command2(self, step, msg):
# Required fields
do = step['do']
target = step['target']
@@ -145,6 +151,14 @@
for x in range(retry_count, 0, -1):
time.sleep(3)
+ retry_msg = (' (try {0} of {1}, skip_fail={2}, target={3})'
+ .format(retry_count - x + 1,
+ retry_count,
+ skip_fail,
+ target))
+ LOG.info("\n\n{0}\n{1}".format(
+ msg + retry_msg, '=' * len(msg + retry_msg)))
+
method = getattr(self._salt, self._salt._map[do])
command_ret = method(tgt=target, state=state or states,
args=args, kwargs=kwargs)
@@ -163,8 +177,6 @@
LOG.error("States finished with failures.\n{}".format(
all_fails))
time.sleep(retry_delay)
- LOG.info(" === RETRY ({0}/{1}) ========================="
- .format(x - 1, retry_count))
else:
break
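Note: the new 'set -ex; ' prefix makes a multi-command step fail fast and
trace each command, instead of reporting only the exit code of the last
command. A self-contained illustration:

    from subprocess import run

    step = "false; echo survived"
    # Without the prefix the failure of 'false' is masked by the echo:
    assert run(['bash', '-c', step]).returncode == 0
    # With 'set -ex;' the step aborts at the first failing command:
    assert run(['bash', '-c', 'set -ex; ' + step]).returncode != 0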
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index bbf9315..275ef57 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -105,6 +105,10 @@
with self.__underlay.remote(node_name=target_name[0]) as node_remote:
result = node_remote.execute(cmd, verbose=True)
+ cmd_iptables = "iptables --policy FORWARD ACCEPT"
+ with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+ result = node_remote.execute(cmd_iptables, verbose=True)
+
with self.__underlay.remote(
host=self.__config.salt.salt_master_host) as node_remote:
result = node_remote.execute(
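Note: setting the FORWARD policy to ACCEPT here is most likely a
workaround for Docker flipping the default iptables FORWARD policy to
DROP, which breaks routed traffic through these nodes; the same
workaround is applied in rallymanager.py and several templates below.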
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index ae72941..d5b8782 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -96,6 +96,9 @@
version=version))
self._underlay.check_call(cmd, node_name=self._node_name)
+ cmd_iptables = "iptables --policy FORWARD ACCEPT"
+ self._underlay.check_call(cmd_iptables, node_name=self._node_name)
+
LOG.info("Create rally workdir")
cmd = 'mkdir -p /root/rally; chown 65500 /root/rally'
self._underlay.check_call(cmd, node_name=self._node_name)
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 142cd3f..5c1b3a1 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -150,7 +150,7 @@
"""
for node in nodes:
services_status = self.get_service_info_from_node(node)
- assert len(services_status) == len(expected_services), \
+ assert set(services_status) >= set(expected_services), \
'Some services are missed on node {0}. ' \
'Current service list: {1}\nExpected service list: {2}' \
.format(node, services_status, expected_services)
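Note: the superset check tolerates extra services on a node while still
catching missing ones, which the old length comparison could not. A quick
illustration:

    services_status = ['telegraf', 'fluentd', 'node_exporter']  # one extra
    expected_services = ['telegraf', 'fluentd']

    # Old check fails on the extra service although nothing is missing:
    assert len(services_status) != len(expected_services)
    # New check passes: every expected service is present.
    assert set(services_status) >= set(expected_services)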
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 5194239..55ccbbe 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -414,50 +414,98 @@
def get_logs(self, artifact_name,
node_role=ext.UNDERLAY_NODE_ROLES.salt_master):
- master_node = [ssh for ssh in self.config_ssh
- if node_role in ssh['roles']][0]
- cmd = ("dpkg -l | grep formula > "
- "/var/log/{0}_packages.output".format(master_node['node_name']))
- tar_cmd = ('tar --absolute-names'
- ' --warning=no-file-changed '
- '-czf {t} {d}'.format(
- t='{0}_log.tar.gz'.format(artifact_name), d='/var/log'))
- minion_nodes = [ssh for ssh in self.config_ssh
- if node_role not in ssh['roles']]
+        # Prefix each '$' symbol with a backslash '\' so that cfg01's
+        # shell does not interpolate it early; substitutions must be
+        # expanded on each target node instead
+ dump_commands = (
+ "mkdir /root/\$(hostname -f)/;"
+ "rsync -aruv /var/log/ /root/\$(hostname -f)/;"
+ "dpkg -l > /root/\$(hostname -f)/dump_dpkg_l.txt;"
+ "df -h > /root/\$(hostname -f)/dump_df.txt;"
+ "mount > /root/\$(hostname -f)/dump_mount.txt;"
+ "blkid -o list > /root/\$(hostname -f)/dump_blkid_o_list.txt;"
+ "iptables -t nat -S > /root/\$(hostname -f)/dump_iptables_nat.txt;"
+ "iptables -S > /root/\$(hostname -f)/dump_iptables.txt;"
+ "ps auxwwf > /root/\$(hostname -f)/dump_ps.txt;"
+ "docker images > /root/\$(hostname -f)/dump_docker_images.txt;"
+ "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
+ "docker service ls > "
+ " /root/\$(hostname -f)/dump_docker_services_ls.txt;"
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ " do docker service ps --no-trunc 2>&1 \$SERVICE >> "
+ " /root/\$(hostname -f)/dump_docker_service_ps.txt;"
+ " done;"
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+ " do docker service logs 2>&1 \$SERVICE > "
+ " /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
+ " done;"
+ "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
+ "lvdisplay > /root/\$(hostname -f)/dump_lvdisplay.txt;"
+ "ip a > /root/\$(hostname -f)/dump_ip_a.txt;"
+ "ip r > /root/\$(hostname -f)/dump_ip_r.txt;"
+ "netstat -anp > /root/\$(hostname -f)/dump_netstat.txt;"
+ "brctl show > /root/\$(hostname -f)/dump_brctl_show.txt;"
+ "arp -an > /root/\$(hostname -f)/dump_arp.txt;"
+ "uname -a > /root/\$(hostname -f)/dump_uname_a.txt;"
+ "lsmod > /root/\$(hostname -f)/dump_lsmod.txt;"
+ "cat /proc/interrupts > /root/\$(hostname -f)/dump_interrupts.txt;"
+ "cat /etc/*-release > /root/\$(hostname -f)/dump_release.txt;"
+ # OpenStack specific, will fail on other nodes
+ # "rabbitmqctl report > "
+ # " /root/\$(hostname -f)/dump_rabbitmqctl.txt;"
- with self.remote(master_node['node_name']) as r:
- for node in minion_nodes:
- LOG.info("Archiving logs on the node {0}"
+ # "ceph health > /root/\$(hostname -f)/dump_ceph_health.txt;"
+ # "ceph -s > /root/\$(hostname -f)/dump_ceph_s.txt;"
+ # "ceph osd tree > /root/\$(hostname -f)/dump_ceph_osd_tree.txt;"
+
+ # "for ns in \$(ip netns list);"
+ # " do echo Namespace: \${ns}; ip netns exec \${ns} ip a;"
+ # "done > /root/\$(hostname -f)/dump_ip_a_ns.txt;"
+
+ # "for ns in \$(ip netns list);"
+ # " do echo Namespace: \${ns}; ip netns exec \${ns} ip r;"
+ # "done > /root/\$(hostname -f)/dump_ip_r_ns.txt;"
+
+ # "for ns in \$(ip netns list);"
+ # " do echo Namespace: \${ns}; ip netns exec \${ns} netstat -anp;"
+ # "done > /root/\$(hostname -f)/dump_netstat_ns.txt;"
+
+ "/usr/bin/haproxy-status.sh > "
+ " /root/\$(hostname -f)/dump_haproxy.txt;"
+
+ # Archive the files
+ "cd /root/; tar --absolute-names --warning=no-file-changed "
+ " -czf \$(hostname -f).tar.gz ./\$(hostname -f)/;"
+ )
+
+ master_host = self.__config.salt.salt_master_host
+ with self.remote(host=master_host) as master:
+ # dump files
+ LOG.info("Archive artifacts on all nodes")
+ master.check_call('salt "*" cmd.run "{0}"'.format(dump_commands),
+ raise_on_err=False)
+
+ # create target dir for archives
+ master.check_call("mkdir /root/dump/")
+
+ # get archived artifacts to the master node
+ for node in self.config_ssh:
+ LOG.info("Getting archived artifacts from the node {0}"
.format(node['node_name']))
- r.check_call((
- "salt '{n}*' cmd.run "
- "'tar "
- "--absolute-names "
- "--warning=no-file-changed "
- "-czf {t} {d}'".format(
- n=node['node_name'],
- t='{0}.tar.gz'.format(node['node_name']),
- d='/var/log')),
- raise_on_err=False)
+ master.check_call("rsync -aruv {0}:/root/*.tar.gz "
+ "/root/dump/".format(node['node_name']),
+ raise_on_err=False)
- LOG.info("Copying logs from {0} to {1}"
- .format(node['node_name'], master_node['node_name']))
- packages_minion_cmd = ("salt '{0}*' cmd.run "
- "'dpkg -l' > /var/log/"
- "{0}_packages.output".format(
- node['node_name']))
- r.check_call(packages_minion_cmd)
- r.check_call("rsync {0}:/root/*.tar.gz "
- "/var/log/".format(node['node_name']),
- raise_on_err=False)
+ destination_name = '/root/{0}_dump.tar.gz'.format(artifact_name)
+ # Archive the artifacts from all nodes
+ master.check_call(
+ 'cd /root/dump/;'
+ 'tar --absolute-names --warning=no-file-changed -czf '
+ ' {0} ./'.format(destination_name))
- r.check_call(cmd)
- r.check_call(tar_cmd)
-
- destination_name = '{0}_log.tar.gz'.format(artifact_name)
+ # Download the artifact to the host
LOG.info("Downloading the artifact {0}".format(destination_name))
- r.download(destination=destination_name, target=os.getcwd())
+ master.download(destination=destination_name, target=os.getcwd())
def delayed_call(
self, cmd,
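Note: a sketch of why every '$' in dump_commands is backslash-escaped.
The script is embedded in the double-quoted argument of
salt "*" cmd.run "...", which is itself evaluated by the shell on cfg01,
so an unescaped substitution would be expanded there instead of on each
minion:

    # '\\$' in Python source yields a literal backslash-dollar; cfg01's
    # shell reduces it to '$', leaving the expansion to the minion.
    dump = "mkdir /root/\\$(hostname -f)/"
    salt_cmd = 'salt "*" cmd.run "{0}"'.format(dump)
    print(salt_cmd)
    # -> salt "*" cmd.run "mkdir /root/\$(hostname -f)/"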
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index b06b805..a5cda3b 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,5 +1,6 @@
# git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre] # Use this requirement for PostgreSQL
-git+git://github.com/openstack/fuel-devops.git@c39a2a9 # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+libvirt-python>=3.5.0, !=4.1.0 # LGPLv2+
+git+git://github.com/openstack/fuel-devops.git@cce44f4 # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
git+git://github.com/dis-xcom/fuel-devops-driver-ironic
paramiko
six
@@ -15,7 +16,7 @@
testrail
functools32
python-k8sclient==0.4.0
-salt-pepper
+salt-pepper<=0.5.3
setuptools<=36.2.0
netaddr
mock>=1.2
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index ad4cce3..bcefb83 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -42,6 +42,10 @@
__name__,
'templates/{0}/oss.yaml'.format(
settings.LAB_CONFIG_NAME))
+_default_drivetrain_steps = pkg_resources.resource_filename(
+ __name__,
+ 'templates/{0}/drivetrain.yaml'.format(
+ settings.LAB_CONFIG_NAME))
_default_decapod_steps = pkg_resources.resource_filename(
__name__,
'templates/{0}/decapod.yaml'.format(
@@ -129,6 +133,11 @@
ct.Cfg('templates_dir', ct.String(),
help="Path to directory with templates",
default=_default_templates_dir),
+ ct.Cfg('salt_roles', ct.JSONList(),
+ help="Node roles to install salt-minions and manage by salt",
+ default=[ext.UNDERLAY_NODE_ROLES.salt_master,
+ ext.UNDERLAY_NODE_ROLES.salt_minion,
+ ext.UNDERLAY_NODE_ROLES.k8s_controller]),
]
salt_opts = [
ct.Cfg('salt_master_host', ct.IPAddress(),
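Note: a hypothetical sketch of how the new salt_roles option could be
consumed to select which underlay nodes get salt-minions; the filtering
itself is not part of this change:

    # Hypothetical data shaped like config_ssh entries (see get_logs above):
    config_ssh = [
        {'node_name': 'cfg01', 'roles': ['salt_master']},
        {'node_name': 'prx01', 'roles': ['none']},
    ]
    salt_roles = ['salt_master', 'salt_minion', 'k8s_controller']

    # Keep only the entries whose roles intersect salt_roles.
    salt_nodes = [ssh for ssh in config_ssh
                  if set(ssh['roles']) & set(salt_roles)]
    assert [n['node_name'] for n in salt_nodes] == ['cfg01']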
@@ -159,6 +168,17 @@
help="", default=False),
]
+drivetrain_deploy_opts = [
+ ct.Cfg('drivetrain_steps_path', ct.String(),
+ help="Path to YAML with steps to deploy Drivetrain",
+ default=_default_drivetrain_steps),
+]
+
+drivetrain_opts = [
+ ct.Cfg('drivetrain_installed', ct.Boolean(),
+ help="", default=False),
+]
+
decapod_deploy_opts = [
ct.Cfg('decapod_steps_path', ct.String(),
help="Path to YAML with steps to deploy Ceph with Decapod",
@@ -332,6 +352,8 @@
('common_services', common_services_opts),
('oss_deploy', oss_deploy_opts),
('oss', oss_opts),
+ ('drivetrain_deploy', drivetrain_deploy_opts),
+ ('drivetrain', drivetrain_opts),
('decapod_deploy', decapod_deploy_opts),
('decapod', decapod_opts),
('openstack_deploy', openstack_deploy_opts),
@@ -383,6 +405,15 @@
config.register_opts(group='oss_deploy',
opts=oss_deploy_opts)
+ config.register_group(cfg.OptGroup(name='drivetrain',
+ title="Drivetrain Tools", help=""))
+ config.register_opts(group='drivetrain', opts=drivetrain_opts)
+
+ config.register_group(cfg.OptGroup(name='drivetrain_deploy',
+ title="Drivetrain deploy config", help=""))
+ config.register_opts(group='drivetrain_deploy',
+ opts=drivetrain_deploy_opts)
+
config.register_group(cfg.OptGroup(name='decapod',
title="Decapod options for Ceph", help=""))
config.register_opts(group='decapod', opts=decapod_opts)
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
index abf1040..4e3bb65 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
@@ -75,7 +75,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
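Note: the added '-b 1' runs the galera state in batch mode, one slave at
a time, so the slaves are not reconfigured or restarted simultaneously
and Galera cluster quorum is preserved; the same change is applied in the
other common-services.yaml templates below.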
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 342e4c8..f64d8ca 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -280,6 +280,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 74a4666..392798b 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -18,8 +18,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.0/26
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: obutenko@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -109,6 +109,35 @@
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_master_address: 10.167.4.2
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.2
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index b875e47..7435fc8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -93,6 +93,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -102,6 +103,7 @@
reclass_storage_name: openstack_proxy_node02
roles:
- openstack_proxy
+ - features_designate_proxy
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
index ed3636c..3c4ce05 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
@@ -1,22 +1,6 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install docker swarm.
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
- cmd: |
- SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+ cmd: |
+ SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index 6026c30..12016f5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -52,7 +52,6 @@
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget);
# # Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
index a0b1c88..ca4b062 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -52,7 +52,6 @@
#- sleep 120
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget)
# # Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
index 49f28dd..7985929 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
@@ -52,7 +52,6 @@
#- sleep 120
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget)
# # Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a1d6090..ab28e9b 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -113,7 +113,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
index 5a4fe25..2a08d8a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/common-services.yaml
@@ -106,7 +106,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 0de3fd7..992ef9f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -338,6 +338,11 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: salt "ctl01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
#- description: create rc file on cfg
# cmd: scp ctl01:/root/keystonercv3 /root
# node_name: {{ HOSTNAME_CFG01 }}
@@ -353,7 +358,6 @@
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
index 265d897..d1da7cc 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -17,8 +17,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -112,14 +112,43 @@
openstack_proxy_node02_hostname: prx02
openstack_version: ocata
oss_enabled: 'False'
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_master_address: 10.167.8.66
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index c91448f..4994602 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -17,8 +17,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
infra_bond_mode: active-backup
infra_deploy_nic: eth0
@@ -114,14 +114,43 @@
openstack_proxy_node02_hostname: prx02
openstack_version: ocata
oss_enabled: 'False'
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_master_address: 10.167.8.66
salt_master_hostname: cfg01
salt_master_management_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 38888ec..1db0984 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -168,7 +168,6 @@
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 8d131b7..37c8e0c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -1,22 +1,6 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install docker swarm.
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
- cmd: |
- SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on StackLight mon nodes
+ cmd: |
+ SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
+ echo "_param:cluster_vip_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index 2bb48f0..6c9e48f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -52,7 +52,6 @@
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
index 07a6936..106c3d5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
@@ -52,7 +52,6 @@
#- sleep 120
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget)
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
index e8e6345..915981e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
@@ -52,7 +52,6 @@
#- sleep 120
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget)
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
index 2d0a783..7b3d93c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -126,7 +126,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
index 3a2ada7..fa2ffb2 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
@@ -55,8 +55,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -152,6 +152,35 @@
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: KEfAXxIWJykUBv0v8uKWdI2v4cBG5A07
salt_api_password_hash: $6$XBCrfheG$2q48l7h1giiqF2sdp7CFtLQQi8pcMa6K5A8cPYQmhuGqJtzv08YXVqYyhkHARzl1VBLVf.aTMY6d0M5naM5WU0
salt_master_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
index 225c6f0..da9c583 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
@@ -55,8 +55,8 @@
deploy_network_netmask: 255.255.255.192
deploy_network_subnet: 172.16.49.64/26
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: sgudz@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -159,8 +159,8 @@
oss_node01_address: ${_param:stacklight_monitor_node01_address}
oss_node02_address: ${_param:stacklight_monitor_node02_address}
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_email_from: sgudz@mirantis.com
- oss_notification_email_recipients: sgudz@mirantis.com
+ oss_webhook_from: sgudz@mirantis.com
+ oss_webhook_recipients: sgudz@mirantis.com
oss_openstack_auth_url: http://172.17.16.190:5000/v3
oss_openstack_cert: ' -----BEGIN CERTIFICATE----- MIIE0DCCA7igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT AAGjggEaMIIBFjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
@@ -220,6 +220,35 @@
rundeck_iframe_host: ${_param:openstack_proxy_address}
rundeck_iframe_port: ${_param:haproxy_rundeck_exposed_port}
rundeck_iframe_ssl: 'False'
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI
salt_api_password_hash: $6$hbWiURAY$Sfzk6dzos6j1B8gFDK6WoGNDk0I2Bd2IOarWDGOflgY2sBpUJ4KTq1Uw241ri933/ROHTSuhNcodmDe13i5gS.
salt_master_address: 172.16.49.66
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
index c6314ad..8faced7 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -46,39 +44,7 @@
- mkswap /swapfile
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
index c3ffe17..1837e32 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -38,7 +36,6 @@
# Prepare network connection
- sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
# Create swap
- fallocate -l 4G /swapfile
@@ -46,43 +43,7 @@
- mkswap /swapfile
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
index bff4542..90c6227 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -135,7 +135,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
index 7c247b7..9929c03 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/common-services.yaml
@@ -82,7 +82,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
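
Note: `-b 1` is salt's batch-size flag; the galera state now runs on one 'I@galera:slave' minion at a time instead of all slaves in parallel, so each node finishes joining the cluster before the next starts. The flag also accepts percentages, e.g.:

    # one minion per batch (serial join)
    salt -C 'I@galera:slave' state.sls galera -b 1
    # or a fraction of the matched minions per batch
    salt -C 'I@galera:slave' state.sls galera -b 25%
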
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
index 8c214e3..43483ae 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -319,6 +319,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
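
Note: the added step sets the gateway's default iptables FORWARD policy to ACCEPT so routed tenant traffic is not dropped by a restrictive default. A quick post-run check (a sketch):

    salt "gtw01*" cmd.run 'iptables -S FORWARD | head -1'   # expect: -P FORWARD ACCEPT
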
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
index f26d7ee..f64b373 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -2,29 +2,6 @@
# Install OSS: Operational Support System Tools
-# Keepalived
-#-----------
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the CICD VIP
- cmd: |
- CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
- echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
# Glusterfs
#-----------
@@ -110,6 +87,29 @@
retry: {count: 1, delay: 5}
skip_fail: false
+# Keepalived
+#-----------
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster:enabled:True' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@haproxy:proxy:enabled:True' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the CICD VIP
+ cmd: |
+ CICD_CONTROL_ADDRESS=`salt --out=newline_values_only -C 'I@haproxy:proxy and I@jenkins:client' pillar.get _param:cluster_vip_address`;
+ echo "_param:cluster_vip_address (vip): ${CICD_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C "I@keepalived:cluster:instance:*:address:${CICD_CONTROL_ADDRESS}" cmd.run "ip a | grep ${CICD_CONTROL_ADDRESS}" | grep -B1 ${CICD_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Configure OSS services
#-----------------------
@@ -182,7 +182,7 @@
'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
while true; do curl -sf ldap://${CICD_CONTROL_ADDRESS} && break; sleep 2; done'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
+ retry: {count: 10, delay: 30}
skip_fail: false
- description: "Setup OpenLDAP"
@@ -200,7 +200,7 @@
'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8080/config/server/version && break; sleep 2; done'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
+ retry: {count: 10, delay: 30}
skip_fail: false
- description: "Setup Gerrit"
@@ -222,7 +222,7 @@
sleep 2;
done'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
+ retry: {count: 10, delay: 30}
skip_fail: false
- description: "Setup Jenkins"
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
index 8022d47..91c0506 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
@@ -47,8 +47,8 @@
control_vlan: '10'
deploy_network_gateway: ''
deploy_network_netmask: 255.255.255.0
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.176.6
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
infra_deploy_nic: eth0
@@ -127,6 +127,35 @@
openstack_version: ocata
public_host: ${_param:openstack_proxy_address}
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_master_address: 10.167.4.15
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
@@ -157,6 +186,7 @@
stacklight_telemetry_node02_hostname: ${_param:stacklight_monitor_node02_hostname}
stacklight_telemetry_node03_address: ${_param:stacklight_monitor_node03_address}
stacklight_telemetry_node03_hostname: ${_param:stacklight_monitor_node03_hostname}
+ stacklight_long_term_storage_type: influxdb
tenant_network_gateway: ''
tenant_network_netmask: 255.255.255.0
tenant_vlan: '20'
@@ -232,15 +262,14 @@
oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGBE7Iz5GcmIOFL+p9drLENsaYyogZaxY+dzVF0xwn3GUEzhpuPiI0HvCaPX2gxEFcq+u8yxueE4nVnW1YfWPCbetnvOImUNSjUyz/AA9dXrv5rK4sJKnokZPNrGj/hhN/NS++cIm88q4h/4GPETr9HN3OunpvgVi685+TE3+krgd+utHAmPJAIJ5casNyre8q8Fpn/X0w3km2usJHHgvjZQXLWQCQ0IJGKv5iwr62IMzdN+iv3k81d0zJ4rMwRzznwP1fEr5TI0WMas7aFmbp+ZGOBYy42doadWi2ZPXPWkH3V3fmrufrR3dDspGfqn7FK2vAHusH34myLcvqr5UR
# Experimental notification parameters
- oss_notification_email_recipients: 'ddmitriev@mirantis.com'
- oss_notification_saleforce_username: 'sfdc-devops-portal-test-customer-user@mirantis.com'
- oss_notification_smtp_host: '127.0.0.1'
- oss_notification_smtp_port: '25'
- oss_notification_email_from: 'integration-ci@mirantis.com'
- oss_notification_sender_password: 'integration-password'
+ oss_pushkin_smtp_host: '127.0.0.1'
+ oss_pushkin_smtp_port: '25'
+ oss_pushkin_email_sender_password: 'integration-password'
+ oss_webhook_from: 'integration-ci@mirantis.com'
+ oss_webhook_recipients: 'ddmitriev@mirantis.com'
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
- oss_notification_webhook_login_id: '13'
- oss_notification_webhook_app_id: '24'
oss_cis_enabled: 'True'
oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
oss_cis_jobs_repository_branch: master
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
index 648984a..a17173d 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-environment.yaml
@@ -96,6 +96,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -107,6 +108,7 @@
reclass_storage_name: openstack_proxy_node02
roles:
- openstack_proxy
+ - features_designate_proxy
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
index 962035c..03e3153 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
@@ -2,22 +2,6 @@
{% from 'cookied-mcp-ocata-dop-sl2/salt.yaml' import ENVIRONMENT_MODEL_INVENTORY_NAME with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
- cmd: |
- SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -66,7 +50,36 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
@@ -82,12 +95,6 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Install elasticsearch server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
@@ -123,6 +130,45 @@
retry: {count: 1, delay: 5}
skip_fail: true
+# Install Prometheus LTS (optional if set in model)
+- description: Prometheus LTS (optional if set in model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
# Collect grains needed to configure the services
- description: Get grains
@@ -140,28 +186,47 @@
- description: Update mine
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 5, delay: 15}
skip_fail: false
# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
- description: Configure Grafana dashboards and datasources
cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
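
Note: the restructured steps use a probe-then-apply idiom for optional services (Prometheus LTS, fluentd vs. heka, the ceilometer collector): `test.ping` against a pillar-based target succeeds only if at least one minion matches, so a state is applied only where the model enables it. Distilled from the steps above:

    # does any minion carry the fluentd:agent pillar?
    FLUENTD_SERVICE=`salt -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`
    if [[ "$FLUENTD_SERVICE" == "true" ]]; then
      salt -C 'I@fluentd:agent' state.sls fluentd                    # new collector
    else
      salt -C 'I@heka:log_collector' state.sls heka.log_collector    # legacy fallback
    fi
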
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
index 126a441..fbbda4b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -234,7 +234,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 2) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -641,8 +641,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: {{ os_env('GTW_NODE_CPU', 1) }}
- memory: {{ os_env('GTW_NODE_MEMORY', 2048) }}
+ vcpu: {{ os_env('GTW_NODE_CPU', 4) }}
+ memory: {{ os_env('GTW_NODE_MEMORY', 4096) }}
boot:
- hd
cloud_init_volume_name: iso
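
Note: the raised values are only defaults; `os_env(NAME, default)` reads the environment variable first, so a run can still be sized differently without editing the template, e.g. (hypothetical values):

    export CFG_NODE_MEMORY=4096   # override the new 8192 default
    export GTW_NODE_CPU=2
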
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
index 74d3c68..6b26974 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/common-services.yaml
@@ -82,7 +82,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
index 3882976..a4b52a5 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -361,6 +361,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 315f8cc..fd84b59 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -20,8 +20,8 @@
control_vlan: '10'
deploy_network_gateway: ''
deploy_network_netmask: 255.255.255.0
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
infra_deploy_nic: eth0
@@ -101,11 +101,42 @@
openstack_version: ocata
public_host: ${_param:openstack_proxy_address}
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_master_address: 10.167.4.15
salt_master_hostname: cfg01
salt_master_management_address: 10.167.5.15
stacklight_enabled: 'True'
+ fluentd_enabled: 'True'
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
stacklight_log_address: 10.167.4.60
stacklight_log_hostname: log
stacklight_log_node01_address: 10.167.4.61
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
index 9de891d..e60baf8 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-environment.yaml
@@ -186,6 +186,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -197,6 +198,7 @@
reclass_storage_name: openstack_proxy_node02
roles:
- openstack_proxy
+ - features_designate_proxy
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
index dd64b90..0a90afa 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
@@ -1,22 +1,6 @@
{% from 'cookied-mcp-ocata-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,7 +49,36 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
@@ -81,12 +94,6 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure collector
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Install elasticsearch server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
@@ -122,6 +129,45 @@
retry: {count: 1, delay: 5}
skip_fail: true
+# Install Prometheus LTS (optional if set in model)
+- description: Prometheus LTS (optional if set in model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
# Collect grains needed to configure the services
- description: Get grains
@@ -139,28 +185,47 @@
- description: Update mine
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 5, delay: 15}
skip_fail: false
# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
- description: Configure Grafana dashboards and datasources
cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
@@ -171,5 +236,4 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
- skip_fail: false
-
+ skip_fail: false
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index 8c4c848..cffa424 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -350,7 +350,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..04390ff
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -0,0 +1,156 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: cookied-mcp-pike-dpdk.local
+ cluster_name: cookied-mcp-pike-dpdk
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: ${_param:openstack_control_node01_address}
+# infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: ${_param:openstack_control_node01_hostname}
+ infra_kvm02_control_address: ${_param:openstack_control_node02_address}
+# infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: ${_param:openstack_control_node02_hostname}
+ infra_kvm03_control_address: ${_param:openstack_control_node03_address}
+# infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: ${_param:openstack_control_node03_hostname}
+ infra_kvm_vip_address: ${_param:openstack_control_address}
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.4.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.4.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.4.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 10.167.4.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.4.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.4.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.4.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'True'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_hugepages_count: '600'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_cpu_pinning: '3'
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_branch: master
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
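
Note: the DPDK-relevant knobs in this context are `openstack_nfv_dpdk_enabled: 'True'`, `openstack_nova_compute_hugepages_count: '600'` and `openstack_nova_cpu_pinning: '3'`. Assuming the default 2 MB hugepage size, 600 pages reserve roughly 1.2 GB per compute for DPDK and guest memory; the pinning value keeps vCPUs off the cores used by the poll-mode drivers. Both are model inputs for this lab, not universal recommendations.
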
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
similarity index 64%
copy from tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index 0127547..913636b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.cookied-mcp-pike-dpdk.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,16 +10,11 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.cookied-mcp-pike-dpdk.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- - features_designate_pool_manager_keystone
- linux_system_codename_xenial
interfaces:
ens3:
@@ -27,15 +22,11 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.cookied-mcp-pike-dpdk.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- linux_system_codename_xenial
interfaces:
ens3:
@@ -43,15 +34,11 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.cookied-mcp-pike-dpdk.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_pool_manager_database
- - features_designate_pool_manager
- linux_system_codename_xenial
interfaces:
ens3:
@@ -59,7 +46,73 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ dbs01.cookied-mcp-pike-dpdk.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs02.cookied-mcp-pike-dpdk.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs03.cookied-mcp-pike-dpdk.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg01.cookied-mcp-pike-dpdk.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg02.cookied-mcp-pike-dpdk.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg03.cookied-mcp-pike-dpdk.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.cookied-mcp-pike-dpdk.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
@@ -70,47 +123,8 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.cookied-mcp-pike-dpdk.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -121,11 +135,15 @@
ens4:
role: single_ctl
ens5:
- role: bond0_ab_ovs_vxlan_mesh
+ role: bond2_dpdk_prv
+ dpdk_pci: "00:05.0"
ens6:
+ role: bond2_dpdk_prv
+ dpdk_pci: "00:06.0"
+ ens7:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.cookied-mcp-pike-dpdk.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -136,30 +154,7 @@
ens4:
role: single_ctl
ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
+ role: single_ovs_br_prv
+ mtu: 1500
+ ens7:
role: bond1_ab_ovs_floating
-
- dns01.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node01
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node01_address}
-
- dns02.mcp11-ovs-dpdk.local:
- reclass_storage_name: openstack_dns_node02
- roles:
- - features_designate_pool_manager_dns
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- single_address: ${_param:openstack_dns_node02_address}
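
Note: on the computes, ens5/ens6 move from the VXLAN-mesh role to `bond2_dpdk_prv` with explicit `dpdk_pci` addresses; those PCI devices are handed to the OVS-DPDK poll-mode driver and leave the kernel network stack, which is why the floating role shifts to a new ens7. A quick post-deploy check (a sketch; exact port names depend on the generated model):

    salt 'cmp*' cmd.run 'ovs-vsctl show | grep -A2 dpdk'
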
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml
new file mode 100644
index 0000000..9264ed5
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
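
Note: the `*01*` bootstrap steps use salt compound matching (`-C`), intersecting a pillar match with a hostname glob so that only the first node of each cluster is touched before the full-cluster pass, e.g.:

    salt -C 'I@rabbitmq:server and *01*' test.ping   # only the *01 member answers
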
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
new file mode 100644
index 0000000..0311e79
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/openstack.yaml
@@ -0,0 +1,277 @@
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires existing 'keystone' and 'glusterfs' system users)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume salt-call
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
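+# The echo pipeline appears to script fdisk's interactive prompts: create a
+# new primary partition with default start/end sectors, then write the table.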
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
new file mode 100644
index 0000000..d424970
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/salt.yaml
@@ -0,0 +1,51 @@
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Enable hugepages on cmp nodes
+ cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
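+# Writing 2048 to /proc/sys/vm/nr_hugepages reserves hugepages at runtime;
+# DPDK poll-mode drivers allocate their packet memory pools from hugepages.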
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
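+# The three "Hack" steps below delete the control-network address from ens4
+# on the gtw and cmp nodes; the .110/.105/.106 host offsets match the private
+# pool reservations in underlay.yaml, presumably so the address can be
+# re-assigned statically during deployment.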
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
new file mode 100644
index 0000000..1097d70
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
@@ -0,0 +1,634 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dpdk') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
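+ # The +N entries above are host offsets from the start of the pool's
+ # network; the dhcp range [+90, -10] is assumed to span from network
+ # start +90 to network end -10 (devops address-pool convention).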
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: True
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, true
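+ # '!os_env VAR, default' reads the value from the named environment
+ # variable, falling back to the default after the comma when it is unset.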
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for the 'backing_store' option of node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
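+ # '&interfaces' and '&network_config' define YAML anchors on ctl01; the
+ # other minions reuse the same two-NIC layout via '*interfaces' and
+ # '*network_config' instead of repeating it.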
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_DBS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_DBS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_DBS03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 12
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ numa:
+ - cpus: 0,1,2,3,4,5
+ memory: 4096
+ - cpus: 6,7,8,9,10,11
+ memory: 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: e1000
+ - label: ens6
+ l2_network_device: tenant
+ interface_model: e1000
+ - label: ens7
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - tenant
+ ens7:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 12
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ numa:
+ - cpus: 0,1,2,3,4,5
+ memory: 4096
+ - cpus: 6,7,8,9,10,11
+ memory: 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
similarity index 70%
copy from tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
index e2ba165..bac6199 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -2,8 +2,8 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-pike-dvr.local
- cluster_name: virtual-mcp-pike-dvr
+ cluster_domain: cookied-mcp-pike-dvr-ssl.local
+ cluster_name: cookied-mcp-pike-dvr-ssl
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -18,8 +18,8 @@
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: ddmitriev@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -103,14 +103,43 @@
openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
similarity index 79%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
index dcb1e40..6edac6e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.virtual-mcp-pike-dvr-ssl.local:
+ cfg01.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,7 +10,7 @@
ens4:
role: single_vlan_ctl
- ctl01.virtual-mcp-pike-dvr-ssl.local:
+ ctl01.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
@@ -27,7 +27,7 @@
ens4:
role: single_vlan_ctl
- ctl02.virtual-mcp-pike-dvr-ssl.local:
+ ctl02.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
@@ -43,7 +43,7 @@
ens4:
role: single_vlan_ctl
- ctl03.virtual-mcp-pike-dvr-ssl.local:
+ ctl03.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
@@ -59,10 +59,11 @@
ens4:
role: single_vlan_ctl
- prx01.virtual-mcp-pike-dvr-ssl.local:
+ prx01.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -70,7 +71,7 @@
ens4:
role: single_vlan_ctl
- mon01.virtual-mcp-pike-dvr-ssl.local:
+ mon01.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -83,7 +84,7 @@
ens4:
role: single_vlan_ctl
- mon02.virtual-mcp-pike-dvr-ssl.local:
+ mon02.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -96,7 +97,7 @@
ens4:
role: single_vlan_ctl
- mon03.virtual-mcp-pike-dvr-ssl.local:
+ mon03.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -110,7 +111,7 @@
role: single_vlan_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.virtual-mcp-pike-dvr-ssl.local:
+ cmp<<count>>.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -125,7 +126,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.virtual-mcp-pike-dvr-ssl.local:
+ gtw01.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -140,11 +141,16 @@
ens6:
role: bond1_ab_ovs_floating
- dns01.virtual-mcp-pike-dvr-ssl.local:
+ dns01.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_dns_node01
roles:
- features_designate_pool_manager_dns
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.extra
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+ - system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
interfaces:
ens3:
role: single_dhcp
@@ -152,11 +158,16 @@
role: single_vlan_ctl
single_address: ${_param:openstack_dns_node01_address}
- dns02.virtual-mcp-pike-dvr-ssl.local:
+ dns02.cookied-mcp-pike-dvr-ssl.local:
reclass_storage_name: openstack_dns_node02
roles:
- features_designate_pool_manager_dns
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.extra
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+ - system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
interfaces:
ens3:
role: single_dhcp
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml
new file mode 100644
index 0000000..4d79b7d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
new file mode 100644
index 0000000..02cb4c7
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
@@ -0,0 +1,407 @@
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
+- description: Nginx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install Designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@powerdns:server' state.sls powerdns.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
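+# 'ntpd -gq' does a one-shot clock sync: '-g' permits a large initial
+# correction and '-q' exits after setting the time, while the ntp service
+# is stopped around it.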
+
+# Configure cinder-volume salt-call PROD-13167
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: create volume_group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
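+# 'cinder-volumes' is the volume group name cinder's default LVM backend
+# expects, so the driver should pick it up without extra configuration.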
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
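+# The three crudini steps above point cinder-volume at the LVM backend via
+# 'enabled_backends = lvm' in the [DEFAULT] section; the 'Restart cinder
+# volume' step below makes the setting take effect.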
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
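+# Installing docker.io typically flips the iptables FORWARD policy to DROP,
+# which would break routed tenant traffic through gtw01; resetting the
+# policy to ACCEPT restores forwarding.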
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
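+
+# keystonercv3 is the openrc-style credentials file rendered on ctl01; a
+# representative fragment (values are placeholders, not the deployed ones):
+#   export OS_USERNAME=admin
+#   export OS_PASSWORD=<keystone admin password>
+#   export OS_AUTH_URL=http://<openstack control vip>:5000/v3
+#   export OS_IDENTITY_API_VERSION=3
+# Copying it to cfg01 and then gtw01 lets later test steps source it there.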
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
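+
+# Note on the empty values above ('volume:extend':, add_member:, etc.): the
+# assumption is that a key with an explicit rule is written into the service
+# policy file, while a key left without a value removes that rule so the
+# service falls back to its built-in default.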
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
new file mode 100644
index 0000000..38c0742
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/salt.yaml
@@ -0,0 +1,45 @@
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
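+
+# Each quoted name in FORMULA_SERVICES is assumed to map to a
+# salt-formula-<name> package (e.g. "keystone" -> salt-formula-keystone)
+# that the macro installs on the master and links into the reclass model.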
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
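+
+# The three "hack" steps above boil down to this shell on each node (gtw01
+# shown; the computes use the .105/.106 offsets), assuming the default
+# private pool resolves to 10.60.0.0/24:
+#   ip addr del 10.60.0.110/24 dev ens4   # drop the DHCP-acquired control IP
+#   ip addr flush dev ens4                # clear ens4 so the linux.network
+#                                         # states can assign it statically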
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
rename to tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
index f492e73..3e5f7fb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/sl.yaml
@@ -1,22 +1,6 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
new file mode 100644
index 0000000..8b6c716
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -0,0 +1,516 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ssl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
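+
+ # Worked example of the offsets above, assuming the default
+ # PRIVATE_ADDRESS_POOL01 (10.60.0.0/16 carved into /24 subnets, so the
+ # allocated network is 10.60.0.0/24):
+ #   gateway: +1          -> 10.60.0.1
+ #   default_cfg01: +100  -> 10.60.0.100
+ #   default_gtw01: +110  -> 10.60.0.110
+ #   dhcp: [+90, -10]     -> roughly 10.60.0.90 .. 10.60.0.245
+ #                           (offsets from the start and end of the subnet)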
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is referenced by the 'backing_store' option of node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
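+
+ # Node volumes below that set 'backing_store' are created as qcow2
+ # overlays on top of these images, roughly:
+ #   qemu-img create -f qcow2 -b <source_image> <node>-system.qcow2
+ # so every node boots from a thin copy-on-write clone of the base image.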
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
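+
+ # '&interfaces' and '&network_config' above are ordinary YAML anchors: the
+ # remaining ctl/mon/prx nodes below reuse these exact blocks through the
+ # '*interfaces' and '*network_config' aliases instead of repeating them.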
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
similarity index 60%
copy from tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index e2ba165..a4c8abf 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -2,8 +2,8 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-pike-dvr.local
- cluster_name: virtual-mcp-pike-dvr
+ cluster_domain: cookied-mcp-pike-dvr.local
+ cluster_name: cookied-mcp-pike-dvr
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -18,8 +18,8 @@
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: ddmitriev@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -47,7 +47,7 @@
openstack_benchmark_node01_address: 172.16.10.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '100'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -103,14 +103,43 @@
openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 172.16.10.90
@@ -118,33 +147,33 @@
salt_master_management_address: 192.168.10.90
shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ fluentd_enabled: 'True'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
similarity index 68%
copy from tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 0127547..caec0fa 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -63,6 +63,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -74,8 +75,6 @@
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- linux_system_codename_xenial
interfaces:
ens3:
@@ -87,8 +86,6 @@
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- linux_system_codename_xenial
interfaces:
ens3:
@@ -100,7 +97,27 @@
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
- - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
- stacklight_log
- linux_system_codename_xenial
interfaces:
@@ -109,6 +126,50 @@
ens4:
role: single_ctl
+ log03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
# Generator-based computes. For compatibility only
cmp<<count>>.mcp11-ovs-dpdk.local:
reclass_storage_name: openstack_compute_rack01
@@ -145,6 +206,11 @@
roles:
- features_designate_pool_manager_dns
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.extra
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+ - system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
interfaces:
ens3:
role: single_dhcp
@@ -157,6 +223,11 @@
roles:
- features_designate_pool_manager_dns
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.extra
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+ - system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
interfaces:
ens3:
role: single_dhcp
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml
new file mode 100644
index 0000000..31a6257
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
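+
+# The check above greps 'ip a' on every keepalived node for the VIP;
+# 'grep -B1' keeps the minion name printed on the preceding line, so a
+# passing run looks roughly like this (address illustrative):
+#   ctl01.cookied-mcp-pike-dvr.local:
+#       inet 172.16.10.100/32 scope global ens4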
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
new file mode 100644
index 0000000..3509982
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -0,0 +1,401 @@
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
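+
+# Sketch of what the OVERRIDE_POLICY branch produces (assuming
+# LAB_CONFIG_NAME=cookied-mcp-pike-dvr): overrides-policy.yml is rewritten
+# to start with
+#   classes:
+#   - cluster.cookied-mcp-pike-dvr.openstack.control_orig
+# followed by its original 'parameters:' tree, and control.yml becomes a
+# symlink to it, so the renamed control_orig.yml is still applied first and
+# the policy overrides take precedence.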
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
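+
+# Why keystone.server runs twice: the first pass generates per-node fernet
+# keys; once the key directory is backed by the glusterfs volume mounted
+# above, the second pass leaves one consistent key set shared by all
+# controllers (a reading of the intent, not an exact formula trace).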
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@powerdns:server' state.sls powerdns.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+ # Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
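+
+# Topology built by the neutron calls above (a sketch; the prefixes come
+# from shared-salt.yaml):
+#   net04_ext (flat on physnet1) -- <external prefix>.0/24, DHCP disabled,
+#        |                          pool .150-.180, gateway .1
+#   net04_router01 --------------- gateway set on net04_ext
+#        |
+#   net04 ------------------------ <tenant prefix>.0/24, pool .120-.240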
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume via salt-call (PROD-13167)
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create volume_group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
new file mode 100644
index 0000000..d1dfb9e
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/salt.yaml
@@ -0,0 +1,47 @@
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
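The three "Hack ... node" steps share one pattern: drop the statically assigned control-network address so the model-managed network configuration can take over. The generic form, with placeholder values (NODE and ADDR are illustrative; the real values come from the Jinja context above):

    NODE='gtw01*'                 # illustrative target minion glob
    ADDR='172.16.10.110/24'       # illustrative control-network address
    salt "${NODE}" cmd.run "ip addr del ${ADDR} dev ens4; ip addr flush dev ens4"
    salt "${NODE}" cmd.run "ip -4 addr show dev ens4"   # expect no addresses left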
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
copy to tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
index f492e73..b75dfe9 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/sl.yaml
@@ -1,22 +1,6 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,7 +49,36 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
@@ -81,12 +94,6 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Install elasticsearch server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
@@ -122,6 +129,45 @@
retry: {count: 1, delay: 5}
skip_fail: true
+# Install Prometheus LTS (optional if set in model)
+- description: Prometheus LTS (optional if set in model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer_collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
# Collect grains needed to configure the services
- description: Get grains
@@ -143,24 +189,43 @@
skip_fail: false
# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+- description: Configure Remote Collector in Docker Swarm for Openstack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
- description: Configure Grafana dashboards and datasources
cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
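Several steps above (Prometheus LTS, fluentd, heka ceilometer) use the same presence-check idiom: ping the pillar-matched minions and only run the state if any respond. Reduced to its core as a sketch; SERVICE_PILLAR and the state name are placeholders:

    SERVICE_PILLAR='I@fluentd:agent'    # placeholder pillar match
    PRESENT=$(salt --hard-crash --state-output=mixed --state-verbose=False \
              -C "${SERVICE_PILLAR}" test.ping 1>/dev/null 2>&1 && echo true)
    if [[ "${PRESENT}" == "true" ]]; then
        salt --hard-crash --state-output=mixed --state-verbose=False \
            -C "${SERVICE_PILLAR}" state.sls fluentd
    fi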
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
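The meta-data above follows the cloud-init NoCloud datasource format; the framework substitutes {hostname} and packs the result into each node's 'iso' volume. A hedged sketch of assembling such a seed image by hand (file names assumed; the 'cidata' volume id is the standard NoCloud convention):

    # Build a NoCloud seed ISO from rendered meta-data and user-data files
    genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data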
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
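Because the payload above is a Jinja-rendered cloud-config, it can only be schema-checked after rendering (the loop over config.underlay.ssh_keys expanded and the leading block-scalar marker stripped). A hedged check, assuming a rendered copy exists and a cloud-init release new enough to ship the schema subcommand:

    # Validate the rendered cloud-config (older releases: 'cloud-init devel schema')
    cloud-init schema --config-file rendered-user-data-cfg01.yaml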
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
new file mode 100644
index 0000000..9aeabca
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -0,0 +1,764 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
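Most sizing and addressing knobs in the underlay above are wrapped in !os_env or os_env(), so they can be overridden from the shell before a test run. An illustrative pre-run profile; the variable names are taken from the template, the values are examples only:

    export SLAVE_NODE_CPU=2
    export SLAVE_NODE_MEMORY=6144          # MiB per slave node, example value
    export NODE_VOLUME_SIZE=100            # GiB system volume, example value
    export PRIVATE_ADDRESS_POOL01='10.60.0.0/16:24'
    export REPOSITORY_SUITE='testing'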
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
similarity index 60%
rename from tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
rename to tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index 8b6e51a..cd29897 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -2,8 +2,8 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-pike-ovs.local
- cluster_name: virtual-mcp-pike-ovs
+ cluster_domain: cookied-mcp-pike-ovs.local
+ cluster_name: cookied-mcp-pike-ovs
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -18,8 +18,8 @@
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: ddmitriev@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -47,7 +47,7 @@
openstack_benchmark_node01_address: 172.16.10.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '100'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -103,48 +103,77 @@
openstack_version: pike
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 172.16.10.90
salt_master_hostname: cfg01
salt_master_management_address: 192.168.10.90
shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ fluentd_enabled: 'True'
stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
stacklight_monitor_address: 172.16.10.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_address: 172.16.10.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_address: 172.16.10.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_address: 172.16.10.73
stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.1.0.1
tenant_network_netmask: 255.255.255.0
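The edit above splits StackLight across dedicated mon (+71..73), log (+61..63) and mtr (+86..88) hosts, matching the address plan in the new underlay. A quick hedged cross-check that the context and the underlay agree (run from tcp_tests/templates; the grep targets are the parameters edited above):

    grep -E 'stacklight_(monitor|log|telemetry)_node0[123]_address' \
        cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml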
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
similarity index 71%
rename from tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
rename to tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 63cedf1..8ac0a05 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -65,6 +65,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -76,8 +77,6 @@
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- linux_system_codename_xenial
interfaces:
ens3:
@@ -89,8 +88,6 @@
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- linux_system_codename_xenial
interfaces:
ens3:
@@ -102,7 +99,27 @@
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
- - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
- stacklight_log
- linux_system_codename_xenial
interfaces:
@@ -111,6 +128,50 @@
ens4:
role: single_ctl
+ log03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
# Generator-based computes. For compatibility only
cmp<<count>>.mcp11-ovs-dpdk.local:
reclass_storage_name: openstack_compute_rack01
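The reclass_storage_name values above must line up with the node definitions the cookiecutter model generates. A hedged way to confirm the new log/mtr nodes made it into the inventory on the Salt master, assuming the reclass CLI and the usual /srv/salt/reclass base:

    reclass --inventory -b /srv/salt/reclass | grep -E '(log|mtr)0[123]'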
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml
new file mode 100644
index 0000000..89aaf5c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
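After the Galera steps above, the cluster size reported by wsrep should equal the number of controllers. A hedged one-liner in the same style as the mysql status check; the expected value of 3 assumes the three-controller layout of this template:

    salt --hard-crash --state-output=mixed --state-verbose=False \
        -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size   # expect 3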
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
new file mode 100644
index 0000000..76eb198
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -0,0 +1,380 @@
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install designate
+- description: Install bind
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@bind:server' state.sls bind
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in doc) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+ # Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume salt-call
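+# The 'echo -e' pipe below scripts an interactive fdisk session: 'n'
+# creates a new partition (fdisk takes the first character of each answer,
+# so the doubled 'nn' still selects 'n'), 'p' makes it primary, the blank
+# lines accept the default partition number and sector bounds, and 'w'
+# writes the partition table.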
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create volume group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
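+# crudini edits INI files in place; the workaround steps below are
+# equivalent to ensuring /etc/cinder/cinder.conf contains:
+#   [DEFAULT]
+#   enabled_backends = lvm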
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
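+# Docker switches the iptables FORWARD chain policy to DROP when it
+# starts, so reset it to ACCEPT to keep routed traffic through the
+# gateway node working.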
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create rc file on cfg01
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
new file mode 100644
index 0000000..9a39b90
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
@@ -0,0 +1,47 @@
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
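+# The "hack" steps below drop the control-network addresses that were
+# acquired during bootstrap, so that the linux.network state can assign
+# the statically modelled IPs to ens4 without address conflicts.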
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
copy to tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
index f492e73..e237aa3 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/sl.yaml
@@ -1,22 +1,8 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,7 +51,36 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
@@ -81,12 +96,6 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
- description: Install elasticsearch server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
@@ -122,6 +131,45 @@
retry: {count: 1, delay: 5}
skip_fail: true
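+# The conditional steps below probe for matching minions with 'test.ping'
+# (output discarded); '&& echo true' marks the service as present, and the
+# corresponding state is applied only when the probe succeeds.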
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
# Collect grains needed to configure the services
- description: Get grains
@@ -143,24 +191,43 @@
skip_fail: false
# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+- description: Configure Remote Collector in Docker Swarm for OpenStack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
- description: Configure Grafana dashboards and datasources
cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
new file mode 100644
index 0000000..fea38c9
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -0,0 +1,701 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
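+# The '&name' entries above define YAML anchors; the node definitions
+# below reference them as '*name' aliases, so each cloud-init payload and
+# the interface model are declared once and reused.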
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-ovs') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
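+ # Offsets in the pools below are relative to the pool's network address:
+ # '+1' is the first host address, '+100' the hundredth, and a negative
+ # offset such as '-10' counts back from the end of the network.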
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used in the 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
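+ # The '&interfaces' and '&network_config' anchors above are reused via
+ # '*interfaces' and '*network_config' on the other two-NIC nodes below.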
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
new file mode 100644
index 0000000..cc7603c
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
@@ -0,0 +1,69 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-mitaka-dvr' %}
+# Name of the context file (the '.yaml' extension is implied) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-mitaka-dvr.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
+{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") %}
+{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
+{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
+{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+ cmd: |
+ set -e;
+ apt-get install -y salt-formula-*
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
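+# With combined roles, the generated model still carries storage classes
+# and node definitions for the standalone roles; the workaround below
+# strips them so that duplicate nodes are not generated in the inventory.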
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # Workaround for the missing reclass.system class for the dns role
+ salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index bb17c15..2e3cfde 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -26,6 +26,9 @@
sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
. /root/venv-reclass-tools/bin/activate;
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 6b85fea..34857bc 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -26,6 +26,9 @@
sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
. /root/venv-reclass-tools/bin/activate;
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
new file mode 100644
index 0000000..72cad63
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -0,0 +1,50 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dpdk' %}
+# Name of the context file (the '.yaml' extension is implied) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
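+# The next step appends a bridge mapping so that the 'physnet2' provider
+# network is wired to the 'br-prv' OVS bridge on the DPDK compute nodes.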
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
+ parameters:
+ neutron:
+ compute:
+ bridge_mappings:
+ physnet2: br-prv
+ EOF
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index c823df7..174fce8 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -1,10 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-dvr' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
@@ -13,6 +10,15 @@
{% import 'shared-salt.yaml' as SHARED with context %}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+ cmd: |
+ set -e;
+ apt-get install -y salt-formula-*
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
@@ -23,8 +29,11 @@
sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
. /root/venv-reclass-tools/bin/activate;
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
@@ -36,7 +45,7 @@
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index f117bd8..3cab167 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -1,10 +1,7 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-ovs' %}
+{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-ovs' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
@@ -23,8 +20,11 @@
sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Start compute node addresses from .105, as in static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
. /root/venv-reclass-tools/bin/activate;
reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
@@ -36,7 +36,7 @@
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
# Bind9 services are placed on the first two ctl nodes
salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
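Note: reclass.cluster_meta_set rewrites (or appends) a single _param key in the given class file. Assuming it behaves here as in the other templates, the call above leaves openstack/init.yml with a fragment like this sketch (illustrative only):

    parameters:
      _param:
        openstack_dns_node01_address: ${_param:openstack_control_node01_address}

so the Bind9 DNS address simply follows ctl01's control address instead of a hardcoded IP.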
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index c871146..ba4ee4e 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -23,13 +23,14 @@
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
+
output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
runcmd:
# Prepare network connection
- sudo ifdown ens3
- - sudo ifup ens3
+ #- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
# Configure dhclient
@@ -50,7 +51,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- apt-get clean
- apt-get update
@@ -69,6 +69,13 @@
- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
########################################################
+ # Purge the unattended-upgrades package (Workaround for PROD-17904, PROD-18736)
+ - echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
+ - echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
+ - echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
+ - apt-get -y purge unattended-upgrades
+ - reboot
+
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
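Note on the unattended-upgrades workaround above: the three apt.conf.d drop-ins switch off every periodic APT job before the package itself is purged, so a background upgrade can never grab the dpkg lock mid-deployment, and the reboot makes sure no already-started timer survives. A quick way to verify the result on a booted node (example commands, not part of the template):

    apt-config dump | grep -i periodic
    # expected to show the three options set to "0"
    dpkg -l unattended-upgrades || echo "unattended-upgrades is gone"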
diff --git a/tcp_tests/templates/cookied-model-generator/underlay.yaml b/tcp_tests/templates/cookied-model-generator/underlay.yaml
index e90a8c8..25fb76c 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay.yaml
@@ -57,7 +57,7 @@
role: salt_master
params:
vcpu: {{ os_env('CFG_NODE_CPU', 4) }}
- memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
cloud_init_volume_name: iso
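The 4096 -> 8192 bumps in this and the underlay templates below only change the fallback value; a run can still size the Salt master node explicitly through the same environment variables, e.g. (illustrative):

    export CFG_NODE_CPU=8
    export CFG_NODE_MEMORY=16384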
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index 3afdf7f..9908098 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -48,7 +48,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- apt-get update
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- apt-get install -y ntp
@@ -77,7 +76,8 @@
{%- endif %}
# Enable SNAT to allow internet access for deploying nodes using ironic node as a gateway
- - iptables -t nat -A POSTROUTING -s {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} ! -d {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} -j MASQUERADE
+ - iptables -t nat -I POSTROUTING -s {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} ! -d {{ os_env('IRONIC_DHCP_POOL_START', '10.0.175.100') }}/{{ os_env('IRONIC_DHCP_POOL_NETMASK_PREFIX', '24') }} -j MASQUERADE
+ - iptables -I FORWARD -j ACCEPT
- echo "Building ironic agent image (stable/newton) ..."
- apt-get install -y docker.io gzip uuid-runtime cpio findutils grep gnupg make
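Note: switching the SNAT rule from -A (append) to -I (insert) puts it at the top of the POSTROUTING chain, and the extra FORWARD ACCEPT is inserted ahead of whatever default the node ends up with. This likely matters because docker.io is installed right below, and Docker is known to manage its own NAT/FORWARD rules (modern versions set the FORWARD policy to DROP). An illustrative check after boot (example only):

    iptables -t nat -L POSTROUTING --line-numbers | head   # MASQUERADE should be rule 1
    iptables -S FORWARD | head -n 3                        # policy plus the inserted ACCEPT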
diff --git a/tcp_tests/templates/ironic_standalone/underlay.yaml b/tcp_tests/templates/ironic_standalone/underlay.yaml
index 8d30156..4837204 100644
--- a/tcp_tests/templates/ironic_standalone/underlay.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay.yaml
@@ -84,7 +84,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- source_image: !os_env IMAGE_PATH1604
+ source_image: {{ os_env('IRONIC_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
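With the nested os_env lookup above, the ironic node can boot from its own image while everything else keeps using IMAGE_PATH1604. Illustrative usage (variable names as in the template):

    export IMAGE_PATH1604=/images/xenial-server-cloudimg-amd64.qcow2
    # optional override, affects only the ironic underlay:
    export IRONIC_IMAGE_PATH1604=/images/xenial-ironic-custom.qcow2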
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
index 1a6d136..c002a00 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
@@ -51,7 +51,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
index af0bf27..6c6ba65 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
@@ -53,7 +53,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
diff --git a/tcp_tests/templates/mcp-local-aptly/underlay--user-data-apt01.yaml b/tcp_tests/templates/mcp-local-aptly/underlay--user-data-apt01.yaml
index f18129f..79443bd 100644
--- a/tcp_tests/templates/mcp-local-aptly/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/mcp-local-aptly/underlay--user-data-apt01.yaml
@@ -38,8 +38,7 @@
export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- echo "supersede domain-name-servers $LOCAL_DNS_IP, 8.8.8.8, 172.18.208.44" >> /etc/dhcp/dhclient.conf
+ echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
export TERM=linux
export LANG=C
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
index 492cff4..32588b5 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
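Note: -b 1 is salt's batch mode; it applies the galera state to one matching minion at a time instead of all slaves in parallel, so the cluster never has every slave restarting at once and keeps quorum. The same flag is added to every common-services.yaml touched below. Equivalent manual invocation (illustrative):

    salt -b 1 -C 'I@galera:slave' state.sls galera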
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
index 8802687..043b74a 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
@@ -373,6 +373,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
index 4927c4f..b3a2679 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
@@ -1,22 +1,6 @@
{% from 'mcp-ocata-local-repo-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
index fc3d284..bbaec2b 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay--user-data-apt01.yaml
@@ -39,8 +39,7 @@
- export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 8.8.8.8, 172.18.208.44" >> /etc/dhcp/dhclient.conf
+ - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- export TERM=linux
- export LANG=C
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
index b066287..d2d4778 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
@@ -126,7 +126,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -136,7 +136,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -213,7 +213,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
index ab7da5d..fbf3a06 100644
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
index 3bdd098..e386337 100644
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/underlay--user-data-cfg01.yaml
@@ -48,7 +48,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
index ca5c171..b0568d3 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -283,7 +283,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml b/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
index b738b3a..cb176c1 100644
--- a/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
+++ b/tcp_tests/templates/physical_mcp11_dvr/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
index 03d82ea..0c5a3c2 100644
--- a/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical_mcp11_dvr/underlay--user-data-cfg01.yaml
@@ -46,7 +46,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
index bbbd2f5..0c30920 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/common-services.yaml
@@ -82,7 +82,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
index bde813c..c207f25 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml
@@ -48,7 +48,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
index 0dfe7a4..e9a318f 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
@@ -123,7 +123,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 60da5d8..f2feef4 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -62,7 +62,9 @@
echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu.list
echo "{{ UBUNTU_UPDATES_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_updates.list
echo "{{ UBUNTU_SECURITY_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_security.list
- eatmydata apt-get clean && apt-get update;
+ eatmydata apt-get clean;
+ apt-get update;
+ sync;
node_name: {{ NODE_NAME }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -93,6 +95,7 @@
eatmydata apt-get install -y --allow-unauthenticated reclass salt-master
# Install common packages
eatmydata apt-get install -y python-pip git curl at tmux byobu iputils-ping traceroute htop tree mc
+ sync;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
@@ -186,15 +189,15 @@
# 10.16.0 -> <external network>
# So let's replace constant networks to the keywords, and then keywords to the desired networks.
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
- find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10\./==IPV4_NET_ADMIN_PREFIX==/g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10\./==IPV4_NET_CONTROL_PREFIX==/g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0\./==IPV4_NET_TENANT_PREFIX==/g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0\./==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10/==IPV4_NET_ADMIN_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0/==IPV4_NET_TENANT_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}./g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}./g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}./g' {} +
- find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
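Note on the pattern change above: dropping the trailing escaped dot from both the match and the replacement lets the substitution also catch prefixes written without a following octet, e.g. 'openstack_compute_rack01_single_subnet: 172.16.10' (see the cookiecutter context further down), which the old dot-anchored pattern skipped. Illustrative before/after (example only):

    echo "single_subnet: 172.16.10" | sed 's/172\.16\.10\./PFX./g'   # old: no match
    echo "single_subnet: 172.16.10" | sed 's/172\.16\.10/PFX/g'      # new: replaced

The looser pattern would also match longer octets such as 172.16.100, which these models apparently avoid.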
@@ -289,12 +292,12 @@
sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/172\.17\.16\./==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
{% set items = CLUSTER_PRODUCT_MODELS or '$(ls /tmp/cookiecutter-templates/cluster_product/)' %}
for item in {{ items }}; do
@@ -444,7 +447,7 @@
[ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
declare -a formula_services=({{ FORMULA_SERVICES }});
echo -e "\nInstalling all required salt formulas\n";
- eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
+ apt-get install -y "${formula_services[@]/#/salt-formula-}";
for formula_service in "${formula_services[@]}"; do
echo -e "\nLink service metadata for formula ${formula_service} ...\n";
[ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
@@ -495,7 +498,15 @@
{%- macro MACRO_INSTALL_SALT_MINIONS() %}
{#######################################}
-{% for ssh in config.underlay.ssh %}
+{%- for ssh in config.underlay.ssh %}
+ {%- set salt_roles = [] %}
+ {%- for role in ssh['roles'] %}
+ {%- if role in config.salt_deploy.salt_roles %}
+ {%- set _ = salt_roles.append(role) %}
+ {%- endif %}
+ {%- endfor %}
+
+ {%- if salt_roles %}
- description: Configure salt-minion on {{ ssh['node_name'] }}
cmd: |
[ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
@@ -522,14 +533,21 @@
eatmydata apt-get install -y salt-minion;
# Install common packages
eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
+ sync
# Restart salt-minion if it was already installed
service salt-minion restart
node_name: {{ ssh['node_name'] }}
retry: {count: 1, delay: 1}
skip_fail: false
-{% endfor %}
+ {%- else %}
+- description: Check SSH connectivity to non-salt-minion node {{ ssh['node_name'] }}
+ cmd: echo "SSH to $(hostname -f) passed"
+ node_name: {{ ssh['node_name'] }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+ {%- endif %}
+{%- endfor %}
- description: Accept salt keys from all the nodes
cmd: salt-key -A -y
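Note: the Jinja rework above intersects each node's underlay roles with config.salt_deploy.salt_roles; nodes with no match get only the SSH-connectivity check instead of a salt-minion installation. Assuming a settings fragment shaped like this sketch (illustrative, not from this change):

    salt_deploy:
      salt_roles: ['salt_master', 'salt_minion', 'k8s_controller']

any node whose roles fall outside that list is pinged over SSH and otherwise left alone.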
@@ -649,6 +667,15 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Validate pillar on salt master node
+ cmd: |
+ set -e
+ if salt-call sys.doc reclass.validate_node_params | grep -q reclass.validate_node_params ; then salt-call reclass.validate_nodes_params ; fi
+ if salt-call sys.doc reclass.validate_pillar | grep -q reclass.validate_pillar ; then salt-call reclass.validate_pillar ; fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls reclass
@@ -720,11 +747,31 @@
- description: Configure salt.minion on other nodes
cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion &&
- sleep 10
+ sleep 30
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 15}
skip_fail: false
+- description: Wait for salt-minions wake up after restart
+ cmd: salt --timeout=30 --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 25, delay: 30}
+ skip_fail: false
+
+- description: Update minion information
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update && sleep 15
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: Check salt minion versions on slaves
cmd: salt --timeout=60 '*' test.version
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
similarity index 72%
rename from tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
rename to tcp_tests/templates/virtual-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
index e2ba165..039d0a3 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
@@ -2,8 +2,8 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
cicd_enabled: 'False'
- cluster_domain: virtual-mcp-pike-dvr.local
- cluster_name: virtual-mcp-pike-dvr
+ cluster_domain: virtual-mcp-mitaka-dvr.local
+ cluster_name: virtual-mcp-mitaka-dvr
compute_bond_mode: active-backup
compute_primary_first_nic: eth1
compute_primary_second_nic: eth2
@@ -47,7 +47,7 @@
openstack_benchmark_node01_address: 172.16.10.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '100'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -100,7 +100,7 @@
openstack_proxy_node02_address: 172.16.10.122
openstack_proxy_node02_hostname: prx02
openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: pike
+ openstack_version: mitaka
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
oss_notification_app_id: '24'
@@ -111,6 +111,35 @@
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/_context-environment.yaml
similarity index 98%
rename from tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
rename to tcp_tests/templates/virtual-mcp-mitaka-dvr/_context-environment.yaml
index 0127547..ca8114b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/_context-environment.yaml
@@ -63,6 +63,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/common-services.yaml
new file mode 100644
index 0000000..3613971
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/openstack.yaml
new file mode 100644
index 0000000..77be7c0
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/openstack.yaml
@@ -0,0 +1,395 @@
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@powerdns:server' state.sls powerdns.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+ # Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Allow all tcp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Allow all icmp
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: create volume_group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
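Note on the PROD-13167 cinder-volume block above: the fdisk/pvcreate/vgcreate steps hand every controller an LVM volume group named cinder-volumes, and crudini then flips on the lvm backend in cinder.conf. The resulting configuration is expected to look roughly like this sketch (the [lvm] section shape is the usual cinder LVM driver layout, assumed rather than shown by this change):

    [DEFAULT]
    enabled_backends = lvm

    [lvm]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes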
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
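Note on overrides-policy.yml above: keys with an explicit value override the service's default policy rule, while keys deliberately left empty (add_member:, 'volume:extend':, and so on) are meant to drop the rule from the rendered policy.json entirely, assuming the salt formulas treat an empty value as "remove this rule". For glance, for example, the rendered file would then contain something like this sketch (illustrative):

    {
        "publicize_image": "role:admin"
    }

with add_member simply absent, so glance falls back to its built-in default for that action.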
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/salt.yaml
new file mode 100644
index 0000000..b70ab4a
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/salt.yaml
@@ -0,0 +1,66 @@
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+#- description: "Workaround for PROD-14831 , add 'dns' role to cmp01 and cmp02 nodes"
+# cmd: |
+# set -e;
+# apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+# [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+# . /root/venv-reclass-tools/bin/activate;
+# pip install git+https://github.com/dis-xcom/reclass-tools;
+
+# # Combine 'dns' role with compute nodes
+# reclass-tools add-key 'classes' 'cluster.{{ LAB_CONFIG_NAME }}.openstack.dns' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/compute.yml --merge;
+# # Remove linux.network.interface hardcode from 'dns' role to avoid conflict with compute interfaces
+# reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/dns.yml
+
+# export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
+# find ${REPLACE_DIRS} -type f -exec sed -i 's/openstack_dns_node01_address:.*/openstack_dns_node01_address: {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/g' {} +
+# find ${REPLACE_DIRS} -type f -exec sed -i 's/openstack_dns_node02_address:.*/openstack_dns_node02_address: {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/g' {} +
+
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
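Note: the three "Hack" steps above strip the control-network addresses that DHCP leased to ens4 during bootstrap (.110 on gtw01, .105/.106 on the computes), so those addresses stay free for the linux.network state to assign statically later. An illustrative spot check from cfg01 (example only):

    salt 'gtw01*' cmd.run 'ip -4 addr show ens4'   # expected empty until linux.network runs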
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/sl.yaml
similarity index 97%
copy from tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
copy to tcp_tests/templates/virtual-mcp-mitaka-dvr/sl.yaml
index f492e73..f7eada4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/sl.yaml
@@ -1,22 +1,6 @@
-{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-mitaka-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
@@ -81,8 +81,8 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+- description: Configure collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
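+# (Mitaka-era StackLight uses the heka log collector instead of fluentd,
+# hence the retargeted state above.)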
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..a73ca23
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
@@ -0,0 +1,70 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay.yaml
new file mode 100644
index 0000000..714cd1b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-mitaka-dvr/underlay.yaml
@@ -0,0 +1,581 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
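+# os_env() reads the value from the process environment, falling back to the
+# second argument; e.g. (assuming a bash shell): export REPOSITORY_SUITE=stable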
+
+{% import 'virtual-mcp-mitaka-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-mitaka-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
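+# The '&...' entries above are YAML anchors; node definitions below reference
+# them with '*' aliases (e.g. *cloudinit_meta_data) so the rendered cloud-init
+# payloads are defined once and reused.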
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-mitaka-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-mitaka-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
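+        # Offsets are relative to the pool's network address; negative values
+        # count back from the end of the network (fuel-devops convention).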
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+    - name: cloudimage1604  # This name is used as the 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+    - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+      # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
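+    # Note: os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) falls back
+    # to the generic Xenial cloud image when the pre-configured image path is
+    # not exported in the environment.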
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
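+    # Compute, gateway and dns nodes are attached to all four networks, so
+    # they reuse this layout below via the *all_interfaces and
+    # *all_network_config aliases.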
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+             # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
index 38ff7ac..fd46292 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/common-services.yaml
@@ -83,7 +83,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
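+# ('-b 1' applies the galera state in batches of one minion, so the slaves
+# join the cluster sequentially instead of restarting at the same time.)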
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
index 1783d6d..827b16a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
@@ -259,6 +259,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
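+# Setting the default FORWARD policy to ACCEPT on the gateway keeps routed
+# tenant/external traffic from being dropped by the default iptables policy
+# (assumption: same rationale as the other virtual lab templates).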
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
index 16c439e..0c06c6b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-apt01.yaml
@@ -39,8 +39,7 @@
- export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 8.8.8.8, 172.18.208.44" >> /etc/dhcp/dhclient.conf
+ - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- export TERM=linux
- export LANG=C
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
index 47120d5..49445ae 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
@@ -147,7 +147,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
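+      # libvirt's dnsmasq DHCP is disabled here; these networks receive
+      # static addressing from the deployment itself (assumption).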
forward:
mode: route
@@ -159,7 +159,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -236,7 +236,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 2c60b9a..83f45ea 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -18,8 +18,8 @@
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: ddmitriev@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -47,7 +47,7 @@
openstack_benchmark_node01_address: 172.16.10.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '100'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -103,14 +103,43 @@
openstack_version: ocata
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
index 0127547..91f76f6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
@@ -63,6 +63,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -145,6 +146,11 @@
roles:
- features_designate_pool_manager_dns
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.extra
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+ - system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
interfaces:
ens3:
role: single_dhcp
@@ -157,6 +163,11 @@
roles:
- features_designate_pool_manager_dns
- linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.extra
+ - system.linux.system.repo.mcp.apt_mirantis.openstack
+ - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+ - system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
interfaces:
ens3:
role: single_dhcp
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
index f8bf7b9..23420b1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index ed0ee59..f823c2f 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -375,6 +375,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Restart cinder volume
cmd: |
salt -C 'I@cinder:controller' service.restart cinder-volume;
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
index 5795a25..eff861b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
@@ -1,22 +1,6 @@
{% from 'virtual-mcp-ocata-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -65,6 +49,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
index 390bfdb..b9e03aa 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
@@ -144,7 +144,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -156,7 +156,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -184,7 +184,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -222,7 +222,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -263,7 +263,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -292,7 +292,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -321,7 +321,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -347,7 +347,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -373,7 +373,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
index 8f9e14f..5419a9b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
index f35c749..4fbecca 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
@@ -259,6 +259,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
index b2dfc14..c7aecc5 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
@@ -144,7 +144,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -156,7 +156,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -184,7 +184,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -222,7 +222,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -263,7 +263,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -292,7 +292,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index 5aaaaf1..08db21a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -18,8 +18,8 @@
deploy_network_netmask: 255.255.255.0
deploy_network_subnet: 192.168.10.0/24
deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
email_address: ddmitriev@mirantis.com
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -47,7 +47,7 @@
openstack_benchmark_node01_address: 172.16.10.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
- openstack_compute_count: '100'
+ openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 172.16.10
openstack_compute_rack01_tenant_subnet: 10.1.0
@@ -103,14 +103,43 @@
openstack_version: ocata
oss_enabled: 'False'
oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
publication_method: email
reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
salt_master_address: 172.16.10.90
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
index 63cedf1..3e05cf0 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
@@ -65,6 +65,7 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
+ - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
index f654d14..ec1692a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index a82a5f6..613814a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -361,6 +361,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
index c11350b..f581357 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
@@ -3,22 +3,6 @@
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -67,6 +51,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
index 44214e9..895ee4a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
@@ -134,7 +134,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -146,7 +146,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -174,7 +174,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -212,7 +212,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -253,7 +253,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -282,7 +282,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -311,7 +311,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -337,7 +337,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -363,7 +363,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 8b05b63..c5f8f3c 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -110,20 +110,28 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{% for ssh in config.underlay.ssh %}
+{%- for ssh in config.underlay.ssh %}
+ {%- set salt_roles = [] %}
+ {%- for role in ssh['roles'] %}
+ {%- if role in config.salt_deploy.salt_roles %}
+ {%- set _ = salt_roles.append(role) %}
+ {%- endif %}
+ {%- endfor %}
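+  {#- Restart salt-minion only on nodes whose underlay ssh roles intersect
+      the configured salt roles; other nodes are skipped. #}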
+
+ {%- if salt_roles %}
- description: Restart salt-minion as workaround of PROD-16970
cmd: |
service salt-minion restart; # For case if salt-minion was already installed
node_name: {{ ssh['node_name'] }}
retry: {count: 1, delay: 1}
skip_fail: false
-{% endfor %}
+ {%- endif %}
+{%- endfor %}
- description: Connect ceph to glance
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -165,4 +173,4 @@
'. /root/keystonercv3; glance --timeout 120 image-create --name "cirros" --disk-format raw --container-format bare --visibility public --file cirros.raw'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
index 72f2c9d..4017f70 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index f4903a9..c5f0593 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -259,6 +259,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
index cf558d1..483940e 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
@@ -159,7 +159,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -171,7 +171,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -196,7 +196,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -234,7 +234,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -275,7 +275,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -304,7 +304,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -654,8 +654,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
deleted file mode 100644
index 9fb83bf..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-default_context:
- bmk_enabled: 'False'
- ceph_enabled: 'False'
- cicd_enabled: 'False'
- cluster_domain: virtual-mcp-pike-dvr-ssl.local
- cluster_name: virtual-mcp-pike-dvr-ssl
- compute_bond_mode: active-backup
- compute_primary_first_nic: eth1
- compute_primary_second_nic: eth2
- context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
- control_network_netmask: 255.255.255.0
- control_network_subnet: 172.16.10.0/24
- control_vlan: '10'
- cookiecutter_template_branch: master
- cookiecutter_template_credentials: gerrit
- cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 192.168.10.1
- deploy_network_netmask: 255.255.255.0
- deploy_network_subnet: 192.168.10.0/24
- deployment_type: physical
- dns_server01: 8.8.8.8
- dns_server02: 8.8.4.4
- email_address: ddmitriev@mirantis.com
- gateway_primary_first_nic: eth1
- gateway_primary_second_nic: eth2
- infra_bond_mode: active-backup
- infra_deploy_nic: eth0
- infra_kvm01_control_address: 172.16.10.101
- infra_kvm01_deploy_address: 192.168.10.101
- infra_kvm01_hostname: kvm01
- infra_kvm02_control_address: 172.16.10.102
- infra_kvm02_deploy_address: 192.168.10.102
- infra_kvm02_hostname: kvm02
- infra_kvm03_control_address: 172.16.10.103
- infra_kvm03_deploy_address: 192.168.10.103
- infra_kvm03_hostname: kvm03
- infra_kvm_vip_address: 172.16.10.100
- infra_primary_first_nic: eth1
- infra_primary_second_nic: eth2
- kubernetes_enabled: 'False'
- local_repositories: 'False'
- maas_deploy_address: 192.168.10.90
- maas_hostname: cfg01
- mcp_version: stable
- offline_deployment: 'False'
- opencontrail_enabled: 'False'
- openstack_benchmark_node01_address: 172.16.10.95
- openstack_benchmark_node01_hostname: bmk01
- openstack_cluster_size: compact
- openstack_compute_count: '100'
- openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_control_address: 172.16.10.100
- openstack_control_hostname: ctl
- openstack_control_node01_address: 172.16.10.101
- openstack_control_node01_hostname: ctl01
- openstack_control_node02_address: 172.16.10.102
- openstack_control_node02_hostname: ctl02
- openstack_control_node03_address: 172.16.10.103
- openstack_control_node03_hostname: ctl03
- openstack_database_address: 172.16.10.100
- openstack_database_hostname: ctl
- openstack_database_node01_address: 172.16.10.101
- openstack_database_node01_hostname: ctl01
- openstack_database_node02_address: 172.16.10.102
- openstack_database_node02_hostname: ctl02
- openstack_database_node03_address: 172.16.10.103
- openstack_database_node03_hostname: ctl03
- openstack_enabled: 'True'
- openstack_gateway_node01_address: 172.16.10.110
- openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.1.0.6
- openstack_gateway_node02_address: 172.16.10.111
- openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.1.0.7
- openstack_gateway_node03_address: 172.16.10.112
- openstack_gateway_node03_hostname: gtw03
- openstack_gateway_node03_tenant_address: 10.1.0.8
- openstack_message_queue_address: 172.16.10.100
- openstack_message_queue_hostname: ctl
- openstack_message_queue_node01_address: 172.16.10.101
- openstack_message_queue_node01_hostname: ctl01
- openstack_message_queue_node02_address: 172.16.10.102
- openstack_message_queue_node02_hostname: ctl02
- openstack_message_queue_node03_address: 172.16.10.103
- openstack_message_queue_node03_hostname: ctl03
- openstack_network_engine: ovs
- openstack_neutron_qos: 'False'
- openstack_neutron_vlan_aware_vms: 'False'
- openstack_nfv_dpdk_enabled: 'False'
- openstack_nfv_sriov_enabled: 'False'
- openstack_nova_compute_nfv_req_enabled: 'False'
- openstack_ovs_dvr_enabled: 'True'
- openstack_ovs_encapsulation_type: vxlan
- openstack_proxy_address: 172.16.10.80
- openstack_proxy_hostname: prx
- openstack_proxy_node01_address: 172.16.10.121
- openstack_proxy_node01_hostname: prx01
- openstack_proxy_node02_address: 172.16.10.122
- openstack_proxy_node02_hostname: prx02
- openstack_upgrade_node01_address: 172.16.10.19
- openstack_version: pike
- oss_enabled: 'False'
- oss_node03_address: ${_param:stacklight_monitor_node03_address}
- oss_notification_app_id: '24'
- oss_notification_sender_password: password
- oss_notification_smtp_port: '587'
- oss_notification_webhook_login_id: '13'
- platform: openstack_enabled
- public_host: ${_param:openstack_proxy_address}
- publication_method: email
- reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
- salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
- salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
- salt_master_address: 172.16.10.90
- salt_master_hostname: cfg01
- salt_master_management_address: 192.168.10.90
- shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 172.16.10.70
- stacklight_log_hostname: mon
- stacklight_log_node01_address: 172.16.10.107
- stacklight_log_node01_hostname: mon01
- stacklight_log_node02_address: 172.16.10.108
- stacklight_log_node02_hostname: mon02
- stacklight_log_node03_address: 172.16.10.109
- stacklight_log_node03_hostname: mon03
- stacklight_monitor_address: 172.16.10.70
- stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 172.16.10.107
- stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 172.16.10.108
- stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 172.16.10.109
- stacklight_monitor_node03_hostname: mon03
- stacklight_notification_address: alerts@localhost
- stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 172.16.10.70
- stacklight_telemetry_hostname: mon
- stacklight_telemetry_node01_address: 172.16.10.107
- stacklight_telemetry_node01_hostname: mon01
- stacklight_telemetry_node02_address: 172.16.10.108
- stacklight_telemetry_node02_hostname: mon02
- stacklight_telemetry_node03_address: 172.16.10.109
- stacklight_telemetry_node03_hostname: mon03
- stacklight_version: '2'
- static_ips_on_deploy_network_enabled: 'False'
- tenant_network_gateway: 10.1.0.1
- tenant_network_netmask: 255.255.255.0
- tenant_network_subnet: 10.1.0.0/24
- tenant_vlan: '20'
- upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
index 39e9398..d0143da 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
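
Note on the `-b 1` change above: the batch flag makes Salt apply the galera state to one matching minion at a time instead of all slaves in parallel, so each node finishes joining the cluster before the next starts its state sync. A minimal sketch of the difference, assuming a standard Salt master shell:

    # all matching minions at once -- slaves may race each other while joining
    salt -C 'I@galera:slave' state.sls galera

    # batch size 1 -- one slave at a time; the next starts only after the previous returns
    salt -C 'I@galera:slave' state.sls galera -b 1
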
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index 5d573d7..5fdf941 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -381,6 +381,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Restart cinder volume
cmd: |
salt -C 'I@cinder:controller' service.restart cinder-volume;
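
The new "Enable forward policy" step flips the default policy of the FORWARD chain on the gateway node so routed traffic is no longer dropped. A quick way to apply and verify the same change by hand, assuming shell access to gtw01:

    iptables --policy FORWARD ACCEPT   # long form of: iptables -P FORWARD ACCEPT
    iptables -S FORWARD | head -n 1    # should now print: -P FORWARD ACCEPT
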
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
index 32c0e91..d1c8656 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
@@ -20,9 +20,6 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
@@ -43,9 +40,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -63,9 +57,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -83,9 +74,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -103,9 +91,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -134,7 +119,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -146,7 +131,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -171,7 +156,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -209,7 +194,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -250,7 +235,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -279,7 +264,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -304,84 +289,6 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
params:
@@ -492,8 +399,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
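
This underlay drops the mon01..mon03 VMs, disables libvirt-managed DHCP on the private and tenant pools (addresses come from the template's address pools instead), and raises the node sizes. Every size uses the `!os_env VAR, default` pattern, so the new values (8192 MB for cfg01, 16384 MB for the controllers, 4 vCPU / 4096 MB for gtw01) are only defaults; assuming the template is rendered from the test job's environment, they can still be overridden:

    # export before running the job to shrink the footprint on small hosts
    export SLAVE_NODE_CPU=2
    export SLAVE_NODE_MEMORY=12288
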
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
index 965d297..3981011 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index 45ededb..8276b67 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -375,6 +375,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Restart cinder volume
cmd: |
salt -C 'I@cinder:controller' service.restart cinder-volume;
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index 21aa389..805e184 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -14,7 +14,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
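
"backupninja" is appended to FORMULA_SERVICES so the reclass configuration step also fetches that formula. Assuming the macro maps each listed name onto a `salt-formula-<name>` package (the usual MCP packaging convention; the macro itself is not shown in this diff), the added entry amounts to roughly:

    # hypothetical expansion of the new FORMULA_SERVICES entry
    apt-get install -y salt-formula-backupninja
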
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
deleted file mode 100644
index b3818b7..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influx db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
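
The whole StackLight scenario is removed together with the mon nodes dropped from underlay.yaml below. One idiom from the deleted file worth keeping in mind is the presence check before applying an optional state, which avoids a hard failure when no minion carries the pillar. A standalone sketch of the pattern, assuming a Salt master shell:

    # apply the influxdb state only if at least one minion matches the target
    if salt -C 'I@influxdb:server' test.ping >/dev/null 2>&1; then
        salt -C 'I@influxdb:server' state.sls influxdb
    fi
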
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
index b4aff6a..c198901 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
@@ -20,9 +20,6 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
@@ -45,9 +42,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
@@ -67,9 +61,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
@@ -89,9 +80,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
@@ -111,9 +99,6 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
@@ -144,7 +129,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -156,7 +141,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -181,7 +166,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -219,7 +204,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -260,7 +245,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -289,7 +274,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -314,84 +299,6 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # to store the image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
params:
@@ -502,8 +409,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index b602748..bcbcad0 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -113,20 +113,28 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{% for ssh in config.underlay.ssh %}
+{%- for ssh in config.underlay.ssh %}
+ {%- set salt_roles = [] %}
+ {%- for role in ssh['roles'] %}
+ {%- if role in config.salt_deploy.salt_roles %}
+ {%- set _ = salt_roles.append(role) %}
+ {%- endif %}
+ {%- endfor %}
+
+ {%- if salt_roles %}
- description: Restart salt-minion as a workaround for PROD-16970
cmd: |
service salt-minion restart; # For case if salt-minion was already installed
node_name: {{ ssh['node_name'] }}
retry: {count: 1, delay: 1}
skip_fail: false
-{% endfor %}
+ {%- endif %}
+{%- endfor %}
- description: Connect ceph to glance
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -168,4 +176,4 @@
'. /root/keystonercv3; glance --timeout 120 image-create --name "cirros" --disk-format raw --container-format bare --visibility public --file cirros.raw'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
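
The reworked loop restarts salt-minion (the PROD-16970 workaround) only on SSH entries whose roles intersect `config.salt_deploy.salt_roles`, instead of on every entry. The same intersection logic as a plain shell sketch; both role lists here are illustrative stand-ins, not values from the repository:

    salt_roles="salt_master salt_minion"      # stands in for config.salt_deploy.salt_roles
    node_roles="k8s_controller salt_minion"   # stands in for one ssh['roles'] entry
    match=""
    for role in $node_roles; do
      case " $salt_roles " in *" $role "*) match=yes ;; esac
    done
    [ -n "$match" ] && echo "this node gets the salt-minion restart step"
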
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
index a33ed13..b825cc2 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index 9d7dbf4..34692c7 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -259,6 +259,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
index 3cfaed6..44559f9 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
@@ -144,7 +144,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -156,7 +156,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -181,7 +181,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -219,7 +219,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -260,7 +260,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -289,7 +289,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -556,8 +556,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
index c762467..0187c6b 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 65f3dab..fa6aa9c 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -151,7 +151,55 @@
retry: {count: 5, delay: 10}
skip_fail: false
+# Install Telemetry services (mdb nodes)
+- description: Install redis service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:cluster:role:master' state.sls redis &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@redis:server' state.sls redis
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+- description: Install gnocchi server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server and *01*' state.sls gnocchi.server &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:server' state.sls gnocchi.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+#- description: Install gnocchi statsd (optional)
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd and *01*' state.sls gnocchi.statsd &&
+# salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd' state.sls gnocchi.statsd
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+- description: Install panko server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install ceilometer server on first node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server and *01*' state.sls ceilometer
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install ceilometer server on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceilometer:server' state.sls ceilometer
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Install aodh server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server and *01*' state.sls aodh &&
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aodh:server' state.sls aodh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install OpenStack dashboard and proxy services
- description: Deploy horizon dashboard
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@horizon:server' state.sls horizon
@@ -361,6 +409,12 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy
+ cmd: iptables --policy FORWARD ACCEPT
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
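
Each new telemetry step targets `I@<service>:server and *01*` first and the full server set second, so the one-time bootstrap work (schema creation, cluster initialization) happens on a single node before the state fans out. The shape of the pattern, assuming a standard Salt master shell:

    # initialize on the first node, then converge the remaining servers
    salt -C 'I@aodh:server and *01*' state.sls aodh && \
    salt -C 'I@aodh:server' state.sls aodh
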
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 6168127..422e178 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -14,7 +14,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
deleted file mode 100644
index c5a6ffa..0000000
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
-
-# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves for proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influx db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
-{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
index f87f2a5..8c45b68 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
@@ -20,11 +20,12 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -43,11 +44,12 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MDB01 }}: +45
+ default_{{ HOSTNAME_MDB02 }}: +46
+ default_{{ HOSTNAME_MDB03 }}: +47
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +130
ip_ranges:
dhcp: [+90, -10]
@@ -63,11 +65,12 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MDB01 }}: +45
+ default_{{ HOSTNAME_MDB02 }}: +46
+ default_{{ HOSTNAME_MDB03 }}: +47
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +130
ip_ranges:
dhcp: [+90, -10]
@@ -83,11 +86,12 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MDB01 }}: +45
+ default_{{ HOSTNAME_MDB02 }}: +46
+ default_{{ HOSTNAME_MDB03 }}: +47
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +130
ip_ranges:
dhcp: [+10, -10]
@@ -103,11 +107,12 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_MON01 }}: +107
- default_{{ HOSTNAME_MON02 }}: +108
- default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_MDB01 }}: +45
+ default_{{ HOSTNAME_MDB02 }}: +46
+ default_{{ HOSTNAME_MDB03 }}: +47
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +130
ip_ranges:
dhcp: [+10, -10]
@@ -134,7 +139,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -146,7 +151,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -171,7 +176,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -209,7 +214,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -250,7 +255,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -279,7 +284,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -304,11 +309,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
+ - name: {{ HOSTNAME_MDB01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -318,6 +323,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -330,11 +338,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON02 }}
+ - name: {{ HOSTNAME_MDB02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -344,6 +352,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -356,11 +367,11 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON03 }}
+ - name: {{ HOSTNAME_MDB03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -370,6 +381,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: mcp_ubuntu_1604_image
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
capacity: 1
@@ -491,8 +505,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
@@ -513,3 +527,29 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
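
Here the mon01..mon03 monitoring VMs become mdb01..mdb03 telemetry nodes (2 vCPU / 8192 MB instead of 3 vCPU / 2048 MB), each with an extra 50 GB qcow2 volume named `cinder`, and a new share01 node is added (presumably for share services; the diff itself does not say). Assuming the devops driver creates plain qcow2 files for volumes without a backing store, the new `cinder` volume corresponds roughly to:

    # hypothetical equivalent of the added 50 GB volume definition
    qemu-img create -f qcow2 mdb01-cinder.qcow2 50G
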
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
index 3ad67c2..7049d2a 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
index 0ddf871..fe35460 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
@@ -114,20 +114,20 @@
retry: {count: 1, delay: 5}
skip_fail: false
-# isntall designate
-#- description: Install bind
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@bind:server' state.sls bind
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-#
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
+# install designate
+- description: Install bind
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@bind:server' state.sls bind
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -281,6 +281,14 @@
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Enable forward policy on gtw
+ cmd: |
+ set -e;
+ iptables --policy FORWARD ACCEPT;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
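
Unlike the other templates, this variant wraps the policy change in `set -e`, so the step fails loudly if iptables itself returns non-zero instead of being reported as passed. The behaviour in isolation:

    set -e                            # abort the script on the first failing command
    iptables --policy FORWARD ACCEPT  # an error here now fails the whole step
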
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
index 76b76b6..37a5bff 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
@@ -3,22 +3,6 @@
{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on mon nodes
- cmd: |
- SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
- echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Configure docker service
cmd: salt -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
@@ -67,6 +51,22 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
# Install slv2 infra
- description: Install telegraf
cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
index 6448211..da0761b 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
@@ -49,7 +49,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
index cf74d86..4b4ce8e 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
@@ -134,7 +134,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -146,7 +146,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
index 5fa88e9..7b19b50 100644
--- a/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp10-contrail/common-services.yaml
@@ -75,7 +75,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml
index c9728cc..8f039fd 100644
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp10-contrail/underlay--user-data-cfg01.yaml
@@ -47,7 +47,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml
index 5ee6958..eb64cf1 100644
--- a/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp10-contrail/underlay.yaml
@@ -136,7 +136,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -146,7 +148,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
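
Besides turning DHCP off, the mcp10 underlays now set an explicit `forward: mode: route` on the private network, matching the newer templates above. Assuming this renders into a libvirt network definition, the result can be checked on the host once the environment is up (the network name below is illustrative):

    # expect a <forward mode='route'/> element and no <dhcp> section
    virsh net-dumpxml <env_name>_private   # actual name depends on the created environment
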
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
index b59a90d..920286d 100644
--- a/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp10-dvr/common-services.yaml
@@ -62,7 +62,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml
index 2bdd4ae..e1c2efe 100644
--- a/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp10-dvr/salt.yaml
@@ -12,7 +12,6 @@
- description: Configure repository on the cfg01 node
cmd:
echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
which wget >/dev/null || (apt-get update; apt-get install -y wget);
echo "deb [arch=amd64] http://apt.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml
index a308d72..e163d23 100644
--- a/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp10-dvr/underlay.yaml
@@ -110,7 +110,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -120,7 +122,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml
index d093c3e..57c0417 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs.new/salt.yaml
@@ -12,7 +12,6 @@
- description: Configure repository on the cfg01 node
cmd:
echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
which wget >/dev/null || (apt-get update; apt-get install -y wget);
echo "deb [arch=amd64] http://apt.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
diff --git a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml
index 528a0bf..0a6b9f4 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs.new/underlay.yaml
@@ -110,7 +110,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -120,7 +122,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
index 7b85b51..c42213f 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/common-services.yaml
@@ -62,7 +62,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml
index d093c3e..57c0417 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/salt.yaml
@@ -12,7 +12,6 @@
- description: Configure repository on the cfg01 node
cmd:
echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
which wget >/dev/null || (apt-get update; apt-get install -y wget);
echo "deb [arch=amd64] http://apt.mirantis.com/xenial nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml
index 528a0bf..0a6b9f4 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/underlay.yaml
@@ -110,7 +110,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -120,7 +122,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml b/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
index 82664ec..6653d1f 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/common-services.yaml
@@ -82,7 +82,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
index 6448211..da0761b 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/underlay--user-data-cfg01.yaml
@@ -49,7 +49,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
index 50d16e0..7a37142 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/underlay.yaml
@@ -119,7 +119,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -129,7 +131,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
index 25c662b..739ba35 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay--user-data-cfg01.yaml
@@ -51,7 +51,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
index 58739fb..1362b8e 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/common-services.yaml
@@ -80,7 +80,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
index 2355c0c..1e3df5b 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
@@ -112,7 +112,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
index 9ed734a..d21f166 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
@@ -67,7 +67,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
index 60094c7..d76d5d4 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
@@ -164,7 +164,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -366,7 +366,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 3072) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -392,7 +392,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 3072) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
boot:
- hd
cloud_init_volume_name: iso
@@ -418,7 +418,7 @@
role: salt_minion
params:
vcpu: {{ os_env('MON_NODE_CPU', 1) }}
- memory: {{ os_env('MON_NODE_MEMORY', 3072) }}
+ memory: {{ os_env('MON_NODE_MEMORY', 8192) }}
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
index db00e7b..2356475 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
index 6448211..da0761b 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -49,7 +49,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
index 888786e..c396bcd 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/underlay.yaml
@@ -120,7 +120,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -130,7 +132,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
index 129d360..7d13f72 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs.new/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
index a40dbaa..d569b54 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay--user-data-cfg01.yaml
@@ -47,7 +47,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
- echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
diff --git a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
index 68c09ae..40ea763 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs.new/underlay.yaml
@@ -119,7 +119,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -129,7 +131,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml b/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
index 85faa31..7daf069 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/common-services.yaml
@@ -83,7 +83,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
index 6448211..da0761b 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/underlay--user-data-cfg01.yaml
@@ -49,7 +49,6 @@
- echo "Preparing base OS"
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
index 1291dae..bd52ae0 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/underlay.yaml
@@ -119,7 +119,9 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
+ forward:
+ mode: route
admin:
address_pool: admin-pool01
@@ -129,7 +131,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
index 34283a1..2527625 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
@@ -83,7 +83,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
index 44024a9..1e5d62e 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -5,7 +5,8 @@
{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker/ ' + REPOSITORY_SUITE + ' stable') %}
# Install OpenStack control services
- description: Install glance on all controllers
@@ -114,6 +115,16 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install bind if the 'bind:server' pillar exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@bind:server' match.pillar 'bind:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@bind:server' state.sls bind;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@designate:server' state.sls designate -b 1
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
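Note: the match.pillar guard above is a reusable pattern: the state runs only in environments whose model actually defines the pillar, and designate is batched with -b 1 for the same serial-join reason as galera. Generalized sketch (the <service> placeholders are hypothetical):

    - description: Apply <service> only where its pillar is defined
      cmd: |
        if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@<service>:server' match.pillar '<service>:server' ; then
          salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@<service>:server' state.sls <service>;
        fi
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false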
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
@@ -318,8 +329,25 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
+- description: Enable local docker repo
+ cmd: |
+ set -e;
+ echo "{{ DOCKER_LOCAL_REPO }}" > /etc/apt/sources.list.d/mcp_docker.list;
+ apt-get clean; apt-get update;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker-ce on gtw
+ cmd: salt-call cmd.run 'apt-get install docker-ce -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy on gtw
+ cmd: |
+ set -e;
+ iptables --policy FORWARD ACCEPT;
node_name: {{ HOSTNAME_GTW01 }}
retry: {count: 1, delay: 30}
skip_fail: false
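Note: these replacement steps move the gateway from the distribution docker.io package to docker-ce from the local mirror (consistent with the offline profile), then force the FORWARD policy back to ACCEPT: docker-ce sets the iptables FORWARD chain policy to DROP when it starts, which would silently break routed tenant traffic through gtw01. A hedged check step, not part of this change:

    - description: Confirm forward policy on gtw after docker install
      cmd: iptables -S FORWARD | head -1   # expect "-P FORWARD ACCEPT"
      node_name: {{ HOSTNAME_GTW01 }}
      retry: {count: 1, delay: 10}
      skip_fail: true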
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
index 7d01059..6dcefa2 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
@@ -59,7 +59,7 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
index 16c439e..0c06c6b 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
@@ -39,8 +39,7 @@
- export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
- echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - echo "supersede domain-name-servers $LOCAL_DNS_IP, 8.8.8.8, 172.18.208.44" >> /etc/dhcp/dhclient.conf
+ - echo "supersede domain-name-servers $LOCAL_DNS_IP, 172.18.208.44" >> /etc/dhcp/dhclient.conf
- export TERM=linux
- export LANG=C
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
index ccb9a12..e473292 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
@@ -122,7 +122,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -134,7 +134,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -152,6 +152,9 @@
- name: mcp_ubuntu_1604_image # Pre-configured image for control plane
source_image: !os_env MCP_IMAGE_PATH1604
format: qcow2
+ - name: apt_cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
+ source_image: !os_env APT_IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
nodes:
- name: {{ HOSTNAME_APT01 }}
@@ -166,7 +169,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: apt_cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -208,7 +211,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..c72c3bb
--- /dev/null
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -0,0 +1,156 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-pike-ovs-dpdk.local
+ cluster_name: virtual-pike-ovs-dpdk
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: ${_param:openstack_control_node01_address}
+# infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: ${_param:openstack_control_node01_hostname}
+ infra_kvm02_control_address: ${_param:openstack_control_node02_address}
+# infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: ${_param:openstack_control_node02_hostname}
+ infra_kvm03_control_address: ${_param:openstack_control_node03_address}
+# infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: ${_param:openstack_control_node03_hostname}
+ infra_kvm_vip_address: ${_param:openstack_control_address}
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.4.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.4.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.4.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.4.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 10.167.4.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.4.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.4.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.4.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'True'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_hugepages_count: '600'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_cpu_pinning: '3'
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_branch: master
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
similarity index 66%
copy from tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
index 63cedf1..a5640d1 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.virtual-pike-ovs-dpdk.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,17 +10,11 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.virtual-pike-ovs-dpdk.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- - features_designate_bind9_keystone
- linux_system_codename_xenial
interfaces:
ens3:
@@ -28,16 +22,11 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.virtual-pike-ovs-dpdk.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9_dns
- - features_designate_bind9
- linux_system_codename_xenial
interfaces:
ens3:
@@ -45,15 +34,11 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.virtual-pike-ovs-dpdk.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- - openstack_database
- - openstack_message_queue
- - features_designate_bind9_database
- - features_designate_bind9
- linux_system_codename_xenial
interfaces:
ens3:
@@ -61,7 +46,73 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ dbs01.virtual-pike-ovs-dpdk.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs02.virtual-pike-ovs-dpdk.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs03.virtual-pike-ovs-dpdk.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg01.virtual-pike-ovs-dpdk.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg02.virtual-pike-ovs-dpdk.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg03.virtual-pike-ovs-dpdk.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.virtual-pike-ovs-dpdk.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
@@ -72,47 +123,8 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node01
- roles:
- - stacklightv2_server_leader
- - stacklight_telemetry_leader
- - stacklight_log_leader_v2
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon02.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node02
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mon03.mcp11-ovs-dpdk.local:
- reclass_storage_name: stacklight_server_node03
- roles:
- - stacklightv2_server
- - stacklight_telemetry
- - stacklight_log
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.virtual-pike-ovs-dpdk.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -127,7 +139,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.virtual-pike-ovs-dpdk.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
new file mode 100644
index 0000000..050c4c4
--- /dev/null
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
@@ -0,0 +1,37 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-pike-ovs-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
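Note: the workaround block deletes the storage classes for clusters whose roles this model folds onto other nodes (physical control, stacklight telemetry/log) and shifts compute numbering. Sketch of the intended effect on infra/config.yml (the surrounding reclass layout is an assumption, not taken from this change):

    # parameters:
    #   reclass:
    #     storage:
    #       node:
    #         openstack_compute_rack01:
    #           repeat:
    #             start: 105   # was 101; compute addresses begin at .105, as in the static models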
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
index 84e4829..9da458c 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
@@ -76,7 +76,7 @@
- description: Install Galera on other servers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera
+ -C 'I@galera:slave' state.sls galera -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index 4079089..1cd8024 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -1,4 +1,7 @@
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
@@ -11,7 +14,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -28,19 +31,19 @@
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
- description: Hack gtw node
- cmd: salt 'gtw*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Hack cmp01 node
- cmd: salt 'cmp01*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Hack cmp02 node
- cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
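Note: replacing the 'gtw*'/'cmp01*'/'cmp02*' globs with the imported FQDN variables makes targeting exact; a glob matches any minion ID with that prefix, which can pick up nodes of another lab registered on the same master. Illustrative comparison:

    # salt 'gtw*' test.ping                              # any minion whose ID starts with gtw
    # salt 'gtw01.virtual-pike-ovs-dpdk.local' test.ping # exactly one minion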
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
index f0463b1..a5673e2 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
@@ -18,6 +18,14 @@
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -38,6 +46,14 @@
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
default_{{ HOSTNAME_GTW01 }}: +110
@@ -55,6 +71,14 @@
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
default_{{ HOSTNAME_GTW01 }}: +110
@@ -72,6 +96,14 @@
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
default_{{ HOSTNAME_GTW01 }}: +110
@@ -89,6 +121,14 @@
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_DBS }}: +50
+ default_{{ HOSTNAME_DBS01 }}: +51
+ default_{{ HOSTNAME_DBS02 }}: +52
+ default_{{ HOSTNAME_DBS03 }}: +53
+ default_{{ HOSTNAME_MSG }}: +40
+ default_{{ HOSTNAME_MSG01 }}: +41
+ default_{{ HOSTNAME_MSG02 }}: +42
+ default_{{ HOSTNAME_MSG03 }}: +43
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
default_{{ HOSTNAME_GTW01 }}: +110
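Note: the +N entries pin each new node to a fixed final octet inside every address pool, lining up with the cookiecutter context above (dbs01: +51 matches openstack_database_node01_address: 10.167.4.51; msg01: +41 matches 10.167.4.41). Illustrative mapping, assuming /24 pools:

    # default_dbs01.<domain>: +51  ->  <pool base>.51
    # default_msg01.<domain>: +41  ->  <pool base>.41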
@@ -120,7 +160,7 @@
l2_network_devices:
private:
address_pool: private-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
@@ -132,7 +172,7 @@
tenant:
address_pool: tenant-pool01
- dhcp: true
+ dhcp: false
external:
address_pool: external-pool01
@@ -155,7 +195,7 @@
role: salt_master
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -193,7 +233,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -234,7 +274,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -263,7 +303,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 12288
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
boot:
- hd
cloud_init_volume_name: iso
@@ -288,6 +328,162 @@
interfaces: *interfaces
network_config: *network_config
+ - name: {{ HOSTNAME_DBS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_DBS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_DBS03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MSG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
params:
@@ -408,8 +604,8 @@
- name: {{ HOSTNAME_GTW01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
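Note: raising gtw01 from 1 vCPU / 2048 MB to 4 vCPU / 4096 MB fits the DPDK profile of this template (the cookiecutter context enables openstack_nfv_dpdk_enabled with CPU pinning and hugepages); a poll-mode OVS consumes whole cores. Rough, purely illustrative budget:

    # 1 core   - host OS and salt-minion
    # 3 cores  - OVS-DPDK poll-mode / forwarding threads
    # 4096 MB  - headroom for hugepage-backed buffers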
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 64288ab..754e0d7 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -25,7 +25,7 @@
from tcp_tests.fixtures.decapod_fixtures import * # noqa
from tcp_tests.fixtures.stacklight_fixtures import * # noqa
from tcp_tests.fixtures.k8s_fixtures import * # noqa
-
+from tcp_tests.fixtures.drivetrain_fixtures import * # noqa
__all__ = sorted([ # sort for documentation
# common_fixtures
@@ -53,6 +53,9 @@
'oss_actions',
'oss_deployed',
'oss_sl_os_deployed',
+ # drivetrain_fixtures
+ 'drivetrain_actions',
+ 'drivetrain_deployed',
# decapod_fixtures
'decapod_actions',
'decapod_deployed',
diff --git a/tcp_tests/tests/system/test_failover.py b/tcp_tests/tests/system/test_failover.py
deleted file mode 100644
index 615e26f..0000000
--- a/tcp_tests/tests/system/test_failover.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# Copyright 2017 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from devops.helpers import helpers
-import pytest
-
-from tcp_tests import logger
-from tcp_tests.helpers import ext
-
-LOG = logger.logger
-
-
-class TestFailover(object):
- """Test class for testing OpenStack nodes failover"""
-
- @staticmethod
- def check_influxdb_xfail(sl_deployed, node_name, value):
-
- def check_influxdb_data():
- return value in sl_deployed.check_data_in_influxdb(node_name)
-
- try:
- helpers.wait(
- check_influxdb_data,
- timeout=10, interval=2,
- timeout_msg=('Influxdb data {0} was not replicated to {1} '
- '[https://mirantis.jira.com/browse/PROD-16272]'
- .format(value, node_name)))
- except Exception:
- pytest.xfail('Influxdb data {0} was not replicated to {1} '
- '[https://mirantis.jira.com/browse/PROD-16272]'
- .format(value, node_name))
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_warm_shutdown_ctl01_node(self, underlay, openstack_deployed,
- openstack_actions, show_step):
- """Test warm shutdown ctl01
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Shutdown ctl01
- 5. Run tempest smoke after failover
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
- # STEP #4
- show_step(4)
- openstack_actions.warm_shutdown_openstack_nodes('ctl01')
- # STEP #5
- show_step(5)
- openstack_actions.run_tempest(pattern='smoke')
-
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_restart_ctl01_node(self, underlay, openstack_deployed,
- openstack_actions, show_step):
- """Test restart ctl01
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Restart ctl01
- 5. Run tempest smoke after failover
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- openstack_actions.warm_restart_nodes('ctl01')
- # STEP #5
- show_step(5)
- openstack_actions.run_tempest(pattern='smoke')
-
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_warm_shutdown_cmp01_node(self, underlay, openstack_deployed,
- openstack_actions, show_step):
- """Test warm shutdown cmp01
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Shutdown cmp01
- 5. Run tempest smoke after failover
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- openstack_actions.warm_shutdown_openstack_nodes('cmp01')
- # STEP #5
- show_step(5)
- openstack_actions.run_tempest(pattern='smoke')
-
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_restart_cmp01_node(self, underlay, openstack_deployed,
- openstack_actions, show_step):
- """Test restart cmp01
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Restart cmp01
- 5. Run tempest smoke after failover
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- openstack_actions.warm_restart_nodes('cmp01')
- # STEP #5
- show_step(5)
- openstack_actions.run_tempest(pattern='smoke')
-
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_restart_mon01_node(self, openstack_actions,
- sl_os_deployed, show_step):
- """Test restart mon01
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute, monitoring nodes
- 4. Check targets before restart
- 5. Restart mon01
- 6. Check targets after restart
- 7. Run LMA smoke after failover
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- sl_os_deployed.check_prometheus_targets(mon_nodes)
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
- # STEP #5
- show_step(5)
- openstack_actions.warm_restart_nodes('mon01')
- # STEP #6
- show_step(6)
- sl_os_deployed.check_prometheus_targets(mon_nodes)
- # STEP #7
- show_step(7)
- # Run SL component tetsts
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_warm_shutdown_mon01_node(self, openstack_actions,
- sl_os_deployed,
- show_step):
- """Test warm shutdown mon01
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute, monitoring nodes
- 4. Check LMA before mon node shutdown
- 5. Shutdown mon01 node
- 6. Run LMA tests after failover
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- sl_os_deployed.check_prometheus_targets(mon_nodes)
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
- # STEP #5
- show_step(5)
- openstack_actions.warm_shutdown_openstack_nodes('mon01')
- # STEP #6
- show_step(6)
- # Run SL component tetsts
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_kill_influxdb_relay_mon01_node(self, sl_os_deployed,
- show_step):
- """Test kill influxdb relay on mon01 node
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute, monitoring nodes
- 4. Check LMA before mon node shutdown
- 5. Kill influxdb relay on mon01 node
- 6. Post data into influx
- 7. Get data from all healthy nodes
- 8. Start influx db
- 9. Request data on mon01
- 10. Run LMA tests after fail and compare with result before fail
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
-
- # STEP #5
- show_step(5)
- sl_os_deployed.kill_sl_service_on_node('mon01', 'influxdb-relay')
-
- # STEP #6
- show_step(6)
- sl_os_deployed.post_data_into_influx('mon02')
-
- # STEP #7
- show_step(7)
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
- # STEP #8
- show_step(8)
- sl_os_deployed.start_service('mon01', 'influxdb-relay')
-
- # STEP #9
- show_step(9)
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
-
- # STEP #10
- show_step(10)
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_kill_influxdb_mon01_node(self, sl_os_deployed, show_step):
- """Test kill influxdb on mon01 node
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute, monitoring nodes
- 4. Check LMA before mon node shutdown
- 5. Kill influxdb on mon01 node
- 6. Post data into influx
- 7. Get data from all healthy nodes
- 8. Start influx db
- 9. Request data on mon01
- 10. Run LMA tests after fail and compare with result before fail
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
- # STEP #5
- show_step(5)
- sl_os_deployed.kill_sl_service_on_node('mon01', 'influxd')
-
- # STEP #6
- show_step(6)
- sl_os_deployed.post_data_into_influx('mon02')
-
- # STEP #7
- show_step(7)
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
- # STEP #8
- show_step(8)
- sl_os_deployed.start_service('mon01', 'influxd')
-
- # STEP #9
- show_step(9)
- self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
-
- # STEP #10
- show_step(10)
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_stop_influxdb_relay_mon_nodes(self, sl_os_deployed,
- show_step):
- """Test stop influxdb relay on mon01 node
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute, monitoring nodes
- 4. Check LMA before mon node shutdown
- 5. Stop influxdb relay on mon01 and mon02 nodes
- 6. Post data into influx
- 7. Get data from all healthy nodes
- 8. Start influx db
- 9. Request data on mon01, 02
- 10. Run LMA tests after fail and compare with result before fail
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
- # STEP #5
- show_step(5)
- sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb-relay')
- sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb-relay')
-
- # STEP #6
- show_step(6)
- sl_os_deployed.post_data_into_influx('mon03')
-
- # STEP #7
- show_step(7)
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
- # STEP #8
- show_step(8)
- sl_os_deployed.start_service('mon01', 'influxdb-relay')
- sl_os_deployed.start_service('mon02', 'influxdb-relay')
-
- # STEP #9
- show_step(9)
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
-
- # STEP #10
- show_step(10)
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_stop_influxdb_mon_nodes(self, sl_os_deployed, show_step):
- """Test stop influxdb on mon01 node
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute, monitoring nodes
- 4. Check LMA before mon node shutdown
- 5. Stop influxdb on mon01 and mon02 node
- 6. Post data into influx
- 7. Get data from all healthy nodes
- 8. Start influx db
- 9. Request data on mon01
- 10. Run LMA tests after fail and compare with result before fail
-
-
- """
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
-
- # STEP #5
- show_step(5)
- sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb')
- sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb')
-
- # STEP #6
- show_step(6)
- sl_os_deployed.post_data_into_influx('mon03')
-
- # STEP #7
- show_step(7)
- assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
-
- # STEP #8
- show_step(8)
- sl_os_deployed.start_service('mon01', 'influxdb')
- sl_os_deployed.start_service('mon02', 'influxdb')
-
- # STEP #9
- show_step(9)
- self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
- self.check_influxdb_xfail(sl_os_deployed, 'mon02', 'mymeas')
-
- # STEP #10
- show_step(10)
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
- def test_restart_mon_with_vip(self, sl_os_deployed,
- openstack_actions, salt_actions,
- common_services_actions, show_step):
- """Test restart mon with VIP
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute and monitoring nodes
- 4. Check LMA before mon node restart
- 5. Find mon minion id with VIP
- 6. Restart mon minion id with VIP
- 7. Check that VIP was actually migrated on a new node
- 8. Run tempest smoke after failover
-
-
- """
- # TR case #4753939
- common_services_actions.check_keepalived_pillar()
- salt = salt_actions
-
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- mon_nodes = sl_os_deployed.get_monitoring_nodes()
- LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- before_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- failed_tests = [test['name'] for test in
- before_result if 'passed' not in test['outcome']]
-
- # STEP #5
- show_step(5)
- mon_vip_pillar = salt.get_pillar(
- tgt="mon0*",
- pillar="_param:cluster_vip_address")[0]
- vip = [vip for minion_id, vip in mon_vip_pillar.items()][0]
- minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
- LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
-
- # STEP #6
- show_step(6)
- openstack_actions.warm_restart_nodes(minion_vip)
-
- # STEP #7
- show_step(7)
- # Check that VIP has been actually migrated to a new node
- new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
- vip)
- LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
- assert new_minion_vip != minion_vip, (
- "VIP {0} wasn't migrated from {1} after node reboot!"
- .format(vip, new_minion_vip))
- common_services_actions.check_keepalived_pillar()
-
- # STEP #8
- show_step(8)
- # Run SL component tetsts
- after_result = sl_os_deployed.run_sl_tests_json(
- 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
- 'tests/prometheus/', 'test_alerts.py')
- for test in after_result:
- if test['name'] not in failed_tests:
- assert 'passed' in test['outcome'], \
- 'Failed test {}'.format(test)
- LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_failover_nodes.py b/tcp_tests/tests/system/test_failover_nodes.py
new file mode 100644
index 0000000..0abada9
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover_nodes.py
@@ -0,0 +1,370 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+class TestFailoverNodes(object):
+ """Test class for testing OpenStack nodes failover"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_ctl01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test warm shutdown ctl01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Shutdown ctl01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+ # STEP #4
+ show_step(4)
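+ # "Warm" shutdown is assumed here to mean a graceful, OS-level
+ # power-off of the underlay node (as opposed to cutting power), so
+ # services get a chance to stop cleanly before failover is checked.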
+ openstack_actions.warm_shutdown_openstack_nodes('ctl01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_ctl01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test restart ctl01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Restart ctl01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('ctl01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_warm_shutdown_cmp01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test warm shutdown cmp01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Shutdown cmp01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_shutdown_openstack_nodes('cmp01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_cmp01_node(self, underlay, openstack_deployed,
+ openstack_actions, show_step):
+ """Test restart cmp01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Restart cmp01
+ 5. Run tempest smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ openstack_actions.warm_restart_nodes('cmp01')
+ # STEP #5
+ show_step(5)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_restart_mon01_node(self, openstack_actions,
+ sl_os_deployed, show_step):
+ """Test restart mon01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Check targets before restart
+ 5. Restart mon01
+ 6. Check targets after restart
+ 7. Run LMA smoke after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ sl_os_deployed.check_prometheus_targets(mon_nodes)
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
+ # STEP #5
+ show_step(5)
+ openstack_actions.warm_restart_nodes('mon01')
+ # STEP #6
+ show_step(6)
+ sl_os_deployed.check_prometheus_targets(mon_nodes)
+ # STEP #7
+ show_step(7)
+ # Run SL component tests
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_warm_shutdown_mon01_node(self, openstack_actions,
+ sl_os_deployed,
+ show_step):
+ """Test warm shutdown mon01
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Check LMA before mon node shutdown
+ 5. Shutdown mon01 node
+ 6. Run LMA tests after failover
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ sl_os_deployed.check_prometheus_targets(mon_nodes)
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
+ # STEP #5
+ show_step(5)
+ openstack_actions.warm_shutdown_openstack_nodes('mon01')
+ # STEP #6
+ show_step(6)
+ # Run SL component tests
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_restart_mon_with_vip(self, sl_os_deployed,
+ openstack_actions, salt_actions,
+ common_services_actions, show_step):
+ """Test restart mon with VIP
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute and monitoring nodes
+ 4. Check LMA before mon node restart
+ 5. Find mon minion id with VIP
+ 6. Restart mon minion id with VIP
+ 7. Check that VIP was actually migrated on a new node
+ 8. Run tempest smoke after failover
+
+
+ """
+ # TR case #4753939
+ common_services_actions.check_keepalived_pillar()
+ salt = salt_actions
+
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
+
+ # STEP #5
+ show_step(5)
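+ # get_pillar() is expected to return a list with one dict per matched
+ # minion, e.g. [{'mon01.domain.local': '10.0.0.100', ...}] (minion id
+ # and address here are illustrative). The cluster VIP is the same on
+ # every mon node, so any value from the first dict will do.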
+ mon_vip_pillar = salt.get_pillar(
+ tgt="mon0*",
+ pillar="_param:cluster_vip_address")[0]
+ vip = [vip for minion_id, vip in mon_vip_pillar.items()][0]
+ minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+ LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
+
+ # STEP #6
+ show_step(6)
+ openstack_actions.warm_restart_nodes(minion_vip)
+
+ # STEP #7
+ show_step(7)
+ # Check that VIP has been actually migrated to a new node
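+ # Keepalived should have re-elected a VRRP master among the remaining
+ # mon nodes while the old master was rebooting, so the VIP must now
+ # be reported by a different minion.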
+ new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+ vip)
+ LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
+ assert new_minion_vip != minion_vip, (
+ "VIP {0} wasn't migrated from {1} after node reboot!"
+ .format(vip, new_minion_vip))
+ common_services_actions.check_keepalived_pillar()
+
+ # STEP #8
+ show_step(8)
+ # Run SL component tests
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.openstack_deployed)
+ def test_restart_ctl_with_vip(self, underlay, openstack_deployed,
+ openstack_actions, salt_actions,
+ common_services_actions, show_step):
+ """Test restart clt with VIP
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Find controller minion id with VIP
+ 5. Restart controller minion id with VIP
+ 6. Check that VIP was actually migrated to a new node
+ 7. Run tempest smoke after failover
+
+
+ """
+ # TR case #3385671
+ common_services_actions.check_keepalived_pillar()
+ salt = salt_actions
+
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ ctl_vip_pillar = salt.get_pillar(
+ tgt="I@nova:controller:enabled:True",
+ pillar="_param:cluster_vip_address")[0]
+ vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
+ minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
+ LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
+
+ # STEP #5
+ show_step(5)
+ openstack_actions.warm_restart_nodes(minion_vip)
+
+ # STEP #6
+ show_step(6)
+ # Check that VIP has been actually migrated to a new node
+ new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
+ vip)
+ LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
+ assert new_minion_vip != minion_vip, (
+ "VIP {0} wasn't migrated from {1} after node reboot!"
+ .format(vip, new_minion_vip))
+ common_services_actions.check_keepalived_pillar()
+
+ # STEP #7
+ show_step(7)
+ openstack_actions.run_tempest(pattern='smoke')
+
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_failover_openstack_services.py b/tcp_tests/tests/system/test_failover_openstack_services.py
index efbba58..37cff72 100644
--- a/tcp_tests/tests/system/test_failover_openstack_services.py
+++ b/tcp_tests/tests/system/test_failover_openstack_services.py
@@ -600,60 +600,3 @@
assert not results['fail'], self.show_failed_msg(results['fail'])
LOG.info("*************** DONE **************")
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_restart_ctl_with_vip(self, underlay, openstack_deployed,
- openstack_actions, salt_actions,
- common_services_actions, show_step):
- """Test restart clt with VIP
-
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Find controller minion id with VIP
- 5. Restart controller minion id with VIP
- 6. Check that VIP was actually migrated on a new node
- 7. Run tempest smoke after failover
-
-
- """
- # TR case #3385671
- common_services_actions.check_keepalived_pillar()
- salt = salt_actions
-
- # STEP #1,2,3
- show_step(1)
- show_step(2)
- show_step(3)
-
- # STEP #4
- show_step(4)
- ctl_vip_pillar = salt.get_pillar(
- tgt="I@nova:controller:enabled:True",
- pillar="_param:cluster_vip_address")[0]
- vip = [vip for minion_id, vip in ctl_vip_pillar.items()][0]
- minion_vip = common_services_actions.get_keepalived_vip_minion_id(vip)
- LOG.info("VIP {0} is on {1}".format(vip, minion_vip))
-
- # STEP #5
- show_step(5)
- openstack_actions.warm_restart_nodes(minion_vip)
-
- # STEP #6
- show_step(6)
- # Check that VIP has been actually migrated to a new node
- new_minion_vip = common_services_actions.get_keepalived_vip_minion_id(
- vip)
- LOG.info("Migrated VIP {0} is on {1}".format(vip, new_minion_vip))
- assert new_minion_vip != minion_vip, (
- "VIP {0} wasn't migrated from {1} after node reboot!"
- .format(vip, new_minion_vip))
- common_services_actions.check_keepalived_pillar()
-
- # STEP #7
- show_step(7)
- openstack_actions.run_tempest(pattern='smoke')
-
- LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_failover_stacklight_services.py b/tcp_tests/tests/system/test_failover_stacklight_services.py
new file mode 100644
index 0000000..3bb47eb
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover_stacklight_services.py
@@ -0,0 +1,314 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from devops.helpers import helpers
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+class TestFailoverStacklightServices(object):
+ """Test class for testing OpenStack nodes failover"""
+
+ @staticmethod
+ def check_influxdb_xfail(sl_deployed, node_name, value):
+
+ def check_influxdb_data():
+ return value in sl_deployed.check_data_in_influxdb(node_name)
+
+ try:
+ helpers.wait(
+ check_influxdb_data,
+ timeout=10, interval=2,
+ timeout_msg=('Influxdb data {0} was not replicated to {1} '
+ '[https://mirantis.jira.com/browse/PROD-16272]'
+ .format(value, node_name)))
+ except Exception:
+ pytest.xfail('Influxdb data {0} was not replicated to {1} '
+ '[https://mirantis.jira.com/browse/PROD-16272]'
+ .format(value, node_name))
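+
+ # The helper above implements a wait-or-xfail pattern: poll until the
+ # data shows up, and turn a timeout into an expected failure
+ # (PROD-16272) instead of a hard error. The same idea in a generic,
+ # illustrative form (names below are not repo API):
+ #
+ # def wait_or_xfail(predicate, reason, timeout=10, interval=2):
+ #     try:
+ #         helpers.wait(predicate, timeout=timeout,
+ #                      interval=interval, timeout_msg=reason)
+ #     except Exception:
+ #         pytest.xfail(reason)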
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_kill_influxdb_relay_mon01_node(self, sl_os_deployed,
+ show_step):
+ """Test kill influxdb relay on mon01 node
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Check LMA before the failure
+ 5. Kill influxdb relay on mon01 node
+ 6. Post data into influx
+ 7. Get data from all healthy nodes
+ 8. Start influxdb relay
+ 9. Request data on mon01
+ 10. Run LMA tests after fail and compare with result before fail
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
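+ # The list above is a baseline: alert tests that already fail before
+ # the fault is injected are excluded from the post-fault check, so
+ # only new regressions fail the test. The same comparison as a
+ # hypothetical stand-alone helper (not part of this change):
+ #
+ # def assert_no_new_failures(before, after):
+ #     known = [t['name'] for t in before
+ #              if 'passed' not in t['outcome']]
+ #     for t in after:
+ #         if t['name'] not in known:
+ #             assert 'passed' in t['outcome'], \
+ #                 'Failed test {}'.format(t)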
+
+ # STEP #5
+ show_step(5)
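+ # Only the relay process on mon01 is killed; its InfluxDB daemon
+ # keeps running, so a point posted through mon02's relay should
+ # still be fanned out to all backends, including mon01 (step 9).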
+ sl_os_deployed.kill_sl_service_on_node('mon01', 'influxdb-relay')
+
+ # STEP #6
+ show_step(6)
+ sl_os_deployed.post_data_into_influx('mon02')
+
+ # STEP #7
+ show_step(7)
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+ # STEP #8
+ show_step(8)
+ sl_os_deployed.start_service('mon01', 'influxdb-relay')
+
+ # STEP #9
+ show_step(9)
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
+
+ # STEP #10
+ show_step(10)
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_kill_influxdb_mon01_node(self, sl_os_deployed, show_step):
+ """Test kill influxdb on mon01 node
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Check LMA before the failure
+ 5. Kill influxdb on mon01 node
+ 6. Post data into influx
+ 7. Get data from all healthy nodes
+ 8. Start influxdb
+ 9. Request data on mon01
+ 10. Run LMA tests after fail and compare with result before fail
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
+ # STEP #5
+ show_step(5)
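+ # This time the InfluxDB daemon itself ('influxd') is killed, so
+ # data posted while mon01 is down may never reach it; step 9 goes
+ # through the wait-or-xfail helper (PROD-16272), not a hard assert.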
+ sl_os_deployed.kill_sl_service_on_node('mon01', 'influxd')
+
+ # STEP #6
+ show_step(6)
+ sl_os_deployed.post_data_into_influx('mon02')
+
+ # STEP #7
+ show_step(7)
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+ # STEP #8
+ show_step(8)
+ sl_os_deployed.start_service('mon01', 'influxd')
+
+ # STEP #9
+ show_step(9)
+ self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
+
+ # STEP #10
+ show_step(10)
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_stop_influxdb_relay_mon_nodes(self, sl_os_deployed,
+ show_step):
+ """Test stop influxdb relay on mon01 node
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Check LMA before the failure
+ 5. Stop influxdb relay on mon01 and mon02 nodes
+ 6. Post data into influx
+ 7. Get data from all healthy nodes
+ 8. Start influxdb relay on mon01 and mon02
+ 9. Request data on mon01 and mon02
+ 10. Run LMA tests after fail and compare with result before fail
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
+ # STEP #5
+ show_step(5)
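+ # Two of the three relays are stopped, leaving mon03's relay as the
+ # only write path; it still fans writes out to every backend, which
+ # is why step 9 can use plain asserts for mon01 and mon02.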
+ sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb-relay')
+ sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb-relay')
+
+ # STEP #6
+ show_step(6)
+ sl_os_deployed.post_data_into_influx('mon03')
+
+ # STEP #7
+ show_step(7)
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+ # STEP #8
+ show_step(8)
+ sl_os_deployed.start_service('mon01', 'influxdb-relay')
+ sl_os_deployed.start_service('mon02', 'influxdb-relay')
+
+ # STEP #9
+ show_step(9)
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon01')
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon02')
+
+ # STEP #10
+ show_step(10)
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.sl_deployed)
+ def test_stop_influxdb_mon_nodes(self, sl_os_deployed, show_step):
+ """Test stop influxdb on mon01 node
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute, monitoring nodes
+ 4. Check LMA before the failure
+ 5. Stop influxdb on mon01 and mon02 node
+ 6. Post data into influx
+ 7. Get data from all healthy nodes
+ 8. Start influxdb on mon01 and mon02
+ 9. Request data on mon01 and mon02
+ 10. Run LMA tests after fail and compare with result before fail
+
+
+ """
+ # STEP #1,2,3
+ show_step(1)
+ show_step(2)
+ show_step(3)
+
+ # STEP #4
+ show_step(4)
+ mon_nodes = sl_os_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+ before_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ failed_tests = [test['name'] for test in
+ before_result if 'passed' not in test['outcome']]
+
+ # STEP #5
+ show_step(5)
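+ # Here the InfluxDB daemons themselves are stopped on mon01 and
+ # mon02, so the point posted via mon03 cannot reach them while they
+ # are down; step 9 uses the wait-or-xfail helper (PROD-16272).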
+ sl_os_deployed.stop_sl_service_on_node('mon01', 'influxdb')
+ sl_os_deployed.stop_sl_service_on_node('mon02', 'influxdb')
+
+ # STEP #6
+ show_step(6)
+ sl_os_deployed.post_data_into_influx('mon03')
+
+ # STEP #7
+ show_step(7)
+ assert 'mymeas' in sl_os_deployed.check_data_in_influxdb('mon03')
+
+ # STEP #8
+ show_step(8)
+ sl_os_deployed.start_service('mon01', 'influxdb')
+ sl_os_deployed.start_service('mon02', 'influxdb')
+
+ # STEP #9
+ show_step(9)
+ self.check_influxdb_xfail(sl_os_deployed, 'mon01', 'mymeas')
+ self.check_influxdb_xfail(sl_os_deployed, 'mon02', 'mymeas')
+
+ # STEP #10
+ show_step(10)
+ after_result = sl_os_deployed.run_sl_tests_json(
+ 'cfg01', '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus/', 'test_alerts.py')
+ for test in after_result:
+ if test['name'] not in failed_tests:
+ assert 'passed' in test['outcome'], \
+ 'Failed test {}'.format(test)
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_cookied_ocata.py b/tcp_tests/tests/system/test_install_cookied_ocata.py
index 0678365..a6d2313 100644
--- a/tcp_tests/tests/system/test_install_cookied_ocata.py
+++ b/tcp_tests/tests/system/test_install_cookied_ocata.py
@@ -58,7 +58,8 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- def test_cookied_ocata_cicd_oss_install(self, underlay, openstack_deployed,
+ def test_cookied_ocata_cicd_oss_install(self, underlay, salt_actions,
+ openstack_deployed,
oss_deployed, sl_deployed,
show_step):
"""Test for deploying an mcp environment and check it
@@ -79,8 +80,7 @@
openstack_deployed.run_tempest(pattern=settings.PATTERN)
openstack_deployed.download_tempest_report()
- expected_service_list = ['monitoring_remote_storage_adapter',
- 'monitoring_server',
+ expected_service_list = ['monitoring_server',
'monitoring_remote_agent',
'dashboard_grafana',
'monitoring_alertmanager',
@@ -91,6 +91,13 @@
LOG.debug('Mon nodes list {0}'.format(mon_nodes))
show_step(7)
+ prometheus_relay_enabled = salt_actions.get_pillar(
+ tgt=mon_nodes[0],
+ pillar="prometheus:relay:enabled")[0]
+ if not prometheus_relay_enabled:
+ # InfluxDB is used if prometheus relay service is not installed
+ expected_service_list.append('monitoring_remote_storage_adapter')
+
sl_deployed.check_docker_services(mon_nodes, expected_service_list)
show_step(8)
@@ -100,11 +107,13 @@
# Run SL component tetsts
sl_deployed.run_sl_functional_tests(
'cfg01',
- '/root/stacklight-pytest/stacklight_tests/tests/prometheus')
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
show_step(10)
# Download report
sl_deployed.download_sl_test_report(
'cfg01',
- '/root/stacklight-pytest/stacklight_tests')
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index b1adedc..ce9174f 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -26,6 +26,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.pike_ovs
def test_mcp_pike_ovs_install(self, underlay,
openstack_deployed,
openstack_actions):
@@ -48,6 +49,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.pike_ovs_sl
def test_mcp_pike_ovs_sl_install(self, underlay, config,
openstack_deployed,
sl_deployed):
@@ -82,6 +84,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.pike_ovs_dvr
def test_mcp_pike_dvr_install(self,
underlay,
openstack_deployed,
@@ -104,6 +107,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.pike_ovs_dvr_sl
def test_mcp_pike_dvr_sl_install(self, underlay, config,
openstack_deployed,
sl_deployed):
@@ -139,8 +143,76 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- def test_mcp11_pike_dpdk_install(self, underlay, openstack_deployed,
- show_step, openstack_actions):
+ def test_mcp_pike_dpdk_install(self, underlay, openstack_deployed,
+ show_step, openstack_actions):
+ """Test for deploying an mcp dpdk environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ """
+ LOG.info("*************** DONE **************")
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.pike_cookied_ovs_sl
+ def test_mcp_pike_cookied_ovs_install(self, underlay,
+ openstack_deployed,
+ openstack_actions,
+ sl_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.pike_cookied_ovs_dvr_sl
+ def test_mcp_pike_cookied_dvr_install(self,
+ underlay,
+ openstack_deployed,
+ openstack_actions,
+ sl_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_pike_cookied_dpdk_install(self, underlay, openstack_deployed,
+ show_step, openstack_actions):
"""Test for deploying an mcp dpdk environment and check it
Scenario:
1. Prepare salt on hosts